Rename AtomicPtr::fetch_{add,sub}{,_bytes}

Author: Thom Chiovoloni, 2022-05-14 12:37:19 -07:00 (committed by Thom Chiovoloni)
parent 2f872afdb5
commit e65ecee90e
GPG Key ID: D7733D1D7A775F0A
2 changed files with 29 additions and 29 deletions

Changed file 1 of 2

@@ -1462,12 +1462,12 @@ impl<T> AtomicPtr<T> {
 /// to offset the pointer by an amount which is not a multiple of
 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
 /// work with a deliberately misaligned pointer. In such cases, you may use
-/// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+/// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
 ///
-/// `fetch_add` takes an [`Ordering`] argument which describes the memory
-/// ordering of this operation. All ordering modes are possible. Note that
-/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
-/// and using [`Release`] makes the load part [`Relaxed`].
+/// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+/// memory ordering of this operation. All ordering modes are possible. Note
+/// that using [`Acquire`] makes the store part of this operation
+/// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
 ///
 /// **Note**: This method is only available on platforms that support atomic
 /// operations on [`AtomicPtr`].
@@ -1481,15 +1481,15 @@ impl<T> AtomicPtr<T> {
 /// use core::sync::atomic::{AtomicPtr, Ordering};
 ///
 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-/// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+/// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
 /// // Note: units of `size_of::<i64>()`.
 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
 /// ```
 #[inline]
 #[cfg(target_has_atomic = "ptr")]
 #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
-    self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+    self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
 }

 /// Offsets the pointer's address by subtracting `val` (in units of `T`),
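For context on the renamed pair: `fetch_ptr_add` stays a thin wrapper over `fetch_byte_add`, scaling the element count by `size_of::<T>()`. A minimal sketch of that equivalence (not part of this commit), assuming a nightly toolchain with the `strict_provenance_atomic_ptr` and `strict_provenance` features enabled:

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let a = AtomicPtr::<u64>::new(core::ptr::null_mut());
    let b = AtomicPtr::<u64>::new(core::ptr::null_mut());

    // Counting in elements of u64 ...
    a.fetch_ptr_add(3, Ordering::Relaxed);
    // ... lands at the same address as counting in bytes scaled by size_of::<u64>().
    b.fetch_byte_add(3 * core::mem::size_of::<u64>(), Ordering::Relaxed);

    assert_eq!(
        a.load(Ordering::Relaxed).addr(),
        b.load(Ordering::Relaxed).addr()
    );
}
```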
@@ -1502,9 +1502,9 @@ impl<T> AtomicPtr<T> {
 /// to offset the pointer by an amount which is not a multiple of
 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
 /// work with a deliberately misaligned pointer. In such cases, you may use
-/// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+/// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
 ///
-/// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+/// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
 /// ordering of this operation. All ordering modes are possible. Note that
 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
 /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1524,7 +1524,7 @@ impl<T> AtomicPtr<T> {
 /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
 ///
 /// assert!(core::ptr::eq(
-///     atom.fetch_sub(1, Ordering::Relaxed),
+///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
 ///     &array[1],
 /// ));
 /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
@@ -1532,8 +1532,8 @@ impl<T> AtomicPtr<T> {
 #[inline]
 #[cfg(target_has_atomic = "ptr")]
 #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
-    self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+    self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
 }

 /// Offsets the pointer's address by adding `val` *bytes*, returning the
@@ -1542,7 +1542,7 @@ impl<T> AtomicPtr<T> {
 /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
 /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
 ///
-/// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+/// `fetch_byte_add` takes an [`Ordering`] argument which describes the
 /// memory ordering of this operation. All ordering modes are possible. Note
 /// that using [`Acquire`] makes the store part of this operation
 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1560,14 +1560,14 @@ impl<T> AtomicPtr<T> {
 /// use core::sync::atomic::{AtomicPtr, Ordering};
 ///
 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-/// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+/// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
 /// // Note: in units of bytes, not `size_of::<i64>()`.
 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
 /// ```
 #[inline]
 #[cfg(target_has_atomic = "ptr")]
 #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
     #[cfg(not(bootstrap))]
     // SAFETY: data races are prevented by atomic intrinsics.
     unsafe {
@@ -1586,7 +1586,7 @@ impl<T> AtomicPtr<T> {
 /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
 /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
 ///
-/// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+/// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
 /// memory ordering of this operation. All ordering modes are possible. Note
 /// that using [`Acquire`] makes the store part of this operation
 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1604,13 +1604,13 @@ impl<T> AtomicPtr<T> {
 /// use core::sync::atomic::{AtomicPtr, Ordering};
 ///
 /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
-/// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+/// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
 /// ```
 #[inline]
 #[cfg(target_has_atomic = "ptr")]
 #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
     #[cfg(not(bootstrap))]
     // SAFETY: data races are prevented by atomic intrinsics.
     unsafe {
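The byte-granular methods renamed above are the ones the docs recommend for deliberately misaligned pointers. A small usage sketch (not from this commit), again assuming the nightly features; the buffer and cursor names are illustrative only:

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // Hypothetical cursor stepping through a byte buffer in 1-byte increments,
    // regardless of the AtomicPtr's pointee type.
    let mut buf = [0u8; 16];
    let cursor = AtomicPtr::<u32>::new(buf.as_mut_ptr().cast());

    // Advance by 5 bytes; the result need not stay aligned for u32.
    let before = cursor.fetch_byte_add(5, Ordering::Relaxed);
    assert_eq!(cursor.load(Ordering::Relaxed).addr(), before.addr() + 5);

    // Step back by 5 bytes to return to the start of the buffer.
    cursor.fetch_byte_sub(5, Ordering::Relaxed);
    assert_eq!(cursor.load(Ordering::Relaxed).addr(), buf.as_ptr().addr());
}
```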

Changed file 2 of 2

@@ -131,16 +131,16 @@ fn int_max() {
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_add_null() {
     let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
     assert_eq!(atom.load(SeqCst).addr(), 8);

-    assert_eq!(atom.fetch_add_bytes(1, SeqCst).addr(), 8);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
     assert_eq!(atom.load(SeqCst).addr(), 9);

-    assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
     assert_eq!(atom.load(SeqCst).addr(), 1);

-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst).addr(), 1);
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
     assert_eq!(atom.load(SeqCst).addr(), 0);
 }
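As a non-atomic aside, the address sequence this test checks (0 → 8 → 9 → 1 → 0) is plain arithmetic in two units, element-sized steps and single-byte steps; a sketch of that bookkeeping:

```rust
fn main() {
    let mut addr: usize = 0;
    addr += 1 * core::mem::size_of::<i64>(); // fetch_ptr_add(1): 0 -> 8
    addr += 1;                               // fetch_byte_add(1): 8 -> 9
    addr -= 1 * core::mem::size_of::<i64>(); // fetch_ptr_sub(1): 9 -> 1
    addr -= 1;                               // fetch_byte_sub(1): 1 -> 0
    assert_eq!(addr, 0);
}
```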
@@ -150,23 +150,23 @@ fn ptr_add_data() {
     let num = 0i64;
     let n = &num as *const i64 as *mut _;
     let atom = AtomicPtr::<i64>::new(n);
-    assert_eq!(atom.fetch_add(1, SeqCst), n);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
-    assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
     let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
-    assert_eq!(atom.fetch_add_bytes(1, SeqCst), n);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));
-    assert_eq!(atom.fetch_add_bytes(5, SeqCst), bytes_from_n(1));
+    assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
     assert_eq!(atom.load(SeqCst), bytes_from_n(6));
-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst), bytes_from_n(6));
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
     assert_eq!(atom.load(SeqCst), bytes_from_n(5));
-    assert_eq!(atom.fetch_sub_bytes(5, SeqCst), bytes_from_n(5));
+    assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
     assert_eq!(atom.load(SeqCst), n);
 }
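For downstream nightly code using these unstable methods, the rename is mechanical. A hedged before/after sketch (the `migrate` function and its argument are hypothetical, not part of this commit):

```rust
#![feature(strict_provenance_atomic_ptr)]
use core::sync::atomic::{AtomicPtr, Ordering::SeqCst};

// Hypothetical caller updating from the old names to the new ones.
fn migrate(atom: &AtomicPtr<u32>) {
    // Was: atom.fetch_add(2, SeqCst)
    atom.fetch_ptr_add(2, SeqCst); // element units: moves 2 * size_of::<u32>() bytes

    // Was: atom.fetch_sub(2, SeqCst)
    atom.fetch_ptr_sub(2, SeqCst);

    // Was: atom.fetch_add_bytes(8, SeqCst)
    atom.fetch_byte_add(8, SeqCst); // byte units: moves exactly 8 bytes

    // Was: atom.fetch_sub_bytes(8, SeqCst)
    atom.fetch_byte_sub(8, SeqCst);
}
```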