@@ -259,7 +259,6 @@ use core::{
259259 } ,
260260 ops, ptr,
261261 slice:: IterMut ,
262- sync:: atomic,
263262} ;
264263
265264#[ cfg( feature = "alloc" ) ]
@@ -300,7 +299,7 @@ where
300299{
301300 fn zeroize ( & mut self ) {
302301 volatile_write ( self , Z :: default ( ) ) ;
303- atomic_fence ( ) ;
302+ optimization_barrier ( self ) ;
304303 }
305304}
306305
@@ -334,7 +333,7 @@ macro_rules! impl_zeroize_for_non_zero {
334333 None => unreachable!( ) ,
335334 } ;
336335 volatile_write( self , ONE ) ;
337- atomic_fence ( ) ;
336+ optimization_barrier ( self ) ;
338337 }
339338 }
340339 ) +
@@ -425,7 +424,7 @@ where
425424 // done so by take().
426425 unsafe { ptr:: write_volatile ( self , None ) }
427426
428- atomic_fence ( ) ;
427+ optimization_barrier ( self ) ;
429428 }
430429}
431430
@@ -441,7 +440,7 @@ impl<Z> Zeroize for MaybeUninit<Z> {
441440 // Safety:
442441 // `MaybeUninit` is valid for any byte pattern, including zeros.
443442 unsafe { ptr:: write_volatile ( self , MaybeUninit :: zeroed ( ) ) }
444- atomic_fence ( ) ;
443+ optimization_barrier ( self ) ;
445444 }
446445}
447446
@@ -467,7 +466,7 @@ impl<Z> Zeroize for [MaybeUninit<Z>] {
467466 // and 0 is a valid value for `MaybeUninit<Z>`
468467 // The memory of the slice should not wrap around the address space.
469468 unsafe { volatile_set ( ptr, MaybeUninit :: zeroed ( ) , size) }
470- atomic_fence ( ) ;
469+ optimization_barrier ( self ) ;
471470 }
472471}
473472
@@ -493,7 +492,7 @@ where
493492 // `self.len()` is also not larger than an `isize`, because of the assertion above.
494493 // The memory of the slice should not wrap around the address space.
495494 unsafe { volatile_set ( self . as_mut_ptr ( ) , Z :: default ( ) , self . len ( ) ) } ;
496- atomic_fence ( ) ;
495+ optimization_barrier ( self ) ;
497496 }
498497}
499498
@@ -749,14 +748,6 @@ where
749748 }
750749}
751750
752- /// Use fences to prevent accesses from being reordered before this
753- /// point, which should hopefully help ensure that all accessors
754- /// see zeroes after this point.
755- #[ inline( always) ]
756- fn atomic_fence ( ) {
757- atomic:: compiler_fence ( atomic:: Ordering :: SeqCst ) ;
758- }
759-
760751/// Perform a volatile write to the destination
761752#[ inline( always) ]
762753fn volatile_write < T : Copy + Sized > ( dst : & mut T , src : T ) {
@@ -847,7 +838,84 @@ pub unsafe fn zeroize_flat_type<F: Sized>(data: *mut F) {
847838 unsafe {
848839 volatile_set ( data as * mut u8 , 0 , size) ;
849840 }
850- atomic_fence ( )
841+ optimization_barrier ( & data) ;
842+ }
843+
/// Observe the referenced data and prevent the compiler from removing previous writes to it.
///
/// This function acts like [`core::hint::black_box`] but takes a reference and
/// does not return the passed value.
///
/// It's implemented using the [`core::arch::asm!`] macro on target arches where `asm!` is stable,
/// i.e. `aarch64`, `arm`, `arm64ec`, `loongarch64`, `riscv32`, `riscv64`, `s390x`, `x86`, and
/// `x86_64`. On all other targets — and under Miri, which does not execute inline assembly —
/// it falls back to [`core::hint::black_box`].
///
/// # Examples
/// ```ignore
/// use core::num::NonZeroU32;
///
/// struct DataToZeroize {
///     buf: [u8; 32],
///     pos: NonZeroU32,
/// }
///
/// impl Drop for DataToZeroize {
///     fn drop(&mut self) {
///         self.buf = [0u8; 32];
///         self.pos = NonZeroU32::new(32).unwrap();
///         // Keep the compiler from eliding the writes above as dead stores.
///         zeroize::optimization_barrier(self);
///     }
/// }
///
/// impl zeroize::ZeroizeOnDrop for DataToZeroize {}
///
/// let mut data = DataToZeroize {
///     buf: [3u8; 32],
///     pos: NonZeroU32::new(32).unwrap(),
/// };
///
/// // data gets zeroized when dropped
/// ```
#[inline(always)]
pub fn optimization_barrier<R: ?Sized>(val: &R) {
    #[cfg(all(
        not(miri),
        any(
            target_arch = "aarch64",
            target_arch = "arm",
            target_arch = "arm64ec",
            target_arch = "loongarch64",
            target_arch = "riscv32",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "x86",
            target_arch = "x86_64",
        )
    ))]
    // SAFETY: the asm template only embeds the pointer operand inside an
    // assembler comment, so no instructions are executed. The declared
    // `options` hold: the block at most reads memory (`readonly`), leaves
    // the flags untouched (`preserves_flags`), and never touches the stack
    // pointer (`nostack`).
    unsafe {
        core::arch::asm!(
            "# {}",
            in(reg) val as *const R as *const (),
            options(readonly, preserves_flags, nostack),
        );
    }
    #[cfg(not(all(
        not(miri),
        any(
            target_arch = "aarch64",
            target_arch = "arm",
            target_arch = "arm64ec",
            target_arch = "loongarch64",
            target_arch = "riscv32",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "x86",
            target_arch = "x86_64",
        )
    )))]
    core::hint::black_box(val);
}
852920
853921/// Internal module used as support for `AssertZeroizeOnDrop`.
0 commit comments