@@ -250,6 +250,9 @@ mod aarch64;
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 mod x86;
 
+mod barrier;
+use barrier::optimization_barrier;
+
 use core::{
     marker::{PhantomData, PhantomPinned},
     mem::{MaybeUninit, size_of},
@@ -259,7 +262,6 @@ use core::{
     },
     ops, ptr,
     slice::IterMut,
-    sync::atomic,
 };
 
 #[cfg(feature = "alloc")]
@@ -300,7 +302,7 @@ where
 {
     fn zeroize(&mut self) {
         volatile_write(self, Z::default());
-        atomic_fence();
+        optimization_barrier(self);
     }
 }
 
@@ -334,7 +336,7 @@ macro_rules! impl_zeroize_for_non_zero {
                         None => unreachable!(),
                     };
                     volatile_write(self, ONE);
-                    atomic_fence();
+                    optimization_barrier(self);
                 }
             }
         )+
@@ -425,7 +427,7 @@ where
         // done so by take().
         unsafe { ptr::write_volatile(self, None) }
 
-        atomic_fence();
+        optimization_barrier(self);
     }
 }
 
@@ -441,7 +443,7 @@ impl<Z> Zeroize for MaybeUninit<Z> {
         // Safety:
         // `MaybeUninit` is valid for any byte pattern, including zeros.
         unsafe { ptr::write_volatile(self, MaybeUninit::zeroed()) }
-        atomic_fence();
+        optimization_barrier(self);
     }
 }
 
@@ -467,7 +469,7 @@ impl<Z> Zeroize for [MaybeUninit<Z>] {
         // and 0 is a valid value for `MaybeUninit<Z>`
         // The memory of the slice should not wrap around the address space.
         unsafe { volatile_set(ptr, MaybeUninit::zeroed(), size) }
-        atomic_fence();
+        optimization_barrier(self);
     }
 }
 
@@ -493,7 +495,7 @@ where
         // `self.len()` is also not larger than an `isize`, because of the assertion above.
         // The memory of the slice should not wrap around the address space.
         unsafe { volatile_set(self.as_mut_ptr(), Z::default(), self.len()) };
-        atomic_fence();
+        optimization_barrier(self);
     }
 }
 
@@ -753,14 +755,6 @@ where
     }
 }
 
-/// Use fences to prevent accesses from being reordered before this
-/// point, which should hopefully help ensure that all accessors
-/// see zeroes after this point.
-#[inline(always)]
-fn atomic_fence() {
-    atomic::compiler_fence(atomic::Ordering::SeqCst);
-}
-
 /// Perform a volatile write to the destination
 #[inline(always)]
 fn volatile_write<T: Copy + Sized>(dst: &mut T, src: T) {
@@ -851,7 +845,7 @@ pub unsafe fn zeroize_flat_type<F: Sized>(data: *mut F) {
     unsafe {
         volatile_set(data as *mut u8, 0, size);
     }
-    atomic_fence()
+    optimization_barrier(&data);
 }
 
 /// Internal module used as support for `AssertZeroizeOnDrop`.
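
The `barrier` module introduced by this commit is not shown in the hunks above. As context, here is a minimal sketch of what `optimization_barrier` could look like, modeled on the inline-asm pattern behind `core::hint::black_box`; the name and signature follow the `use barrier::optimization_barrier;` import, but the body is an assumption, not the commit's actual implementation:

// Hypothetical sketch of a barrier module; not taken from this diff.
// A real version would need per-target gating (`asm!` exists only on
// a subset of architectures) plus a `compiler_fence` fallback.
#[inline(always)]
pub(crate) fn optimization_barrier<T: ?Sized>(value: &T) {
    let ptr = (value as *const T).cast::<u8>();
    // SAFETY: the template is an empty comment and performs no memory
    // access; `options(readonly)` merely makes the optimizer assume
    // the asm may read through `ptr`, so the preceding volatile
    // writes must be completed and cannot be dead-store-eliminated.
    unsafe {
        core::arch::asm!(
            "/* {0} */",
            in(reg) ptr,
            options(nostack, readonly, preserves_flags),
        );
    }
}

Whatever the real body is, the visible design change is that the barrier receives a reference to the just-zeroized memory, creating a data dependency the optimizer must respect, whereas the deleted `atomic_fence` was a free-standing `compiler_fence(Ordering::SeqCst)` with no connection to any particular allocation.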