From caebb56882bd662bdf3a049776aeeace9b4ca66a Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Mon, 24 Nov 2025 16:13:19 -0500 Subject: [PATCH 1/2] Simplify atomics using LazyUsize --- src/backends/getentropy.rs | 5 +- src/backends/getrandom.rs | 5 +- src/backends/linux_android_with_fallback.rs | 78 ++++------ src/backends/netbsd.rs | 32 ++--- src/backends/rdrand.rs | 5 +- src/backends/rndr.rs | 3 +- src/backends/solaris.rs | 5 +- src/backends/use_file.rs | 5 +- src/backends/vxworks.rs | 5 +- src/error.rs | 2 - src/lazy.rs | 6 +- src/lib.rs | 5 + src/util.rs | 1 - src/util_libc.rs | 150 +++++++++++--------- 14 files changed, 133 insertions(+), 174 deletions(-) diff --git a/src/backends/getentropy.rs b/src/backends/getentropy.rs index ed181f019..7d88f2958 100644 --- a/src/backends/getentropy.rs +++ b/src/backends/getentropy.rs @@ -7,14 +7,11 @@ //! - vita newlib since Dec 2021 //! //! For these targets, we use getentropy(2) because getrandom(2) doesn't exist. -use crate::Error; +use crate::{Error, util_libc}; use core::{ffi::c_void, mem::MaybeUninit}; pub use crate::util::{inner_u32, inner_u64}; -#[path = "../util_libc.rs"] -mod util_libc; - #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { for chunk in dest.chunks_mut(256) { diff --git a/src/backends/getrandom.rs b/src/backends/getrandom.rs index af97eab28..44948c84d 100644 --- a/src/backends/getrandom.rs +++ b/src/backends/getrandom.rs @@ -15,14 +15,11 @@ //! GRND_RANDOM is not recommended. On NetBSD/FreeBSD/Dragonfly/3ds, it does //! nothing. On illumos, the default pool is used to implement getentropy(2), //! so we assume it is acceptable here. -use crate::Error; +use crate::{Error, util_libc}; use core::mem::MaybeUninit; pub use crate::util::{inner_u32, inner_u64}; -#[path = "../util_libc.rs"] -mod util_libc; - #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { util_libc::sys_fill_exact(dest, |buf| { diff --git a/src/backends/linux_android_with_fallback.rs b/src/backends/linux_android_with_fallback.rs index d4ae6f247..2500da6a8 100644 --- a/src/backends/linux_android_with_fallback.rs +++ b/src/backends/linux_android_with_fallback.rs @@ -1,13 +1,11 @@ //! Implementation for Linux / Android with `/dev/urandom` fallback use super::{sanitizer, use_file}; -use crate::Error; +use crate::{Error, lazy, util_libc}; use core::{ ffi::c_void, mem::{MaybeUninit, transmute}, - ptr::NonNull, - sync::atomic::{AtomicPtr, Ordering}, + ptr, }; -use use_file::util_libc; pub use crate::util::{inner_u32, inner_u64}; @@ -15,49 +13,41 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) /// Sentinel value which indicates that `libc::getrandom` either not available, /// or not supported by kernel. 
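As a quick orientation for the import changes above: they replace the per-backend
`#[path = "../util_libc.rs"] mod util_libc;` declarations with a single crate-level
module, so backends reach the shared helpers through `crate::util_libc`. A minimal
sketch of the resulting layout (the backend name and closure body here are
illustrative, not taken from the crate):

    // src/lib.rs
    mod backends;
    mod util_libc; // declared once for the whole crate

    // src/backends/some_backend.rs (hypothetical backend)
    use crate::{Error, util_libc};
    use core::mem::MaybeUninit;

    pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
        // Shared helpers are now ordinary items of `crate::util_libc`.
        util_libc::sys_fill_exact(dest, |buf| unsafe {
            libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0)
        })
    }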
-const NOT_AVAILABLE: NonNull = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) }; - -static GETRANDOM_FN: AtomicPtr = AtomicPtr::new(core::ptr::null_mut()); +const NOT_AVAILABLE: usize = usize::MAX; #[cold] #[inline(never)] -fn init() -> NonNull { +fn init() -> usize { // Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else #[cfg(not(target_env = "musl"))] - let raw_ptr = { - static NAME: &[u8] = b"getrandom\0"; - let name_ptr = NAME.as_ptr().cast::(); - unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) } - }; + let fptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) } as usize; #[cfg(target_env = "musl")] - let raw_ptr = { + let fptr = { let fptr: GetRandomFn = libc::getrandom; - unsafe { transmute::(fptr) } + unsafe { transmute::(fptr) } }; - let res_ptr = match NonNull::new(raw_ptr) { - Some(fptr) => { - let getrandom_fn = unsafe { transmute::, GetRandomFn>(fptr) }; - let dangling_ptr = NonNull::dangling().as_ptr(); - // Check that `getrandom` syscall is supported by kernel - let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) }; - if cfg!(getrandom_test_linux_fallback) { - NOT_AVAILABLE - } else if res.is_negative() { - match util_libc::last_os_error().raw_os_error() { - Some(libc::ENOSYS) => NOT_AVAILABLE, // No kernel support - // The fallback on EPERM is intentionally not done on Android since this workaround - // seems to be needed only for specific Linux-based products that aren't based - // on Android. See https://github.com/rust-random/getrandom/issues/229. - #[cfg(target_os = "linux")] - Some(libc::EPERM) => NOT_AVAILABLE, // Blocked by seccomp - _ => fptr, - } - } else { - fptr + let res_ptr = if fptr != 0 { + let getrandom_fn = unsafe { transmute::(fptr) }; + // Check that `getrandom` syscall is supported by kernel + let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) }; + if cfg!(getrandom_test_linux_fallback) { + NOT_AVAILABLE + } else if res.is_negative() { + match util_libc::last_os_error().raw_os_error() { + Some(libc::ENOSYS) => NOT_AVAILABLE, // No kernel support + // The fallback on EPERM is intentionally not done on Android since this workaround + // seems to be needed only for specific Linux-based products that aren't based + // on Android. See https://github.com/rust-random/getrandom/issues/229. + #[cfg(target_os = "linux")] + Some(libc::EPERM) => NOT_AVAILABLE, // Blocked by seccomp + _ => fptr, } + } else { + fptr } - None => NOT_AVAILABLE, + } else { + NOT_AVAILABLE }; #[cfg(getrandom_test_linux_without_fallback)] @@ -65,7 +55,6 @@ fn init() -> NonNull { panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`") } - GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release); res_ptr } @@ -77,23 +66,14 @@ fn use_file_fallback(dest: &mut [MaybeUninit]) -> Result<(), Error> { #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - // Despite being only a single atomic variable, we still cannot always use - // Ordering::Relaxed, as we need to make sure a successful call to `init` - // is "ordered before" any data read through the returned pointer (which - // occurs when the function is called). Our implementation mirrors that of - // the one in libstd, meaning that the use of non-Relaxed operations is - // probably unnecessary. 
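For readers skimming this hunk: the Acquire/Release dance being removed here is
replaced by the much simpler pattern that `lazy::LazyUsize` centralizes. A
standalone, simplified sketch of that pattern (illustrative, not the crate's
actual code):

    use core::sync::atomic::{AtomicUsize, Ordering};

    const UNINIT: usize = usize::MAX; // sentinel: "not initialized yet"
    static CACHED: AtomicUsize = AtomicUsize::new(UNINIT);

    fn get_or_init(init: impl FnOnce() -> usize) -> usize {
        let val = CACHED.load(Ordering::Relaxed);
        if val != UNINIT {
            return val;
        }
        // Several callers may race here; each computes and stores the same
        // value, and callers depend only on that value (not on other side
        // effects of `init`), so a single Relaxed atomic is sufficient.
        let val = init();
        CACHED.store(val, Ordering::Relaxed);
        val
    }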
- let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire); - let fptr = match NonNull::new(raw_ptr) { - Some(p) => p, - None => init(), - }; + static GETRANDOM_FN: lazy::LazyUsize = lazy::LazyUsize::new(); + let fptr = GETRANDOM_FN.unsync_init(init); if fptr == NOT_AVAILABLE { use_file_fallback(dest) } else { // note: `transmute` is currently the only way to convert a pointer into a function reference - let getrandom_fn = unsafe { transmute::, GetRandomFn>(fptr) }; + let getrandom_fn = unsafe { transmute::(fptr) }; util_libc::sys_fill_exact(dest, |buf| unsafe { let ret = getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0); sanitizer::unpoison_linux_getrandom_result(buf, ret); diff --git a/src/backends/netbsd.rs b/src/backends/netbsd.rs index f228a8b13..b90a52c38 100644 --- a/src/backends/netbsd.rs +++ b/src/backends/netbsd.rs @@ -3,20 +3,16 @@ //! `getrandom(2)` was introduced in NetBSD 10. To support older versions we //! implement our own weak linkage to it, and provide a fallback based on the //! KERN_ARND sysctl. -use crate::Error; +use crate::{Error, lazy, util_libc}; use core::{ cmp, ffi::c_void, mem::{self, MaybeUninit}, ptr, - sync::atomic::{AtomicPtr, Ordering}, }; pub use crate::util::{inner_u32, inner_u64}; -#[path = "../util_libc.rs"] -mod util_libc; - unsafe extern "C" fn polyfill_using_kern_arand( buf: *mut c_void, buflen: libc::size_t, @@ -42,36 +38,24 @@ unsafe extern "C" fn polyfill_using_kern_arand( type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t; -static GETRANDOM: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - #[cold] #[inline(never)] -fn init() -> *mut c_void { - static NAME: &[u8] = b"getrandom\0"; - let name_ptr = NAME.as_ptr().cast::(); - let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }; +fn init() -> usize { + let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) }; if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) { // Verify `polyfill_using_kern_arand` has the right signature. const POLYFILL: GetRandomFn = polyfill_using_kern_arand; ptr = POLYFILL as *mut c_void; } - GETRANDOM.store(ptr, Ordering::Release); - ptr + ptr as usize } #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - // Despite being only a single atomic variable, we still cannot always use - // Ordering::Relaxed, as we need to make sure a successful call to `init` - // is "ordered before" any data read through the returned pointer (which - // occurs when the function is called). Our implementation mirrors that of - // the one in libstd, meaning that the use of non-Relaxed operations is - // probably unnecessary. - let mut fptr = GETRANDOM.load(Ordering::Acquire); - if fptr.is_null() { - fptr = init(); - } - let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) }; + static GETRANDOM_FN: lazy::LazyUsize = lazy::LazyUsize::new(); + + let fptr = GETRANDOM_FN.unsync_init(init); + let fptr = unsafe { mem::transmute::(fptr) }; util_libc::sys_fill_exact(dest, |buf| unsafe { fptr(buf.as_mut_ptr().cast::(), buf.len(), 0) }) diff --git a/src/backends/rdrand.rs b/src/backends/rdrand.rs index e1e8934cb..5e1e7d1ed 100644 --- a/src/backends/rdrand.rs +++ b/src/backends/rdrand.rs @@ -1,10 +1,7 @@ //! 
RDRAND backend for x86(-64) targets -use crate::{Error, util::slice_as_uninit}; +use crate::{Error, lazy, util::slice_as_uninit}; use core::mem::{MaybeUninit, size_of}; -#[path = "../lazy.rs"] -mod lazy; - #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))] compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!"); diff --git a/src/backends/rndr.rs b/src/backends/rndr.rs index 0b0636aea..2f048e811 100644 --- a/src/backends/rndr.rs +++ b/src/backends/rndr.rs @@ -69,8 +69,7 @@ fn is_rndr_available() -> bool { #[cfg(not(target_feature = "rand"))] fn is_rndr_available() -> bool { - #[path = "../lazy.rs"] - mod lazy; + use crate::lazy; static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new(); cfg_if::cfg_if! { diff --git a/src/backends/solaris.rs b/src/backends/solaris.rs index c27f91a5f..547bd8594 100644 --- a/src/backends/solaris.rs +++ b/src/backends/solaris.rs @@ -12,14 +12,11 @@ //! For more information, see the man page linked in lib.rs and this blog post: //! https://blogs.oracle.com/solaris/post/solaris-new-system-calls-getentropy2-and-getrandom2 //! which also explains why this crate should not use getentropy(2). -use crate::Error; +use crate::{Error, util_libc}; use core::{ffi::c_void, mem::MaybeUninit}; pub use crate::util::{inner_u32, inner_u64}; -#[path = "../util_libc.rs"] -mod util_libc; - const MAX_BYTES: usize = 1024; #[inline] diff --git a/src/backends/use_file.rs b/src/backends/use_file.rs index 796dbbc59..c585fb147 100644 --- a/src/backends/use_file.rs +++ b/src/backends/use_file.rs @@ -1,5 +1,5 @@ //! Implementations that just need to read from a file -use crate::Error; +use crate::{Error, util_libc}; use core::{ ffi::{CStr, c_void}, mem::MaybeUninit, @@ -9,9 +9,6 @@ use core::{ #[cfg(not(any(target_os = "android", target_os = "linux")))] pub use crate::util::{inner_u32, inner_u64}; -#[path = "../util_libc.rs"] -pub(super) mod util_libc; - /// For all platforms, we use `/dev/urandom` rather than `/dev/random`. /// For more information see the linked man pages in lib.rs. /// - On Linux, "/dev/urandom is preferred and sufficient in all use cases". diff --git a/src/backends/vxworks.rs b/src/backends/vxworks.rs index 5f5e6773b..663356fb8 100644 --- a/src/backends/vxworks.rs +++ b/src/backends/vxworks.rs @@ -1,14 +1,11 @@ //! Implementation for VxWorks -use crate::Error; +use crate::{Error, util_libc}; use core::{ cmp::Ordering::{Equal, Greater, Less}, mem::MaybeUninit, sync::atomic::{AtomicBool, Ordering::Relaxed}, }; -#[path = "../util_libc.rs"] -mod util_libc; - pub use crate::util::{inner_u32, inner_u64}; static RNG_INIT: AtomicBool = AtomicBool::new(false); diff --git a/src/error.rs b/src/error.rs index 069d8c109..238cd5fce 100644 --- a/src/error.rs +++ b/src/error.rs @@ -59,7 +59,6 @@ impl Error { /// Creates a new instance of an `Error` from a negative error code. #[cfg(not(target_os = "uefi"))] - #[allow(dead_code)] pub(super) fn from_neg_error_code(code: RawOsError) -> Self { if code < 0 { let code = NonZeroRawOsError::new(code).expect("`code` is negative"); @@ -71,7 +70,6 @@ impl Error { /// Creates a new instance of an `Error` from an UEFI error code. 
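For context on how `from_neg_error_code` above is reached on libc targets, a small
sketch mirroring `last_os_error` in util_libc later in this patch (the errno value
is illustrative only):

    // A positive errno, e.g. EINTR, is negated before being handed to
    // `Error::from_neg_error_code`, which expects a strictly negative code.
    let errno: i32 = libc::EINTR;
    let code = errno
        .checked_neg()
        .expect("positive errno can always be negated");
    assert!(code < 0);
    // util_libc::last_os_error() then returns Error::from_neg_error_code(code).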
#[cfg(target_os = "uefi")] - #[allow(dead_code)] pub(super) fn from_uefi_code(code: RawOsError) -> Self { if code & UEFI_ERROR_FLAG != 0 { let code = NonZeroRawOsError::new(code).expect("The highest bit of `code` is set to 1"); diff --git a/src/lazy.rs b/src/lazy.rs index b191aa6d7..e1ef399ba 100644 --- a/src/lazy.rs +++ b/src/lazy.rs @@ -19,20 +19,20 @@ use core::sync::atomic::{AtomicUsize, Ordering}; // } // the effects of c() or writes to shared memory will not necessarily be // observed and additional synchronization methods may be needed. -struct LazyUsize(AtomicUsize); +pub(crate) struct LazyUsize(AtomicUsize); impl LazyUsize { // The initialization is not completed. const UNINIT: usize = usize::MAX; - const fn new() -> Self { + pub const fn new() -> Self { Self(AtomicUsize::new(Self::UNINIT)) } // Runs the init() function at most once, returning the value of some run of // init(). Multiple callers can run their init() functions in parallel. // init() should always return the same value, if it succeeds. - fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize { + pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize { #[cold] fn do_init(this: &LazyUsize, init: impl FnOnce() -> usize) -> usize { let val = init(); diff --git a/src/lib.rs b/src/lib.rs index ed69ad234..a651ab968 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -33,8 +33,13 @@ extern crate cfg_if; use core::mem::MaybeUninit; mod backends; +#[allow(dead_code, reason = "not used in all backends")] mod error; +#[allow(dead_code, reason = "not used in all backends")] +mod lazy; +#[allow(dead_code, reason = "not used in all backends")] mod util; +mod util_libc; #[cfg(feature = "std")] mod error_std_impls; diff --git a/src/util.rs b/src/util.rs index ef700e85f..776abc4a9 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,4 +1,3 @@ -#![allow(dead_code)] use crate::Error; use core::{mem::MaybeUninit, ptr, slice}; diff --git a/src/util_libc.rs b/src/util_libc.rs index 69432891c..af69ca42e 100644 --- a/src/util_libc.rs +++ b/src/util_libc.rs @@ -1,81 +1,93 @@ -use crate::Error; -use core::mem::MaybeUninit; +#[allow(unused_macros, reason = "not always used")] +macro_rules! emit_impl { + ($get_errno:expr) => { + use crate::Error; + use core::mem::MaybeUninit; -cfg_if! 
{ - if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android", target_os = "cygwin"))] { - use libc::__errno as errno_location; - } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox", target_os = "dragonfly"))] { - use libc::__errno_location as errno_location; - } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { - use libc::___errno as errno_location; - } else if #[cfg(any(target_os = "macos", target_os = "freebsd"))] { - use libc::__error as errno_location; - } else if #[cfg(target_os = "haiku")] { - use libc::_errnop as errno_location; - } else if #[cfg(target_os = "nto")] { - use libc::__get_errno_ptr as errno_location; - } else if #[cfg(any(all(target_os = "horizon", target_arch = "arm"), target_os = "vita"))] { - unsafe extern "C" { - // Not provided by libc: https://github.com/rust-lang/libc/issues/1995 - fn __errno() -> *mut libc::c_int; + pub(crate) fn last_os_error() -> Error { + // We assume that on all targets which use the `util_libc` module `c_int` is equal to `i32` + let errno: i32 = unsafe { $get_errno }; + + if errno > 0 { + let code = errno + .checked_neg() + .expect("Positive number can be always negated"); + Error::from_neg_error_code(code) + } else { + Error::ERRNO_NOT_POSITIVE + } } - use __errno as errno_location; - } else if #[cfg(target_os = "aix")] { - use libc::_Errno as errno_location; - } -} -cfg_if! { - if #[cfg(target_os = "vxworks")] { - use libc::errnoGet as get_errno; - } else { - unsafe fn get_errno() -> libc::c_int { unsafe { *errno_location() }} - } + /// Fill a buffer by repeatedly invoking `sys_fill`. + /// + /// The `sys_fill` function: + /// - should return -1 and set errno on failure + /// - should return the number of bytes written on success + #[allow(dead_code, reason = "not used in all backends")] + pub(crate) fn sys_fill_exact( + mut buf: &mut [MaybeUninit], + sys_fill: impl Fn(&mut [MaybeUninit]) -> libc::ssize_t, + ) -> Result<(), Error> { + while !buf.is_empty() { + let res = sys_fill(buf); + match res { + res if res > 0 => { + let len = usize::try_from(res).map_err(|_| Error::UNEXPECTED)?; + buf = buf.get_mut(len..).ok_or(Error::UNEXPECTED)?; + } + -1 => { + let err = last_os_error(); + // We should try again if the call was interrupted. + if err.raw_os_error() != Some(libc::EINTR) { + return Err(err); + } + } + // Negative return codes not equal to -1 should be impossible. + // EOF (ret = 0) should be impossible, as the data we are reading + // should be an infinite stream of random bytes. + _ => return Err(Error::UNEXPECTED), + } + } + Ok(()) + } + }; } -pub(crate) fn last_os_error() -> Error { - // We assume that on all targets which use the `util_libc` module `c_int` is equal to `i32` - let errno: i32 = unsafe { get_errno() }; - - if errno > 0 { - let code = errno - .checked_neg() - .expect("Positive number can be always negated"); - Error::from_neg_error_code(code) - } else { - Error::ERRNO_NOT_POSITIVE - } +#[allow(unused_macros, reason = "not always used")] +macro_rules! emit_impl_from_ptr { + ($fn:path) => { + emit_impl!(*$fn()); + }; } -/// Fill a buffer by repeatedly invoking `sys_fill`. 
-/// -/// The `sys_fill` function: -/// - should return -1 and set errno on failure -/// - should return the number of bytes written on success -#[allow(dead_code)] -pub(crate) fn sys_fill_exact( - mut buf: &mut [MaybeUninit], - sys_fill: impl Fn(&mut [MaybeUninit]) -> libc::ssize_t, -) -> Result<(), Error> { - while !buf.is_empty() { - let res = sys_fill(buf); - match res { - res if res > 0 => { - let len = usize::try_from(res).map_err(|_| Error::UNEXPECTED)?; - buf = buf.get_mut(len..).ok_or(Error::UNEXPECTED)?; - } - -1 => { - let err = last_os_error(); - // We should try again if the call was interrupted. - if err.raw_os_error() != Some(libc::EINTR) { - return Err(err); +cfg_if! { + if #[cfg(target_os = "vxworks")] { + emit_impl!(libc::errnoGet()); + } else { + cfg_if! { + if #[cfg(any(all(target_os = "linux", target_env = ""), getrandom_backend = "custom", getrandom_backend = "linux_raw", getrandom_backend = "rdrand", getrandom_backend = "rndr"))] { + // No libc. + } else if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android", target_os = "cygwin"))] { + emit_impl_from_ptr!(libc::__errno); + } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox", target_os = "dragonfly"))] { + emit_impl_from_ptr!(libc::__errno_location); + } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { + emit_impl_from_ptr!(libc::___errno); + } else if #[cfg(any(target_os = "macos", target_os = "freebsd"))] { + emit_impl_from_ptr!(libc::__error); + } else if #[cfg(target_os = "haiku")] { + emit_impl_from_ptr!(libc::_errnop); + } else if #[cfg(target_os = "nto")] { + emit_impl_from_ptr!(libc::__get_errno_ptr); + } else if #[cfg(any(all(target_os = "horizon", target_arch = "arm"), target_os = "vita"))] { + unsafe extern "C" { + // Not provided by libc: https://github.com/rust-lang/libc/issues/1995 + fn __errno() -> *mut libc::c_int; } + emit_impl_from_ptr!(__errno); + } else if #[cfg(target_os = "aix")] { + emit_impl_from_ptr!(libc::_Errno); } - // Negative return codes not equal to -1 should be impossible. - // EOF (ret = 0) should be impossible, as the data we are reading - // should be an infinite stream of random bytes. - _ => return Err(Error::UNEXPECTED), } } - Ok(()) } From c0f53bf2758ac2249635833b9d189668aea56783 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Mon, 24 Nov 2025 19:06:41 -0500 Subject: [PATCH 2/2] Use AtomicPtr --- src/backends/linux_android_with_fallback.rs | 16 ++-- src/backends/netbsd.rs | 13 +-- src/lazy.rs | 94 ++++++++++----------- 3 files changed, 60 insertions(+), 63 deletions(-) diff --git a/src/backends/linux_android_with_fallback.rs b/src/backends/linux_android_with_fallback.rs index 2500da6a8..90abfdc45 100644 --- a/src/backends/linux_android_with_fallback.rs +++ b/src/backends/linux_android_with_fallback.rs @@ -13,22 +13,22 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) /// Sentinel value which indicates that `libc::getrandom` either not available, /// or not supported by kernel. 
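The sentinel change just below relies on two distinct "impossible" addresses; a
minimal sketch of that invariant (an illustration, not code from the patch):

    use core::{ffi::c_void, ptr};

    // LazyPtr's internal "uninitialized" marker (see the src/lazy.rs hunk below)
    // is a dangling but well-aligned non-null pointer (in practice address 1 for
    // c_void), so it is never a real function address.
    let uninit: *mut c_void = ptr::dangling_mut();
    // The backend-level "getrandom is unusable" marker:
    let not_available: *mut c_void = usize::MAX as *mut c_void;

    assert_ne!(uninit, not_available);
    assert!(!uninit.is_null() && !not_available.is_null());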
-const NOT_AVAILABLE: usize = usize::MAX; +const NOT_AVAILABLE: *mut c_void = usize::MAX as *mut c_void; #[cold] #[inline(never)] -fn init() -> usize { +fn init() -> *mut c_void { // Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else #[cfg(not(target_env = "musl"))] - let fptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) } as usize; + let fptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) }; #[cfg(target_env = "musl")] let fptr = { let fptr: GetRandomFn = libc::getrandom; - unsafe { transmute::(fptr) } + unsafe { transmute::(fptr) } }; - let res_ptr = if fptr != 0 { - let getrandom_fn = unsafe { transmute::(fptr) }; + let res_ptr = if !fptr.is_null() { + let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr) }; // Check that `getrandom` syscall is supported by kernel let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) }; if cfg!(getrandom_test_linux_fallback) { @@ -66,14 +66,14 @@ fn use_file_fallback(dest: &mut [MaybeUninit]) -> Result<(), Error> { #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - static GETRANDOM_FN: lazy::LazyUsize = lazy::LazyUsize::new(); + static GETRANDOM_FN: lazy::LazyPtr = lazy::LazyPtr::new(); let fptr = GETRANDOM_FN.unsync_init(init); if fptr == NOT_AVAILABLE { use_file_fallback(dest) } else { // note: `transmute` is currently the only way to convert a pointer into a function reference - let getrandom_fn = unsafe { transmute::(fptr) }; + let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr) }; util_libc::sys_fill_exact(dest, |buf| unsafe { let ret = getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0); sanitizer::unpoison_linux_getrandom_result(buf, ret); diff --git a/src/backends/netbsd.rs b/src/backends/netbsd.rs index b90a52c38..ba7f49f7c 100644 --- a/src/backends/netbsd.rs +++ b/src/backends/netbsd.rs @@ -40,22 +40,23 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) #[cold] #[inline(never)] -fn init() -> usize { - let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) }; +fn init() -> *mut c_void { + let ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) }; if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) { // Verify `polyfill_using_kern_arand` has the right signature. const POLYFILL: GetRandomFn = polyfill_using_kern_arand; - ptr = POLYFILL as *mut c_void; + POLYFILL as *mut c_void + } else { + ptr } - ptr as usize } #[inline] pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - static GETRANDOM_FN: lazy::LazyUsize = lazy::LazyUsize::new(); + static GETRANDOM_FN: lazy::LazyPtr = lazy::LazyPtr::new(); let fptr = GETRANDOM_FN.unsync_init(init); - let fptr = unsafe { mem::transmute::(fptr) }; + let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) }; util_libc::sys_fill_exact(dest, |buf| unsafe { fptr(buf.as_mut_ptr().cast::(), buf.len(), 0) }) diff --git a/src/lazy.rs b/src/lazy.rs index e1ef399ba..9503e8824 100644 --- a/src/lazy.rs +++ b/src/lazy.rs @@ -1,56 +1,52 @@ //! Helpers built around pointer-sized atomics. -use core::sync::atomic::{AtomicUsize, Ordering}; - -// This structure represents a lazily initialized static usize value. Useful -// when it is preferable to just rerun initialization instead of locking. -// unsync_init will invoke an init() function until it succeeds, then return the -// cached value for future calls. -// -// unsync_init supports init() "failing". 
If the init() method returns UNINIT, -// that value will be returned as normal, but will not be cached. -// -// Users should only depend on the _value_ returned by init() functions. -// Specifically, for the following init() function: -// fn init() -> usize { -// a(); -// let v = b(); -// c(); -// v -// } -// the effects of c() or writes to shared memory will not necessarily be -// observed and additional synchronization methods may be needed. -pub(crate) struct LazyUsize(AtomicUsize); - -impl LazyUsize { - // The initialization is not completed. - const UNINIT: usize = usize::MAX; - - pub const fn new() -> Self { - Self(AtomicUsize::new(Self::UNINIT)) - } - - // Runs the init() function at most once, returning the value of some run of - // init(). Multiple callers can run their init() functions in parallel. - // init() should always return the same value, if it succeeds. - pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize { - #[cold] - fn do_init(this: &LazyUsize, init: impl FnOnce() -> usize) -> usize { - let val = init(); - this.0.store(val, Ordering::Relaxed); - val - } - - // Relaxed ordering is fine, as we only have a single atomic variable. - let val = self.0.load(Ordering::Relaxed); - if val != Self::UNINIT { - val - } else { - do_init(self, init) +use core::{ + ptr, + sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, +}; + +macro_rules! lazy_atomic { + ($name:ident $(<$($gen:ident),+>)?, $atomic:ty, $value:ty, $uninit:expr) => { + /// Lazily initialized static value backed by a single atomic. + /// + /// `unsync_init` will invoke `init` until it returns a value other than + /// the sentinel `UNINIT`, then cache that value for subsequent calls. + /// Multiple callers may race to run `init`; only the returned value is + /// guaranteed to be observed, not any side effects. + pub(crate) struct $name$(<$($gen),+>)?($atomic); + + impl<$($($gen),+)? > $name$(<$($gen),+>)? { + const UNINIT: $value = $uninit; + + pub const fn new() -> Self { + Self(<$atomic>::new(Self::UNINIT)) + } + + #[cold] + fn do_init(&self, init: impl FnOnce() -> $value) -> $value { + let val = init(); + self.0.store(val, Ordering::Relaxed); + val + } + + #[inline] + pub fn unsync_init(&self, init: impl FnOnce() -> $value) -> $value { + // Relaxed ordering is fine, as we only have a single atomic variable. + let val = self.0.load(Ordering::Relaxed); + if val != Self::UNINIT { + val + } else { + self.do_init(init) + } + } } - } + }; } -// Identical to LazyUsize except with bool instead of usize. +lazy_atomic!(LazyUsize, AtomicUsize, usize, usize::MAX); +lazy_atomic!(LazyPtr, AtomicPtr, *mut T, ptr::dangling_mut()); + +/// Lazily initializes a cached bool; reuses `LazyUsize` to avoid sentinel +/// issues with `AtomicBool`. pub(crate) struct LazyBool(LazyUsize); impl LazyBool {