@@ -129,28 +129,34 @@ class gil_safe_call_once_and_store {
129129 // Multiple threads may enter here, because the GIL is released in the next line and
130130 // CPython API calls in the `fn()` call below may release and reacquire the GIL.
131131 gil_scoped_release gil_rel; // Needed to establish lock ordering.
132- {
133- detail::with_internals ([&](detail::internals &internals) {
134- const void *key = reinterpret_cast <const void *>(this );
135- auto &storage_map = internals.call_once_storage_map ;
136- // There can be multiple threads going through here.
137- if (storage_map.find (key) == storage_map.end ()) {
138- gil_scoped_acquire gil_acq;
139- // Only one thread will enter here at a time.
140- // Fast recheck to avoid double work.
141- if (storage_map.find (key) == storage_map.end ()) {
142- auto value = new detail::call_once_storage<T>{};
143- // fn may release, but will reacquire, the GIL.
144- ::new (value->storage ) T (fn ());
145- value->finalize = finalize_fn;
146- value->is_initialized = true ;
147- storage_map.emplace (key, value);
148- last_storage_ptr_ = reinterpret_cast <T *>(value->storage );
149- }
132+ detail::with_internals ([&](detail::internals &internals) {
133+ const void *key = reinterpret_cast <const void *>(this );
134+ auto &storage_map = internals.call_once_storage_map ;
135+ // There can be multiple threads going through here.
136+ detail::call_once_storage<T> *value = nullptr ;
137+ {
138+ gil_scoped_acquire gil_acq;
139+ // Only one thread will enter here at a time.
140+ const auto it = storage_map.find (key);
141+ if (it != storage_map.end ()) {
142+ value = static_cast <detail::call_once_storage<T> *>(it->second );
143+ } else {
144+ value = new detail::call_once_storage<T>{};
145+ storage_map.emplace (key, value);
150146 }
147+ }
148+ assert (value != nullptr );
149+ std::call_once (value->once_flag , [&] {
150+ // Only one thread will ever enter here.
151+ gil_scoped_acquire gil_acq;
152+ // fn may release, but will reacquire, the GIL.
153+ ::new (value->storage ) T (fn ());
154+ value->finalize = finalize_fn;
155+ value->is_initialized = true ;
156+ last_storage_ptr_ = reinterpret_cast <T *>(value->storage );
151157 is_initialized_by_atleast_one_interpreter_ = true ;
152158 });
153- }
159+ });
154160 // All threads will observe `is_initialized_by_atleast_one_interpreter_` as true here.
155161 }
156162 // Intentionally not returning `T &` to ensure the calling code is self-documenting.
@@ -162,10 +168,10 @@ class gil_safe_call_once_and_store {
162168 T *result = last_storage_ptr_;
163169 if (!is_last_storage_valid ()) {
164170 detail::with_internals ([&](detail::internals &internals) {
165- const void *k = reinterpret_cast <const void *>(this );
171+ const void *key = reinterpret_cast <const void *>(this );
166172 auto &storage_map = internals.call_once_storage_map ;
167- auto *v = static_cast <detail::call_once_storage<T> *>(storage_map.at (k ));
168- result = last_storage_ptr_ = reinterpret_cast <T *>(v ->storage );
173+ auto *value = static_cast <detail::call_once_storage<T> *>(storage_map.at (key ));
174+ result = last_storage_ptr_ = reinterpret_cast <T *>(value ->storage );
169175 });
170176 }
171177 assert (result != nullptr );
0 commit comments