#define KMP_PAD(type, sz) \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))

#define KMP_GTID_DNE (-2)
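// KMP_PAD(type, sz) rounds sizeof(type) up to the next multiple of sz so a
// padded object fills whole allocation units (e.g. cache lines). Worked
// example, assuming a 64-byte cache line and a 40-byte type T:
//   KMP_PAD(T, 64) = 40 + (64 - ((40 - 1) % 64) - 1) = 40 + 24 = 64
// A size that is already a multiple of sz is left unchanged:
//   sizeof(T) == 64:  64 + (64 - ((64 - 1) % 64) - 1) = 64 + 0 = 64
// KMP_GTID_DNE ("does not exist") is the gtid passed by callers that have no
// OpenMP thread identity, e.g. the bootstrap lock wrappers below.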
#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif
#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

typedef kmp_uint32 kmp_lock_index_t;
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks(void);
struct kmp_base_tas_lock {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __LP64__
  // Flip the two 32-bit members so the 64-bit lock word layout matches
  // little-endian targets.
  kmp_int32 depth_locked; // depth locked, for nested locks only
  std::atomic<kmp_int32> poll;
#else
  std::atomic<kmp_int32> poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
#endif
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain the union is large enough
};

typedef union kmp_tas_lock kmp_tas_lock_t;

#define KMP_TAS_LOCK_INITIALIZER(lock) \
  { \
    { KMP_LOCK_FREE(tas), 0 } \
  }
extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);

#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0
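// Illustrative use of the test-and-set (TAS) lock API declared above; `gtid'
// is assumed to be a valid global thread id obtained elsewhere in the runtime:
//
//   kmp_tas_lock_t l;
//   __kmp_init_tas_lock(&l);
//   __kmp_acquire_tas_lock(&l, gtid); // spins until the lock is obtained
//   /* ... critical section ... */
//   __kmp_release_tas_lock(&l, gtid);
//   __kmp_destroy_tas_lock(&l);
//
// The nested variants track a recursion depth: the first successful acquire
// reports KMP_LOCK_ACQUIRED_FIRST, re-acquisition by the owning thread
// reports KMP_LOCK_ACQUIRED_NEXT, and release reports KMP_LOCK_STILL_HELD
// until the depth reaches zero, at which point it reports KMP_LOCK_RELEASED.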
#define KMP_USE_FUTEX \
  (KMP_OS_LINUX && \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
struct kmp_base_futex_lock {
  volatile kmp_int32 poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain the union is large enough
};

typedef union kmp_futex_lock kmp_futex_lock_t;

#define KMP_FUTEX_LOCK_INITIALIZER(lock) \
  { \
    { KMP_LOCK_FREE(futex), 0 } \
  }
extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
#ifdef _MSC_VER
// MSVC does not allow std::atomic<> members inside a union.
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // source code location of omp_init_lock()
  std::atomic_uint next_ticket; // ticket number to give to next thread
  std::atomic_uint now_serving; // ticket number for thread which holds the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags;
};
#else
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // source code location of omp_init_lock()
  std::atomic<unsigned>
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic<unsigned>
      now_serving; // ticket number for thread which holds the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags;
};
#endif

struct kmp_base_ticket_lock;

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t
      lk; // this field must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

#define KMP_TICKET_LOCK_INITIALIZER(lock) \
  { \
    { true, &(lock), NULL, 0U, 0U, 0, -1 } \
  }
extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
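// Illustrative sketch of a statically initialized ticket lock. The
// initializer fills the leading members of kmp_base_ticket_lock
// (initialized, self, location, next_ticket, now_serving, owner_id,
// depth_locked); depth_locked == -1 marks the lock as simple (non-nested).
//
//   static kmp_ticket_lock_t my_lock = KMP_TICKET_LOCK_INITIALIZER(my_lock);
//   ...
//   __kmp_acquire_ticket_lock(&my_lock, gtid); // FIFO order of drawn tickets
//   __kmp_release_ticket_lock(&my_lock, gtid);
//
// (`my_lock' and `gtid' are illustrative names, not part of this interface.)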
#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  // So we can get stats from locks that haven't been destroyed.
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  // Speculation attempt statistics.
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS
struct kmp_adaptive_lock_info {
  // Values used for adaptivity.
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  // Parameters of the lock.
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS
struct kmp_base_queuing_lock {
  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // points to the lock union if in initialized state
  KMP_ALIGN(8) // tail_id must be 8-byte aligned (see the assert below)
  volatile kmp_int32 tail_id; // (gtid+1) of thread at tail of wait queue
  volatile kmp_int32 head_id; // (gtid+1) of thread at head of wait queue
  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);
union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t
      lk; // this field must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;
extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk;
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t
      adaptive; // information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};

typedef union kmp_adaptive_lock kmp_adaptive_lock_t;
#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)
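// The adaptive (speculative) lock embeds a plain queuing lock as its first
// member, so the queuing-lock routines can be reused for the non-speculative
// path; GET_QLK_PTR performs that cast. Minimal sketch, assuming `lck' is a
// valid kmp_adaptive_lock_t *:
//
//   kmp_queuing_lock_t *qlk = GET_QLK_PTR(lck);
//   __kmp_acquire_queuing_lock(qlk, gtid); // fall back to the real lock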
#endif // KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_drdpa_lock {
  // Fields written only when initializing or reconfiguring the lock.
  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  std::atomic<std::atomic<kmp_uint64> *> polls;
  std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  // next_ticket is written by every acquiring thread, so it lives apart
  // from the fields above.
  std::atomic<kmp_uint64> next_ticket;

  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;
union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t
      lk; // this field must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
#define KMP_BOOTSTRAP_LOCK_INIT(lock) \
  kmp_bootstrap_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
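// Bootstrap locks guard runtime-internal state (lock pools, the user-lock
// table, library initialization) while the threading layer itself may not be
// fully set up, which is why they pass KMP_GTID_DNE rather than a real gtid.
// Minimal sketch; the lock name and the guarded flag are illustrative only:
//
//   static KMP_BOOTSTRAP_LOCK_INIT(example_init_lock);
//   static int example_ready = 0;
//
//   static void example_init_once(void) {
//     __kmp_acquire_bootstrap_lock(&example_init_lock);
//     if (!example_ready) {
//       /* one-time setup */
//       example_ready = 1;
//     }
//     __kmp_release_bootstrap_lock(&example_init_lock);
//   }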
typedef kmp_ticket_lock_t kmp_lock_t;

#define KMP_LOCK_INIT(lock) kmp_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle,
  lk_rtm_queuing,
  lk_rtm_spin,
#endif
  lk_ticket,
  lk_queuing,
  lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;
union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
  kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;
#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}
extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX && \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid) \
  if (__kmp_user_lock_kind == lk_tas) { \
    if (__kmp_env_consistency_check) { \
      char const *const func = "omp_set_lock"; \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && \
          lck->tas.lk.depth_locked != -1) { \
        KMP_FATAL(LockNestableUsedAsSimple, func); \
      } \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) { \
        KMP_FATAL(LockIsAlreadyOwned, func); \
      } \
    } \
    if (lck->tas.lk.poll != 0 || \
        !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
      kmp_uint32 spins; \
      kmp_uint64 time; \
      KMP_FSYNC_PREPARE(lck); \
      KMP_INIT_YIELD(spins); \
      KMP_INIT_BACKOFF(time); \
      do { \
        KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time); \
      } while ( \
          lck->tas.lk.poll != 0 || \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
    } \
    KMP_FSYNC_ACQUIRED(lck); \
  } else { \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL); \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid); \
  }
#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX && \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

extern int __kmp_env_consistency_check;
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}
extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth) \
  if (__kmp_user_lock_kind == lk_tas) { \
    if (__kmp_env_consistency_check) { \
      char const *const func = "omp_set_nest_lock"; \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) && \
          lck->tas.lk.depth_locked == -1) { \
        KMP_FATAL(LockSimpleUsedAsNestable, func); \
      } \
    } \
    if (lck->tas.lk.poll - 1 == gtid) { \
      lck->tas.lk.depth_locked += 1; \
      *depth = KMP_LOCK_ACQUIRED_NEXT; \
    } else { \
      if ((lck->tas.lk.poll != 0) || \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
        kmp_uint32 spins; \
        kmp_uint64 time; \
        KMP_FSYNC_PREPARE(lck); \
        KMP_INIT_YIELD(spins); \
        KMP_INIT_BACKOFF(time); \
        do { \
          KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time); \
        } while ( \
            (lck->tas.lk.poll != 0) || \
            !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
      } \
      lck->tas.lk.depth_locked = 1; \
      *depth = KMP_LOCK_ACQUIRED_FIRST; \
    } \
    KMP_FSYNC_ACQUIRED(lck); \
  } else { \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL); \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid); \
  }
#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 == gtid) { // current thread already owns the lock
      return ++lck->tas.lk.depth_locked; // same owner, depth increased
    }
    retval = ((lck->tas.lk.poll == 0) &&
              __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}
extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);

#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) \
  { \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)( \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix; \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)( \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix; \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)( \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix; \
    __kmp_init##nest##user_lock_with_checks_ = \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix; \
    __kmp_destroy##nest##user_lock_with_checks_ = \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix; \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind) \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind) \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
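// __kmp_set_user_lock_vptrs() uses these macros to point the generic
// __kmp_*_user_lock_with_checks_ function pointers at the routines of the
// lock kind chosen at runtime. Rough sketch of one binding after expansion
// (the surrounding switch lives in the .cpp file and is assumed here):
//
//   case lk_ticket:
//     KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
//     // expands (in part) to:
//     //   __kmp_acquire_user_lock_with_checks_ =
//     //       (int (*)(kmp_user_lock_p,
//     //                kmp_int32))__kmp_acquire_ticket_lock_with_checks;
//     KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
//     break;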
struct kmp_lock_table {
  kmp_lock_index_t used; // number of used elements
  kmp_lock_index_t allocated; // number of allocated elements
  kmp_user_lock_p *table; // lock index to kmp_user_lock_p mapping
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();
#define KMP_CHECK_USER_LOCK_INIT() \
  { \
    if (!TCR_4(__kmp_init_user_locks)) { \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock); \
      if (!TCR_4(__kmp_init_user_locks)) { \
        TCW_4(__kmp_init_user_locks, TRUE); \
      } \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock); \
    } \
  }
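// Rough sketch of how the OpenMP lock entry points drive this allocator when
// dynamic locks are disabled (a simplification of the runtime's actual
// omp_init_lock/omp_destroy_lock handling; `example_*' names are
// illustrative):
//
//   void example_init_lock(void **user_lock, kmp_int32 gtid) {
//     KMP_CHECK_USER_LOCK_INIT();
//     kmp_user_lock_p lck = __kmp_user_lock_allocate(user_lock, gtid, 0);
//     __kmp_init_user_lock_with_checks(lck);
//   }
//
//   void example_destroy_lock(void **user_lock, kmp_int32 gtid) {
//     kmp_user_lock_p lck =
//         __kmp_lookup_user_lock(user_lock, "omp_destroy_lock");
//     __kmp_destroy_user_lock_with_checks(lck);
//     __kmp_user_lock_free(user_lock, gtid, lck);
//   }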
#endif // !KMP_USE_DYNAMIC_LOCK

#if KMP_USE_DYNAMIC_LOCK

#define KMP_USE_INLINED_TAS \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0
#if KMP_USE_TSX
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a) \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a) \
      m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
          m(nested_queuing, a) m(nested_drdpa, a)
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a) \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a) \
      m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a) \
          m(nested_drdpa, a)
#endif // KMP_USE_FUTEX
#define KMP_LAST_D_LOCK lockseq_rtm_spin
#else
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
#define KMP_FOREACH_I_LOCK(m, a) \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a) \
      m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_futex
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
#define KMP_FOREACH_I_LOCK(m, a) \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a) \
      m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_tas
#endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX
#define KMP_LOCK_SHIFT \
  8 // number of low bits used as the direct lock tag
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa

#define KMP_NUM_I_LOCKS \
  (locktag_nested_drdpa + 1) // number of indirect lock kinds
typedef kmp_uint32 kmp_dyna_lock_t;

// Enumerates the lock sequences (lockseq_*) used to request a lock kind.
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;
// Utility macros that extract information from lock sequences.
#define KMP_IS_D_LOCK(seq) \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq) \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)

// Enumerates direct lock tags, derived from the corresponding sequences.
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;
// Indirect lock type: a boxed user lock plus its tag.
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (**__kmp_direct_destroy)(kmp_dyna_lock_t *);
extern int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32);

extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (**__kmp_indirect_destroy)(kmp_user_lock_p);
extern int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32);
#define KMP_EXTRACT_D_TAG(l) \
  ((kmp_dyna_lock_t)((kmp_base_tas_lock_t *)(l))->poll & \
   ((1 << KMP_LOCK_SHIFT) - 1) & \
   -((kmp_dyna_lock_t)((kmp_tas_lock_t *)(l))->lk.poll & 1))

#define KMP_EXTRACT_I_INDEX(l) \
  ((kmp_lock_index_t)((kmp_base_tas_lock_t *)(l))->poll >> 1)

#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

#define KMP_I_LOCK_FUNC(l, op) \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]

#define KMP_INIT_D_LOCK(l, seq) \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

#define KMP_INIT_I_LOCK(l, seq) \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

#define KMP_LOCK_FREE(type) (locktag_##type)

#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
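// Worked example of the direct-lock tag encoding, assuming KMP_LOCK_SHIFT is
// 8 and lockseq_tas enumerates to 1:
//   KMP_GET_D_TAG(lockseq_tas)   == (1 << 1 | 1) == 3   // odd => direct lock
//   KMP_LOCK_FREE(tas)           == locktag_tas  == 3   // unlocked lock word
//   KMP_LOCK_BUSY(gtid + 1, tas) == ((gtid + 1) << 8) | 3
//   KMP_LOCK_STRIP(KMP_LOCK_BUSY(gtid + 1, tas)) == gtid + 1
// KMP_EXTRACT_D_TAG yields the low tag bits only when the stored word is odd
// (a direct lock); for an even word (an indirect lock handle) the
// -(poll & 1) mask is zero and the result is 0, i.e. "indirect".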
extern void __kmp_init_dynamic_user_locks();

extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

extern void __kmp_cleanup_indirect_user_locks();

extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc) \
  { \
    if (__kmp_indirect_set_location[(lck)->type] != NULL) \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
  }

extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag) \
  { \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
  }

extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck) \
  (__kmp_indirect_get_location[(lck)->type] != NULL \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
       : NULL)

extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck) \
  (__kmp_indirect_get_flags[(lck)->type] != NULL \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
       : NULL)
#define KMP_I_LOCK_CHUNK 1024
KMP_BUILD_ASSERT(KMP_I_LOCK_CHUNK % 2 == 0);

#define KMP_I_LOCK_TABLE_INIT_NROW_PTRS 8

typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table;
  kmp_uint32 nrow_ptrs;
  kmp_lock_index_t next;
  struct kmp_indirect_lock_table *next_table;
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;
// Returns the indirect lock associated with the given index, or nullptr if
// there is no lock at that index.
static inline kmp_indirect_lock_t *__kmp_get_i_lock(kmp_lock_index_t idx) {
  kmp_indirect_lock_table_t *lock_table = &__kmp_i_lock_table;
  while (lock_table) {
    kmp_lock_index_t max_locks = lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK;
    if (idx < max_locks) {
      kmp_lock_index_t row = idx / KMP_I_LOCK_CHUNK;
      kmp_lock_index_t col = idx % KMP_I_LOCK_CHUNK;
      if (!lock_table->table[row] || idx >= lock_table->next)
        break;
      return &lock_table->table[row][col];
    }
    idx -= max_locks;
    lock_table = lock_table->next_table;
  }
  return nullptr;
}
extern int __kmp_num_locks_in_block;

#define KMP_LOOKUP_I_LOCK(l) \
  ((OMP_LOCK_T_SIZE < sizeof(void *)) \
       ? __kmp_get_i_lock(KMP_EXTRACT_I_INDEX(l)) \
       : *((kmp_indirect_lock_t **)(l)))
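// Sketch of an indirect-lock lookup. With 8-byte lock words
// (OMP_LOCK_T_SIZE == sizeof(void *)) the user's lock word holds the
// kmp_indirect_lock_t pointer itself; with smaller lock words it holds an
// index and __kmp_get_i_lock() walks the chained tables using
// row = idx / KMP_I_LOCK_CHUNK and col = idx % KMP_I_LOCK_CHUNK.
//
//   kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
//   KMP_I_LOCK_FUNC(ilk, set)(ilk->lock, gtid); // acquire via function table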
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK

// Data structure for using backoff within spin locks.
typedef struct {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of outer delay loop
  kmp_uint32 min_tick; // size of inner delay loop in ticks
} kmp_backoff_t;

extern kmp_backoff_t __kmp_spin_backoff_params;

extern void __kmp_spin_backoff(kmp_backoff_t *);
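// Minimal sketch of the intended use of the backoff helper inside a spin
// loop; `try_acquire' is a hypothetical acquire attempt, not a runtime
// function:
//
//   kmp_backoff_t backoff = __kmp_spin_backoff_params; // per-lock copy
//   while (!try_acquire(lck)) {
//     __kmp_spin_backoff(&backoff); // delay, bounded by max_backoff
//   }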