17 #include "kmp_config.h"
27 #ifndef KMP_STATIC_STEAL_ENABLED
28 #define KMP_STATIC_STEAL_ENABLED 1
31 #define TASK_CURRENT_NOT_QUEUED 0
32 #define TASK_CURRENT_QUEUED 1
34 #ifdef BUILD_TIED_TASK_STACK
35 #define TASK_STACK_EMPTY 0
36 #define TASK_STACK_BLOCK_BITS 5
38 #define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
40 #define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
43 #define TASK_NOT_PUSHED 1
44 #define TASK_SUCCESSFULLY_PUSHED 0
47 #define TASK_EXPLICIT 1
48 #define TASK_IMPLICIT 0
51 #define TASK_DETACHABLE 1
52 #define TASK_UNDETACHABLE 0
54 #define KMP_CANCEL_THREADS
55 #define KMP_THREAD_ATTR
59 #if defined(__ANDROID__)
60 #undef KMP_CANCEL_THREADS
70 #include <type_traits>
74 #include <sys/types.h>
83 #include "kmp_safe_c_api.h"
89 #if KMP_USE_HIER_SCHED
91 #undef KMP_USE_HIER_SCHED
92 #define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
95 #if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
97 #ifndef HWLOC_OBJ_NUMANODE
98 #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
100 #ifndef HWLOC_OBJ_PACKAGE
101 #define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
103 #if HWLOC_API_VERSION >= 0x00020000
105 typedef int kmp_hwloc_depth_t;
107 typedef unsigned int kmp_hwloc_depth_t;
111 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
112 #include <xmmintrin.h>
115 #include "kmp_debug.h"
116 #include "kmp_lock.h"
117 #include "kmp_version.h"
118 #include "kmp_barrier.h"
120 #include "kmp_debugger.h"
122 #include "kmp_i18n.h"
124 #define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
126 #include "kmp_wrapper_malloc.h"
129 #if !defined NSIG && defined _NSIG
135 #pragma weak clock_gettime
139 #include "ompt-internal.h"
143 #include "ompd-specific.h"
147 #define UNLIKELY(x) (x)
156 #ifndef USE_FAST_MEMORY
157 #define USE_FAST_MEMORY 3
160 #ifndef KMP_NESTED_HOT_TEAMS
161 #define KMP_NESTED_HOT_TEAMS 0
162 #define USE_NESTED_HOT_ARG(x)
164 #if KMP_NESTED_HOT_TEAMS
165 #define USE_NESTED_HOT_ARG(x) , x
167 #define USE_NESTED_HOT_ARG(x)
172 #ifndef USE_CMP_XCHG_FOR_BGET
173 #define USE_CMP_XCHG_FOR_BGET 1
181 #define KMP_NSEC_PER_SEC 1000000000L
182 #define KMP_USEC_PER_SEC 1000000L
206 KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
207 KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
208 KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
210 KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
211 KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
224 KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
225 KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
226 KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
227 KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
228 KMP_IDENT_OPENMP_SPEC_VERSION_MASK = 0xFF000000
249 kmp_int32 get_openmp_version() {
250 return (((
flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24) & 0xFF);
258 typedef union kmp_team kmp_team_t;
259 typedef struct kmp_taskdata kmp_taskdata_t;
260 typedef union kmp_task_team kmp_task_team_t;
261 typedef union kmp_team kmp_team_p;
262 typedef union kmp_info kmp_info_p;
263 typedef union kmp_root kmp_root_p;
265 template <
bool C = false,
bool S = true>
class kmp_flag_32;
266 template <
bool C = false,
bool S = true>
class kmp_flag_64;
267 template <
bool C = false,
bool S = true>
class kmp_atomic_flag_64;
268 class kmp_flag_oncore;
278 #define KMP_PACK_64(HIGH_32, LOW_32) \
279 ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
282 #define SKIP_WS(_x) \
284 while (*(_x) == ' ' || *(_x) == '\t') \
287 #define SKIP_DIGITS(_x) \
289 while (*(_x) >= '0' && *(_x) <= '9') \
292 #define SKIP_TOKEN(_x) \
294 while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
295 (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
298 #define SKIP_TO(_x, _c) \
300 while (*(_x) != '\0' && *(_x) != (_c)) \
306 #define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
307 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
312 enum kmp_state_timer {
322 #ifdef USE_LOAD_BALANCE
323 dynamic_load_balance,
326 dynamic_thread_limit,
332 #ifndef KMP_SCHED_TYPE_DEFINED
333 #define KMP_SCHED_TYPE_DEFINED
334 typedef enum kmp_sched {
337 kmp_sched_static = 1,
338 kmp_sched_dynamic = 2,
339 kmp_sched_guided = 3,
341 kmp_sched_upper_std = 5,
342 kmp_sched_lower_ext = 100,
343 kmp_sched_trapezoidal = 101,
344 #if KMP_STATIC_STEAL_ENABLED
345 kmp_sched_static_steal = 102,
348 kmp_sched_default = kmp_sched_static,
349 kmp_sched_monotonic = 0x80000000
359 kmp_sch_static_chunked = 33,
361 kmp_sch_dynamic_chunked = 35,
363 kmp_sch_runtime = 37,
365 kmp_sch_trapezoidal = 39,
368 kmp_sch_static_greedy = 40,
369 kmp_sch_static_balanced = 41,
371 kmp_sch_guided_iterative_chunked = 42,
372 kmp_sch_guided_analytical_chunked = 43,
374 kmp_sch_static_steal = 44,
377 kmp_sch_static_balanced_chunked = 45,
385 kmp_ord_static_chunked = 65,
387 kmp_ord_dynamic_chunked = 67,
388 kmp_ord_guided_chunked = 68,
389 kmp_ord_runtime = 69,
391 kmp_ord_trapezoidal = 71,
404 kmp_nm_static_chunked =
407 kmp_nm_dynamic_chunked = 163,
409 kmp_nm_runtime = 165,
411 kmp_nm_trapezoidal = 167,
414 kmp_nm_static_greedy = 168,
415 kmp_nm_static_balanced = 169,
417 kmp_nm_guided_iterative_chunked = 170,
418 kmp_nm_guided_analytical_chunked = 171,
419 kmp_nm_static_steal =
422 kmp_nm_ord_static_chunked = 193,
424 kmp_nm_ord_dynamic_chunked = 195,
425 kmp_nm_ord_guided_chunked = 196,
426 kmp_nm_ord_runtime = 197,
428 kmp_nm_ord_trapezoidal = 199,
450 #define SCHEDULE_WITHOUT_MODIFIERS(s) \
453 #define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
454 #define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
455 #define SCHEDULE_HAS_NO_MODIFIERS(s) \
456 (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
457 #define SCHEDULE_GET_MODIFIERS(s) \
458 ((enum sched_type)( \
459 (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
460 #define SCHEDULE_SET_MODIFIERS(s, m) \
461 (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
462 #define SCHEDULE_NONMONOTONIC 0
463 #define SCHEDULE_MONOTONIC 1
470 __kmp_sched_apply_mods_stdkind(kmp_sched_t *kind,
472 if (SCHEDULE_HAS_MONOTONIC(internal_kind)) {
473 *kind = (kmp_sched_t)((
int)*kind | (int)kmp_sched_monotonic);
479 __kmp_sched_apply_mods_intkind(kmp_sched_t kind,
481 if ((
int)kind & (
int)kmp_sched_monotonic) {
482 *internal_kind = (
enum sched_type)((
int)*internal_kind |
488 static inline kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind) {
489 return (kmp_sched_t)((int)kind & ~((
int)kmp_sched_monotonic));
493 typedef union kmp_r_sched {
512 enum clock_function_type {
513 clock_function_gettimeofday,
514 clock_function_clock_gettime
518 #if KMP_MIC_SUPPORTED
519 enum mic_type { non_mic, mic1, mic2, mic3, dummy };
524 #undef KMP_FAST_REDUCTION_BARRIER
525 #define KMP_FAST_REDUCTION_BARRIER 1
527 #undef KMP_FAST_REDUCTION_CORE_DUO
528 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
529 #define KMP_FAST_REDUCTION_CORE_DUO 1
532 enum _reduction_method {
533 reduction_method_not_defined = 0,
534 critical_reduce_block = (1 << 8),
535 atomic_reduce_block = (2 << 8),
536 tree_reduce_block = (3 << 8),
537 empty_reduce_block = (4 << 8)
552 #if KMP_FAST_REDUCTION_BARRIER
553 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
554 ((reduction_method) | (barrier_type))
556 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
557 ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
559 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
560 ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
562 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
565 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
566 (packed_reduction_method)
568 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
571 #define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
572 ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
573 (which_reduction_block))
575 #if KMP_FAST_REDUCTION_BARRIER
576 #define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
577 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
579 #define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
580 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
583 typedef int PACKED_REDUCTION_METHOD_T;
590 #pragma warning(push)
591 #pragma warning(disable : 271 310)
604 enum kmp_hw_t :
int {
621 #define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
622 KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
623 #define KMP_ASSERT_VALID_HW_TYPE(type) \
624 KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
626 #define KMP_FOREACH_HW_TYPE(type) \
627 for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
628 type = (kmp_hw_t)((int)type + 1))
630 const char *__kmp_hw_get_keyword(kmp_hw_t type,
bool plural =
false);
631 const char *__kmp_hw_get_catalog_string(kmp_hw_t type,
bool plural =
false);
634 #if KMP_AFFINITY_SUPPORTED
638 #if _MSC_VER < 1600 && KMP_MSVC_COMPAT
639 typedef struct GROUP_AFFINITY {
645 #if KMP_GROUP_AFFINITY
646 extern int __kmp_num_proc_groups;
648 static const int __kmp_num_proc_groups = 1;
650 typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
651 extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
653 typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
654 extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
656 typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
657 extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
659 typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE,
const GROUP_AFFINITY *,
661 extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
665 extern hwloc_topology_t __kmp_hwloc_topology;
666 extern int __kmp_hwloc_error;
669 extern size_t __kmp_affin_mask_size;
670 #define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
671 #define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
672 #define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
673 #define KMP_CPU_SET_ITERATE(i, mask) \
674 for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
675 #define KMP_CPU_SET(i, mask) (mask)->set(i)
676 #define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
677 #define KMP_CPU_CLR(i, mask) (mask)->clear(i)
678 #define KMP_CPU_ZERO(mask) (mask)->zero()
679 #define KMP_CPU_COPY(dest, src) (dest)->copy(src)
680 #define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
681 #define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
682 #define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
683 #define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
684 #define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
685 #define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
686 #define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
687 #define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
688 #define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
689 #define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
690 #define KMP_CPU_ALLOC_ARRAY(arr, n) \
691 (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
692 #define KMP_CPU_FREE_ARRAY(arr, n) \
693 __kmp_affinity_dispatch->deallocate_mask_array(arr)
694 #define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
695 #define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
696 #define __kmp_get_system_affinity(mask, abort_bool) \
697 (mask)->get_system_affinity(abort_bool)
698 #define __kmp_set_system_affinity(mask, abort_bool) \
699 (mask)->set_system_affinity(abort_bool)
700 #define __kmp_get_proc_group(mask) (mask)->get_proc_group()
706 void *
operator new(
size_t n);
707 void operator delete(
void *p);
708 void *
operator new[](
size_t n);
709 void operator delete[](
void *p);
712 virtual void set(
int i) {}
714 virtual bool is_set(
int i)
const {
return false; }
716 virtual void clear(
int i) {}
718 virtual void zero() {}
720 virtual void copy(
const Mask *src) {}
722 virtual void bitwise_and(
const Mask *rhs) {}
724 virtual void bitwise_or(
const Mask *rhs) {}
726 virtual void bitwise_not() {}
729 virtual int begin()
const {
return 0; }
730 virtual int end()
const {
return 0; }
731 virtual int next(
int previous)
const {
return 0; }
733 virtual int set_process_affinity(
bool abort_on_error)
const {
return -1; }
736 virtual int set_system_affinity(
bool abort_on_error)
const {
return -1; }
738 virtual int get_system_affinity(
bool abort_on_error) {
return -1; }
741 virtual int get_proc_group()
const {
return -1; }
743 void *
operator new(
size_t n);
744 void operator delete(
void *p);
746 virtual ~KMPAffinity() =
default;
748 virtual void determine_capable(
const char *env_var) {}
750 virtual void bind_thread(
int proc) {}
752 virtual Mask *allocate_mask() {
return nullptr; }
753 virtual void deallocate_mask(Mask *m) {}
754 virtual Mask *allocate_mask_array(
int num) {
return nullptr; }
755 virtual void deallocate_mask_array(Mask *m) {}
756 virtual Mask *index_mask_array(Mask *m,
int index) {
return nullptr; }
757 static void pick_api();
758 static void destroy_api();
766 virtual api_type get_api_type()
const {
772 static bool picked_api;
775 typedef KMPAffinity::Mask kmp_affin_mask_t;
776 extern KMPAffinity *__kmp_affinity_dispatch;
780 #define KMP_AFFIN_MASK_PRINT_LEN 1024
794 enum affinity_top_method {
795 affinity_top_method_all = 0,
796 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
797 affinity_top_method_apicid,
798 affinity_top_method_x2apicid,
799 affinity_top_method_x2apicid_1f,
801 affinity_top_method_cpuinfo,
802 #if KMP_GROUP_AFFINITY
803 affinity_top_method_group,
805 affinity_top_method_flat,
807 affinity_top_method_hwloc,
809 affinity_top_method_default
812 #define affinity_respect_mask_default (-1)
814 extern enum affinity_type __kmp_affinity_type;
815 extern kmp_hw_t __kmp_affinity_gran;
816 extern int __kmp_affinity_gran_levels;
817 extern int __kmp_affinity_dups;
818 extern enum affinity_top_method __kmp_affinity_top_method;
819 extern int __kmp_affinity_compact;
820 extern int __kmp_affinity_offset;
821 extern int __kmp_affinity_verbose;
822 extern int __kmp_affinity_warnings;
823 extern int __kmp_affinity_respect_mask;
824 extern char *__kmp_affinity_proclist;
825 extern kmp_affin_mask_t *__kmp_affinity_masks;
826 extern unsigned __kmp_affinity_num_masks;
827 extern void __kmp_affinity_bind_thread(
int which);
829 extern kmp_affin_mask_t *__kmp_affin_fullMask;
830 extern char *__kmp_cpuinfo_file;
835 typedef enum kmp_proc_bind_t {
845 typedef struct kmp_nested_proc_bind_t {
846 kmp_proc_bind_t *bind_types;
849 } kmp_nested_proc_bind_t;
851 extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
853 extern int __kmp_display_affinity;
854 extern char *__kmp_affinity_format;
855 static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
857 extern int __kmp_tool;
858 extern char *__kmp_tool_libraries;
861 #if KMP_AFFINITY_SUPPORTED
862 #define KMP_PLACE_ALL (-1)
863 #define KMP_PLACE_UNDEFINED (-2)
865 #define KMP_AFFINITY_NON_PROC_BIND \
866 ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
867 __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
868 (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
871 extern int __kmp_affinity_num_places;
873 typedef enum kmp_cancel_kind_t {
882 typedef struct kmp_hws_item {
887 extern kmp_hws_item_t __kmp_hws_socket;
888 extern kmp_hws_item_t __kmp_hws_die;
889 extern kmp_hws_item_t __kmp_hws_node;
890 extern kmp_hws_item_t __kmp_hws_tile;
891 extern kmp_hws_item_t __kmp_hws_core;
892 extern kmp_hws_item_t __kmp_hws_proc;
893 extern int __kmp_hws_requested;
894 extern int __kmp_hws_abs_flag;
898 #define KMP_PAD(type, sz) \
899 (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
903 #define KMP_GTID_DNE (-2)
904 #define KMP_GTID_SHUTDOWN (-3)
905 #define KMP_GTID_MONITOR (-4)
906 #define KMP_GTID_UNKNOWN (-5)
907 #define KMP_GTID_MIN (-6)
913 typedef uintptr_t omp_uintptr_t;
916 omp_atk_sync_hint = 1,
917 omp_atk_alignment = 2,
919 omp_atk_pool_size = 4,
920 omp_atk_fallback = 5,
923 omp_atk_partition = 8
924 } omp_alloctrait_key_t;
929 omp_atv_contended = 3,
930 omp_atv_uncontended = 4,
931 omp_atv_serialized = 5,
932 omp_atv_sequential = omp_atv_serialized,
938 omp_atv_default_mem_fb = 11,
939 omp_atv_null_fb = 12,
940 omp_atv_abort_fb = 13,
941 omp_atv_allocator_fb = 14,
942 omp_atv_environment = 15,
943 omp_atv_nearest = 16,
944 omp_atv_blocked = 17,
945 omp_atv_interleaved = 18
946 } omp_alloctrait_value_t;
947 #define omp_atv_default ((omp_uintptr_t)-1)
949 typedef void *omp_memspace_handle_t;
950 extern omp_memspace_handle_t
const omp_default_mem_space;
951 extern omp_memspace_handle_t
const omp_large_cap_mem_space;
952 extern omp_memspace_handle_t
const omp_const_mem_space;
953 extern omp_memspace_handle_t
const omp_high_bw_mem_space;
954 extern omp_memspace_handle_t
const omp_low_lat_mem_space;
956 extern omp_memspace_handle_t
const llvm_omp_target_host_mem_space;
957 extern omp_memspace_handle_t
const llvm_omp_target_shared_mem_space;
958 extern omp_memspace_handle_t
const llvm_omp_target_device_mem_space;
961 omp_alloctrait_key_t key;
965 typedef void *omp_allocator_handle_t;
966 extern omp_allocator_handle_t
const omp_null_allocator;
967 extern omp_allocator_handle_t
const omp_default_mem_alloc;
968 extern omp_allocator_handle_t
const omp_large_cap_mem_alloc;
969 extern omp_allocator_handle_t
const omp_const_mem_alloc;
970 extern omp_allocator_handle_t
const omp_high_bw_mem_alloc;
971 extern omp_allocator_handle_t
const omp_low_lat_mem_alloc;
972 extern omp_allocator_handle_t
const omp_cgroup_mem_alloc;
973 extern omp_allocator_handle_t
const omp_pteam_mem_alloc;
974 extern omp_allocator_handle_t
const omp_thread_mem_alloc;
976 extern omp_allocator_handle_t
const llvm_omp_target_host_mem_alloc;
977 extern omp_allocator_handle_t
const llvm_omp_target_shared_mem_alloc;
978 extern omp_allocator_handle_t
const llvm_omp_target_device_mem_alloc;
979 extern omp_allocator_handle_t
const kmp_max_mem_alloc;
980 extern omp_allocator_handle_t __kmp_def_allocator;
985 extern int __kmp_memkind_available;
987 typedef omp_memspace_handle_t kmp_memspace_t;
989 typedef struct kmp_allocator_t {
990 omp_memspace_handle_t memspace;
993 omp_alloctrait_value_t fb;
994 kmp_allocator_t *fb_data;
995 kmp_uint64 pool_size;
996 kmp_uint64 pool_used;
999 extern omp_allocator_handle_t __kmpc_init_allocator(
int gtid,
1000 omp_memspace_handle_t,
1002 omp_alloctrait_t traits[]);
1003 extern void __kmpc_destroy_allocator(
int gtid, omp_allocator_handle_t al);
1004 extern void __kmpc_set_default_allocator(
int gtid, omp_allocator_handle_t al);
1005 extern omp_allocator_handle_t __kmpc_get_default_allocator(
int gtid);
1006 extern void *__kmpc_alloc(
int gtid,
size_t sz, omp_allocator_handle_t al);
1007 extern void *__kmpc_calloc(
int gtid,
size_t nmemb,
size_t sz,
1008 omp_allocator_handle_t al);
1009 extern void *__kmpc_realloc(
int gtid,
void *ptr,
size_t sz,
1010 omp_allocator_handle_t al,
1011 omp_allocator_handle_t free_al);
1012 extern void __kmpc_free(
int gtid,
void *ptr, omp_allocator_handle_t al);
1014 extern void __kmp_init_memkind();
1015 extern void __kmp_fini_memkind();
1016 extern void __kmp_init_target_mem();
1020 #define KMP_UINT64_MAX \
1021 (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
1023 #define KMP_MIN_NTH 1
1026 #if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
1027 #define KMP_MAX_NTH PTHREAD_THREADS_MAX
1029 #define KMP_MAX_NTH INT_MAX
1033 #ifdef PTHREAD_STACK_MIN
1034 #define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
1036 #define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
1039 #define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1042 #define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
1043 #elif KMP_ARCH_X86_64
1044 #define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1045 #define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
1047 #define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
1050 #define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
1051 #define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
1052 #define KMP_MAX_MALLOC_POOL_INCR \
1053 (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1055 #define KMP_MIN_STKOFFSET (0)
1056 #define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
1058 #define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
1060 #define KMP_DEFAULT_STKOFFSET CACHE_LINE
1063 #define KMP_MIN_STKPADDING (0)
1064 #define KMP_MAX_STKPADDING (2 * 1024 * 1024)
1066 #define KMP_BLOCKTIME_MULTIPLIER \
1068 #define KMP_MIN_BLOCKTIME (0)
1069 #define KMP_MAX_BLOCKTIME \
1071 #define KMP_DEFAULT_BLOCKTIME (200)
1074 #define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
1075 #define KMP_MIN_MONITOR_WAKEUPS (1)
1076 #define KMP_MAX_MONITOR_WAKEUPS (1000)
1080 #define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1081 (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
1082 : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
1083 : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
1084 ? (monitor_wakeups) \
1085 : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
1089 #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1090 (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
1091 (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
1093 #define KMP_BLOCKTIME(team, tid) \
1094 (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
1095 #if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1097 extern kmp_uint64 __kmp_ticks_per_msec;
1098 #if KMP_COMPILER_ICC
1099 #define KMP_NOW() ((kmp_uint64)_rdtsc())
1101 #define KMP_NOW() __kmp_hardware_timestamp()
1103 #define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
1104 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
1105 (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
1106 #define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
1109 extern kmp_uint64 __kmp_now_nsec();
1110 #define KMP_NOW() __kmp_now_nsec()
1111 #define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
1112 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
1113 (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
1114 #define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
1118 #define KMP_MIN_STATSCOLS 40
1119 #define KMP_MAX_STATSCOLS 4096
1120 #define KMP_DEFAULT_STATSCOLS 80
1122 #define KMP_MIN_INTERVAL 0
1123 #define KMP_MAX_INTERVAL (INT_MAX - 1)
1124 #define KMP_DEFAULT_INTERVAL 0
1126 #define KMP_MIN_CHUNK 1
1127 #define KMP_MAX_CHUNK (INT_MAX - 1)
1128 #define KMP_DEFAULT_CHUNK 1
1130 #define KMP_MIN_DISP_NUM_BUFF 1
1131 #define KMP_DFLT_DISP_NUM_BUFF 7
1132 #define KMP_MAX_DISP_NUM_BUFF 4096
1134 #define KMP_MAX_ORDERED 8
1136 #define KMP_MAX_FIELDS 32
1138 #define KMP_MAX_BRANCH_BITS 31
1140 #define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1142 #define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1144 #define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1149 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1150 #define KMP_TLS_GTID_MIN 5
1152 #define KMP_TLS_GTID_MIN INT_MAX
1155 #define KMP_MASTER_TID(tid) (0 == (tid))
1156 #define KMP_WORKER_TID(tid) (0 != (tid))
1158 #define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
1159 #define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
1160 #define KMP_INITIAL_GTID(gtid) (0 == (gtid))
1164 #define TRUE (!FALSE)
1170 #define KMP_INIT_WAIT 64U
1171 #define KMP_NEXT_WAIT 32U
1173 #define KMP_INIT_WAIT 1024U
1174 #define KMP_NEXT_WAIT 512U
1177 #define KMP_INIT_WAIT 1024U
1178 #define KMP_NEXT_WAIT 512U
1179 #elif KMP_OS_DRAGONFLY
1181 #define KMP_INIT_WAIT 1024U
1182 #define KMP_NEXT_WAIT 512U
1183 #elif KMP_OS_FREEBSD
1185 #define KMP_INIT_WAIT 1024U
1186 #define KMP_NEXT_WAIT 512U
1189 #define KMP_INIT_WAIT 1024U
1190 #define KMP_NEXT_WAIT 512U
1193 #define KMP_INIT_WAIT 1024U
1194 #define KMP_NEXT_WAIT 512U
1195 #elif KMP_OS_OPENBSD
1197 #define KMP_INIT_WAIT 1024U
1198 #define KMP_NEXT_WAIT 512U
1201 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1202 typedef struct kmp_cpuid {
1209 typedef struct kmp_cpuinfo {
1221 kmp_uint64 frequency;
1222 char name[3 *
sizeof(kmp_cpuid_t)];
1225 extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
1230 static inline void __kmp_x86_cpuid(
int leaf,
int subleaf,
struct kmp_cpuid *p) {
1231 __asm__ __volatile__(
"cpuid"
1232 :
"=a"(p->eax),
"=b"(p->ebx),
"=c"(p->ecx),
"=d"(p->edx)
1233 :
"a"(leaf),
"c"(subleaf));
1236 static inline void __kmp_load_x87_fpu_control_word(
const kmp_int16 *p) {
1237 __asm__ __volatile__(
"fldcw %0" : :
"m"(*p));
1240 static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
1241 __asm__ __volatile__(
"fstcw %0" :
"=m"(*p));
1243 static inline void __kmp_clear_x87_fpu_status_word() {
1246 struct x87_fpu_state {
1255 struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
1256 __asm__ __volatile__(
"fstenv %0\n\t"
1257 "andw $0x7f00, %1\n\t"
1259 :
"+m"(fpu_state),
"+m"(fpu_state.sw));
1261 __asm__ __volatile__(
"fnclex");
1265 static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) { _mm_setcsr(*p); }
1266 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1268 static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) {}
1269 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
1273 extern void __kmp_x86_cpuid(
int mode,
int mode2,
struct kmp_cpuid *p);
1274 extern void __kmp_load_x87_fpu_control_word(
const kmp_int16 *p);
1275 extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
1276 extern void __kmp_clear_x87_fpu_status_word();
1277 static inline void __kmp_load_mxcsr(
const kmp_uint32 *p) { _mm_setcsr(*p); }
1278 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1281 #define KMP_X86_MXCSR_MASK 0xffffffc0
1284 extern void __kmp_x86_pause(
void);
1290 static inline void __kmp_x86_pause(
void) { _mm_delay_32(300); }
1292 static inline void __kmp_x86_pause(
void) { _mm_pause(); }
1294 #define KMP_CPU_PAUSE() __kmp_x86_pause()
1295 #elif KMP_ARCH_PPC64
1296 #define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1297 #define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1298 #define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1299 #define KMP_CPU_PAUSE() \
1301 KMP_PPC64_PRI_LOW(); \
1302 KMP_PPC64_PRI_MED(); \
1303 KMP_PPC64_PRI_LOC_MB(); \
1306 #define KMP_CPU_PAUSE()
1309 #define KMP_INIT_YIELD(count) \
1310 { (count) = __kmp_yield_init; }
1312 #define KMP_OVERSUBSCRIBED \
1313 (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
1315 #define KMP_TRY_YIELD \
1316 ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
1318 #define KMP_TRY_YIELD_OVERSUB \
1319 ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
1321 #define KMP_YIELD(cond) \
1324 if ((cond) && (KMP_TRY_YIELD)) \
1328 #define KMP_YIELD_OVERSUB() \
1331 if ((KMP_TRY_YIELD_OVERSUB)) \
1337 #define KMP_YIELD_SPIN(count) \
1340 if (KMP_TRY_YIELD) { \
1344 (count) = __kmp_yield_next; \
1349 #define KMP_YIELD_OVERSUB_ELSE_SPIN(count) \
1352 if ((KMP_TRY_YIELD_OVERSUB)) \
1354 else if (__kmp_use_yield == 1) { \
1358 (count) = __kmp_yield_next; \
1366 #if KMP_HAVE_WAITPKG_INTRINSICS
1367 #if KMP_HAVE_IMMINTRIN_H
1368 #include <immintrin.h>
1369 #elif KMP_HAVE_INTRIN_H
1373 KMP_ATTRIBUTE_TARGET_WAITPKG
1374 static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
1375 #if !KMP_HAVE_WAITPKG_INTRINSICS
1376 uint32_t timeHi = uint32_t(counter >> 32);
1377 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1379 __asm__
volatile(
"#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
1382 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1386 return _tpause(hint, counter);
1389 KMP_ATTRIBUTE_TARGET_WAITPKG
1390 static inline void __kmp_umonitor(
void *cacheline) {
1391 #if !KMP_HAVE_WAITPKG_INTRINSICS
1392 __asm__
volatile(
"# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
1397 _umonitor(cacheline);
1400 KMP_ATTRIBUTE_TARGET_WAITPKG
1401 static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
1402 #if !KMP_HAVE_WAITPKG_INTRINSICS
1403 uint32_t timeHi = uint32_t(counter >> 32);
1404 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1406 __asm__
volatile(
"#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
1409 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1413 return _umwait(hint, counter);
1416 #elif KMP_HAVE_MWAIT
1418 #include <pmmintrin.h>
1423 __attribute__((target(
"sse3")))
1426 __kmp_mm_monitor(
void *cacheline,
unsigned extensions,
unsigned hints) {
1427 _mm_monitor(cacheline, extensions, hints);
1430 __attribute__((target(
"sse3")))
1433 __kmp_mm_mwait(
unsigned extensions,
unsigned hints) {
1434 _mm_mwait(extensions, hints);
1452 ct_ordered_in_parallel,
1460 #define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
1464 enum cons_type type;
1470 struct cons_header {
1471 int p_top, w_top, s_top;
1472 int stack_size, stack_top;
1473 struct cons_data *stack_data;
1476 struct kmp_region_info {
1478 int offset[KMP_MAX_FIELDS];
1479 int length[KMP_MAX_FIELDS];
1486 typedef HANDLE kmp_thread_t;
1487 typedef DWORD kmp_key_t;
1491 typedef pthread_t kmp_thread_t;
1492 typedef pthread_key_t kmp_key_t;
1495 extern kmp_key_t __kmp_gtid_threadprivate_key;
1497 typedef struct kmp_sys_info {
1511 typedef int kmp_itt_mark_t;
1512 #define KMP_ITT_DEBUG 0
1515 typedef kmp_int32 kmp_critical_name[8];
1526 typedef void (*
kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
1527 typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
1544 typedef void *(*kmpc_ctor)(
void *);
1557 typedef void *(*kmpc_cctor)(
void *,
void *);
1567 typedef void *(*kmpc_ctor_vec)(
void *, size_t);
1579 typedef void *(*kmpc_cctor_vec)(
void *,
void *,
1587 typedef struct kmp_cached_addr {
1589 void ***compiler_cache;
1591 struct kmp_cached_addr *next;
1592 } kmp_cached_addr_t;
1594 struct private_data {
1595 struct private_data *next;
1601 struct private_common {
1602 struct private_common *next;
1603 struct private_common *link;
1609 struct shared_common {
1610 struct shared_common *next;
1611 struct private_data *pod_init;
1631 #define KMP_HASH_TABLE_LOG2 9
1632 #define KMP_HASH_TABLE_SIZE \
1633 (1 << KMP_HASH_TABLE_LOG2)
1634 #define KMP_HASH_SHIFT 3
1635 #define KMP_HASH(x) \
1636 ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
1638 struct common_table {
1639 struct private_common *data[KMP_HASH_TABLE_SIZE];
1642 struct shared_table {
1643 struct shared_common *data[KMP_HASH_TABLE_SIZE];
1648 #if KMP_USE_HIER_SCHED
1651 typedef struct kmp_hier_private_bdata_t {
1652 kmp_int32 num_active;
1654 kmp_uint64 wait_val[2];
1655 } kmp_hier_private_bdata_t;
1658 typedef struct kmp_sched_flags {
1659 unsigned ordered : 1;
1660 unsigned nomerge : 1;
1661 unsigned contains_last : 1;
1662 #if KMP_USE_HIER_SCHED
1663 unsigned use_hier : 1;
1664 unsigned unused : 28;
1666 unsigned unused : 29;
1668 } kmp_sched_flags_t;
1670 KMP_BUILD_ASSERT(
sizeof(kmp_sched_flags_t) == 4);
1672 #if KMP_STATIC_STEAL_ENABLED
1673 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1680 kmp_lock_t *steal_lock;
1687 struct KMP_ALIGN(32) {
1694 kmp_uint32 ordered_lower;
1695 kmp_uint32 ordered_upper;
1697 kmp_int32 last_upper;
1699 } dispatch_private_info32_t;
1701 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1708 kmp_lock_t *steal_lock;
1717 struct KMP_ALIGN(32) {
1724 kmp_uint64 ordered_lower;
1725 kmp_uint64 ordered_upper;
1727 kmp_int64 last_upper;
1729 } dispatch_private_info64_t;
1731 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1744 kmp_uint32 ordered_lower;
1745 kmp_uint32 ordered_upper;
1747 kmp_int32 last_upper;
1749 } dispatch_private_info32_t;
1751 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1765 kmp_uint64 ordered_lower;
1766 kmp_uint64 ordered_upper;
1768 kmp_int64 last_upper;
1770 } dispatch_private_info64_t;
1773 typedef struct KMP_ALIGN_CACHE dispatch_private_info {
1774 union private_info {
1775 dispatch_private_info32_t p32;
1776 dispatch_private_info64_t p64;
1779 kmp_sched_flags_t flags;
1780 std::atomic<kmp_uint32> steal_flag;
1781 kmp_int32 ordered_bumped;
1783 struct dispatch_private_info *next;
1784 kmp_int32 type_size;
1785 #if KMP_USE_HIER_SCHED
1789 enum cons_type pushed_ws;
1790 } dispatch_private_info_t;
1792 typedef struct dispatch_shared_info32 {
1795 volatile kmp_uint32 iteration;
1796 volatile kmp_int32 num_done;
1797 volatile kmp_uint32 ordered_iteration;
1799 kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
1800 } dispatch_shared_info32_t;
1802 typedef struct dispatch_shared_info64 {
1805 volatile kmp_uint64 iteration;
1806 volatile kmp_int64 num_done;
1807 volatile kmp_uint64 ordered_iteration;
1809 kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
1810 } dispatch_shared_info64_t;
1812 typedef struct dispatch_shared_info {
1814 dispatch_shared_info32_t s32;
1815 dispatch_shared_info64_t s64;
1817 volatile kmp_uint32 buffer_index;
1818 volatile kmp_int32 doacross_buf_idx;
1819 volatile kmp_uint32 *doacross_flags;
1820 kmp_int32 doacross_num_done;
1821 #if KMP_USE_HIER_SCHED
1830 } dispatch_shared_info_t;
1832 typedef struct kmp_disp {
1834 void (*th_deo_fcn)(
int *gtid,
int *cid,
ident_t *);
1836 void (*th_dxo_fcn)(
int *gtid,
int *cid,
ident_t *);
1838 dispatch_shared_info_t *th_dispatch_sh_current;
1839 dispatch_private_info_t *th_dispatch_pr_current;
1841 dispatch_private_info_t *th_disp_buffer;
1842 kmp_uint32 th_disp_index;
1843 kmp_int32 th_doacross_buf_idx;
1844 volatile kmp_uint32 *th_doacross_flags;
1845 kmp_int64 *th_doacross_info;
1846 #if KMP_USE_INTERNODE_ALIGNMENT
1847 char more_padding[INTERNODE_CACHE_LINE];
1855 #define KMP_INIT_BARRIER_STATE 0
1856 #define KMP_BARRIER_SLEEP_BIT 0
1857 #define KMP_BARRIER_UNUSED_BIT 1
1858 #define KMP_BARRIER_BUMP_BIT 2
1860 #define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
1861 #define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
1862 #define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
1864 #if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
1865 #error "Barrier sleep bit must be smaller than barrier bump bit"
1867 #if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
1868 #error "Barrier unused bit must be smaller than barrier bump bit"
1872 #define KMP_BARRIER_NOT_WAITING 0
1873 #define KMP_BARRIER_OWN_FLAG \
1875 #define KMP_BARRIER_PARENT_FLAG \
1877 #define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
1879 #define KMP_BARRIER_SWITCHING \
1882 #define KMP_NOT_SAFE_TO_REAP \
1884 #define KMP_SAFE_TO_REAP 1
1896 bs_plain_barrier = 0,
1898 bs_forkjoin_barrier,
1899 #if KMP_FAST_REDUCTION_BARRIER
1900 bs_reduction_barrier,
1906 #if !KMP_FAST_REDUCTION_BARRIER
1907 #define bs_reduction_barrier bs_plain_barrier
1910 typedef enum kmp_bar_pat {
1917 bp_hierarchical_bar = 3,
1922 #define KMP_BARRIER_ICV_PUSH 1
1925 typedef struct kmp_internal_control {
1926 int serial_nesting_level;
1939 int max_active_levels;
1942 kmp_proc_bind_t proc_bind;
1943 kmp_int32 default_device;
1944 struct kmp_internal_control *next;
1945 } kmp_internal_control_t;
1947 static inline void copy_icvs(kmp_internal_control_t *dst,
1948 kmp_internal_control_t *src) {
1953 typedef struct KMP_ALIGN_CACHE kmp_bstate {
1958 kmp_internal_control_t th_fixed_icvs;
1961 volatile kmp_uint64 b_go;
1962 KMP_ALIGN_CACHE
volatile kmp_uint64
1964 kmp_uint32 *skip_per_level;
1965 kmp_uint32 my_level;
1966 kmp_int32 parent_tid;
1969 struct kmp_bstate *parent_bar;
1971 kmp_uint64 leaf_state;
1973 kmp_uint8 base_leaf_kids;
1974 kmp_uint8 leaf_kids;
1976 kmp_uint8 wait_flag;
1977 kmp_uint8 use_oncore_barrier;
1982 KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
1986 union KMP_ALIGN_CACHE kmp_barrier_union {
1988 char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
1992 typedef union kmp_barrier_union kmp_balign_t;
1995 union KMP_ALIGN_CACHE kmp_barrier_team_union {
1997 char b_pad[CACHE_LINE];
1999 kmp_uint64 b_arrived;
2005 kmp_uint b_master_arrived;
2006 kmp_uint b_team_arrived;
2011 typedef union kmp_barrier_team_union kmp_balign_team_t;
2018 typedef struct kmp_win32_mutex {
2020 CRITICAL_SECTION cs;
2021 } kmp_win32_mutex_t;
2023 typedef struct kmp_win32_cond {
2028 kmp_win32_mutex_t waiters_count_lock_;
2035 int wait_generation_count_;
2044 union KMP_ALIGN_CACHE kmp_cond_union {
2046 char c_pad[CACHE_LINE];
2047 pthread_cond_t c_cond;
2050 typedef union kmp_cond_union kmp_cond_align_t;
2052 union KMP_ALIGN_CACHE kmp_mutex_union {
2054 char m_pad[CACHE_LINE];
2055 pthread_mutex_t m_mutex;
2058 typedef union kmp_mutex_union kmp_mutex_align_t;
2062 typedef struct kmp_desc_base {
2064 size_t ds_stacksize;
2066 kmp_thread_t ds_thread;
2067 volatile int ds_tid;
2070 volatile int ds_alive;
2087 typedef union KMP_ALIGN_CACHE kmp_desc {
2089 char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
2093 typedef struct kmp_local {
2094 volatile int this_construct;
2099 #if !USE_CMP_XCHG_FOR_BGET
2100 #ifdef USE_QUEUING_LOCK_FOR_BGET
2101 kmp_lock_t bget_lock;
2103 kmp_bootstrap_lock_t bget_lock;
2110 PACKED_REDUCTION_METHOD_T
2111 packed_reduction_method;
2116 #define KMP_CHECK_UPDATE(a, b) \
2119 #define KMP_CHECK_UPDATE_SYNC(a, b) \
2121 TCW_SYNC_PTR((a), (b))
2123 #define get__blocktime(xteam, xtid) \
2124 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2125 #define get__bt_set(xteam, xtid) \
2126 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2128 #define get__bt_intervals(xteam, xtid) \
2129 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2132 #define get__dynamic_2(xteam, xtid) \
2133 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2134 #define get__nproc_2(xteam, xtid) \
2135 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2136 #define get__sched_2(xteam, xtid) \
2137 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2139 #define set__blocktime_team(xteam, xtid, xval) \
2140 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2144 #define set__bt_intervals_team(xteam, xtid, xval) \
2145 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
2149 #define set__bt_set_team(xteam, xtid, xval) \
2150 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2152 #define set__dynamic(xthread, xval) \
2153 (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
2154 #define get__dynamic(xthread) \
2155 (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2157 #define set__nproc(xthread, xval) \
2158 (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2160 #define set__thread_limit(xthread, xval) \
2161 (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
2163 #define set__max_active_levels(xthread, xval) \
2164 (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2166 #define get__max_active_levels(xthread) \
2167 ((xthread)->th.th_current_task->td_icvs.max_active_levels)
2169 #define set__sched(xthread, xval) \
2170 (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2172 #define set__proc_bind(xthread, xval) \
2173 (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2174 #define get__proc_bind(xthread) \
2175 ((xthread)->th.th_current_task->td_icvs.proc_bind)
2179 typedef enum kmp_tasking_mode {
2180 tskm_immediate_exec = 0,
2181 tskm_extra_barrier = 1,
2182 tskm_task_teams = 2,
2184 } kmp_tasking_mode_t;
2186 extern kmp_tasking_mode_t
2188 extern int __kmp_task_stealing_constraint;
2189 extern int __kmp_enable_task_throttling;
2190 extern kmp_int32 __kmp_default_device;
2193 extern kmp_int32 __kmp_max_task_priority;
2195 extern kmp_uint64 __kmp_taskloop_min_tasks;
2199 #define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2200 #define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
2204 #define KMP_TASKING_ENABLED(task_team) \
2205 (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
2213 typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32,
void *);
2215 typedef union kmp_cmplrdata {
2226 typedef struct kmp_task {
2233 kmp_cmplrdata_t data2;
2242 typedef struct kmp_taskgroup {
2243 std::atomic<kmp_int32> count;
2244 std::atomic<kmp_int32>
2246 struct kmp_taskgroup *parent;
2249 kmp_int32 reduce_num_data;
2250 uintptr_t *gomp_data;
2254 typedef union kmp_depnode kmp_depnode_t;
2255 typedef struct kmp_depnode_list kmp_depnode_list_t;
2256 typedef struct kmp_dephash_entry kmp_dephash_entry_t;
2259 #define KMP_DEP_IN 0x1
2260 #define KMP_DEP_OUT 0x2
2261 #define KMP_DEP_INOUT 0x3
2262 #define KMP_DEP_MTX 0x4
2263 #define KMP_DEP_SET 0x8
2264 #define KMP_DEP_ALL 0x80
2266 typedef struct kmp_depend_info {
2267 kmp_intptr_t base_addr;
2276 unsigned unused : 3;
2280 } kmp_depend_info_t;
2283 struct kmp_depnode_list {
2284 kmp_depnode_t *node;
2285 kmp_depnode_list_t *next;
2289 #define MAX_MTX_DEPS 4
2291 typedef struct kmp_base_depnode {
2292 kmp_depnode_list_t *successors;
2294 kmp_lock_t *mtx_locks[MAX_MTX_DEPS];
2295 kmp_int32 mtx_num_locks;
2297 #if KMP_SUPPORT_GRAPH_OUTPUT
2300 std::atomic<kmp_int32> npredecessors;
2301 std::atomic<kmp_int32> nrefs;
2302 } kmp_base_depnode_t;
2304 union KMP_ALIGN_CACHE kmp_depnode {
2306 char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
2307 kmp_base_depnode_t dn;
2310 struct kmp_dephash_entry {
2312 kmp_depnode_t *last_out;
2313 kmp_depnode_list_t *last_set;
2314 kmp_depnode_list_t *prev_set;
2315 kmp_uint8 last_flag;
2316 kmp_lock_t *mtx_lock;
2317 kmp_dephash_entry_t *next_in_bucket;
2320 typedef struct kmp_dephash {
2321 kmp_dephash_entry_t **buckets;
2323 kmp_depnode_t *last_all;
2325 kmp_uint32 nelements;
2326 kmp_uint32 nconflicts;
2329 typedef struct kmp_task_affinity_info {
2330 kmp_intptr_t base_addr;
2335 kmp_int32 reserved : 30;
2337 } kmp_task_affinity_info_t;
2339 typedef enum kmp_event_type_t {
2340 KMP_EVENT_UNINITIALIZED = 0,
2341 KMP_EVENT_ALLOW_COMPLETION = 1
2345 kmp_event_type_t type;
2346 kmp_tas_lock_t lock;
2352 #ifdef BUILD_TIED_TASK_STACK
2355 typedef struct kmp_stack_block {
2356 kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
2357 struct kmp_stack_block *sb_next;
2358 struct kmp_stack_block *sb_prev;
2359 } kmp_stack_block_t;
2361 typedef struct kmp_task_stack {
2362 kmp_stack_block_t ts_first_block;
2363 kmp_taskdata_t **ts_top;
2364 kmp_int32 ts_entries;
2369 typedef struct kmp_tasking_flags {
2371 unsigned tiedness : 1;
2373 unsigned merged_if0 : 1;
2375 unsigned destructors_thunk : 1;
2379 unsigned priority_specified : 1;
2381 unsigned detachable : 1;
2382 unsigned hidden_helper : 1;
2383 unsigned reserved : 8;
2386 unsigned tasktype : 1;
2387 unsigned task_serial : 1;
2388 unsigned tasking_ser : 1;
2390 unsigned team_serial : 1;
2394 unsigned started : 1;
2395 unsigned executing : 1;
2396 unsigned complete : 1;
2398 unsigned native : 1;
2399 unsigned reserved31 : 7;
2401 } kmp_tasking_flags_t;
2403 struct kmp_taskdata {
2404 kmp_int32 td_task_id;
2405 kmp_tasking_flags_t td_flags;
2406 kmp_team_t *td_team;
2407 kmp_info_p *td_alloc_thread;
2409 kmp_taskdata_t *td_parent;
2411 std::atomic<kmp_int32> td_untied_count;
2415 kmp_uint32 td_taskwait_counter;
2416 kmp_int32 td_taskwait_thread;
2417 KMP_ALIGN_CACHE kmp_internal_control_t
2419 KMP_ALIGN_CACHE std::atomic<kmp_int32>
2420 td_allocated_child_tasks;
2422 std::atomic<kmp_int32>
2423 td_incomplete_child_tasks;
2430 kmp_task_team_t *td_task_team;
2431 size_t td_size_alloc;
2432 #if defined(KMP_GOMP_COMPAT)
2434 kmp_int32 td_size_loop_bounds;
2436 kmp_taskdata_t *td_last_tied;
2437 #if defined(KMP_GOMP_COMPAT)
2439 void (*td_copy_func)(
void *,
void *);
2441 kmp_event_t td_allow_completion_event;
2443 ompt_task_info_t ompt_task_info;
2448 KMP_BUILD_ASSERT(
sizeof(kmp_taskdata_t) %
sizeof(
void *) == 0);
2451 typedef struct kmp_base_thread_data {
2455 kmp_bootstrap_lock_t td_deque_lock;
2458 kmp_int32 td_deque_size;
2459 kmp_uint32 td_deque_head;
2460 kmp_uint32 td_deque_tail;
2461 kmp_int32 td_deque_ntasks;
2463 kmp_int32 td_deque_last_stolen;
2464 #ifdef BUILD_TIED_TASK_STACK
2465 kmp_task_stack_t td_susp_tied_tasks;
2468 } kmp_base_thread_data_t;
2470 #define TASK_DEQUE_BITS 8
2471 #define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
2473 #define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
2474 #define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
2476 typedef union KMP_ALIGN_CACHE kmp_thread_data {
2477 kmp_base_thread_data_t td;
2479 char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2480 } kmp_thread_data_t;
2483 typedef struct kmp_base_task_team {
2484 kmp_bootstrap_lock_t
2487 kmp_task_team_t *tt_next;
2491 kmp_int32 tt_found_tasks;
2495 kmp_int32 tt_max_threads;
2496 kmp_int32 tt_found_proxy_tasks;
2497 kmp_int32 tt_untied_task_encountered;
2500 kmp_int32 tt_hidden_helper_task_encountered;
2503 std::atomic<kmp_int32> tt_unfinished_threads;
2508 } kmp_base_task_team_t;
2510 union KMP_ALIGN_CACHE kmp_task_team {
2511 kmp_base_task_team_t tt;
2513 char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2516 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2519 typedef struct kmp_free_list {
2520 void *th_free_list_self;
2521 void *th_free_list_sync;
2523 void *th_free_list_other;
2527 #if KMP_NESTED_HOT_TEAMS
2530 typedef struct kmp_hot_team_ptr {
2531 kmp_team_p *hot_team;
2532 kmp_int32 hot_team_nth;
2533 } kmp_hot_team_ptr_t;
2535 typedef struct kmp_teams_size {
2551 typedef struct kmp_cg_root {
2552 kmp_info_p *cg_root;
2555 kmp_int32 cg_thread_limit;
2556 kmp_int32 cg_nthreads;
2557 struct kmp_cg_root *up;
2562 typedef struct KMP_ALIGN_CACHE kmp_base_info {
2568 kmp_team_p *th_team;
2569 kmp_root_p *th_root;
2570 kmp_info_p *th_next_pool;
2571 kmp_disp_t *th_dispatch;
2577 kmp_info_p *th_team_master;
2578 int th_team_serialized;
2579 microtask_t th_teams_microtask;
2588 int th_team_bt_intervals;
2591 kmp_uint64 th_team_bt_intervals;
2594 #if KMP_AFFINITY_SUPPORTED
2595 kmp_affin_mask_t *th_affin_mask;
2597 omp_allocator_handle_t th_def_allocator;
2601 #if KMP_NESTED_HOT_TEAMS
2602 kmp_hot_team_ptr_t *th_hot_teams;
2608 #if KMP_AFFINITY_SUPPORTED
2609 int th_current_place;
2615 int th_prev_num_threads;
2617 kmp_uint64 th_bar_arrive_time;
2618 kmp_uint64 th_bar_min_time;
2619 kmp_uint64 th_frame_time;
2621 kmp_local_t th_local;
2622 struct private_common *th_pri_head;
2627 KMP_ALIGN_CACHE kmp_team_p
2631 ompt_thread_info_t ompt_thread_info;
2635 struct common_table *th_pri_common;
2637 volatile kmp_uint32 th_spin_here;
2640 volatile void *th_sleep_loc;
2641 flag_type th_sleep_loc_type;
2648 kmp_task_team_t *th_task_team;
2649 kmp_taskdata_t *th_current_task;
2650 kmp_uint8 th_task_state;
2651 kmp_uint8 *th_task_state_memo_stack;
2653 kmp_uint32 th_task_state_top;
2654 kmp_uint32 th_task_state_stack_sz;
2655 kmp_uint32 th_reap_state;
2660 kmp_uint8 th_active_in_pool;
2662 std::atomic<kmp_uint32> th_used_in_team;
2665 struct cons_header *th_cons;
2666 #if KMP_USE_HIER_SCHED
2668 kmp_hier_private_bdata_t *th_hier_bar_data;
2672 KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
2674 KMP_ALIGN_CACHE
volatile kmp_int32
2677 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2679 kmp_free_list_t th_free_lists[NUM_LISTS];
2684 kmp_win32_cond_t th_suspend_cv;
2685 kmp_win32_mutex_t th_suspend_mx;
2686 std::atomic<int> th_suspend_init;
2689 kmp_cond_align_t th_suspend_cv;
2690 kmp_mutex_align_t th_suspend_mx;
2691 std::atomic<int> th_suspend_init_count;
2695 kmp_itt_mark_t th_itt_mark_single;
2698 #if KMP_STATS_ENABLED
2699 kmp_stats_list *th_stats;
2702 std::atomic<bool> th_blocking;
2704 kmp_cg_root_t *th_cg_roots;
2707 typedef union KMP_ALIGN_CACHE kmp_info {
2709 char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
2715 typedef struct kmp_base_data {
2716 volatile kmp_uint32 t_value;
2719 typedef union KMP_ALIGN_CACHE kmp_sleep_team {
2721 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2725 typedef union KMP_ALIGN_CACHE kmp_ordered_team {
2727 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2729 } kmp_ordered_team_t;
2731 typedef int (*launch_t)(
int gtid);
2734 #define KMP_MIN_MALLOC_ARGV_ENTRIES 100
2740 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2741 #define KMP_INLINE_ARGV_BYTES \
2743 ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
2744 sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
2747 #define KMP_INLINE_ARGV_BYTES \
2748 (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
2750 #define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
2752 typedef struct KMP_ALIGN_CACHE kmp_base_team {
2755 KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
2756 kmp_balign_team_t t_bar[bs_last_barrier];
2757 std::atomic<int> t_construct;
2758 char pad[
sizeof(kmp_lock_t)];
2761 std::atomic<void *> t_tg_reduce_data[2];
2762 std::atomic<int> t_tg_fini_counter[2];
2766 KMP_ALIGN_CACHE
int t_master_tid;
2767 int t_master_this_cons;
2771 kmp_team_p *t_parent;
2772 kmp_team_p *t_next_pool;
2773 kmp_disp_t *t_dispatch;
2774 kmp_task_team_t *t_task_team[2];
2775 kmp_proc_bind_t t_proc_bind;
2777 kmp_uint64 t_region_time;
2782 KMP_ALIGN_CACHE
void **t_argv;
2789 ompt_team_info_t ompt_team_info;
2790 ompt_lw_taskteam_t *ompt_serialized_team_info;
2793 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2794 kmp_int8 t_fp_control_saved;
2796 kmp_int16 t_x87_fpu_control_word;
2800 void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
2802 KMP_ALIGN_CACHE kmp_info_t **t_threads;
2804 *t_implicit_task_taskdata;
2807 KMP_ALIGN_CACHE
int t_max_argc;
2810 dispatch_shared_info_t *t_disp_buffer;
2813 kmp_r_sched_t t_sched;
2814 #if KMP_AFFINITY_SUPPORTED
2818 int t_display_affinity;
2821 omp_allocator_handle_t t_def_allocator;
2824 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
2829 char dummy_padding[1024];
2832 KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
2835 std::atomic<kmp_int32> t_cancel_request;
2836 int t_master_active;
2837 void *t_copypriv_data;
2839 std::atomic<kmp_uint32> t_copyin_counter;
2844 distributedBarrier *b;
2847 union KMP_ALIGN_CACHE kmp_team {
2850 char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
2853 typedef union KMP_ALIGN_CACHE kmp_time_global {
2855 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2857 } kmp_time_global_t;
2859 typedef struct kmp_base_global {
2861 kmp_time_global_t g_time;
2864 volatile int g_abort;
2865 volatile int g_done;
2868 enum dynamic_mode g_dynamic_mode;
2869 } kmp_base_global_t;
2871 typedef union KMP_ALIGN_CACHE kmp_global {
2872 kmp_base_global_t g;
2874 char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
2877 typedef struct kmp_base_root {
2882 volatile int r_active;
2884 std::atomic<int> r_in_parallel;
2886 kmp_team_t *r_root_team;
2887 kmp_team_t *r_hot_team;
2888 kmp_info_t *r_uber_thread;
2889 kmp_lock_t r_begin_lock;
2890 volatile int r_begin;
2892 #if KMP_AFFINITY_SUPPORTED
2893 int r_affinity_assigned;
2897 typedef union KMP_ALIGN_CACHE kmp_root {
2900 char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
2903 struct fortran_inx_info {
2909 extern int __kmp_settings;
2910 extern int __kmp_duplicate_library_ok;
2912 extern int __kmp_forkjoin_frames;
2913 extern int __kmp_forkjoin_frames_mode;
2915 extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
2916 extern int __kmp_determ_red;
2919 extern int kmp_a_debug;
2920 extern int kmp_b_debug;
2921 extern int kmp_c_debug;
2922 extern int kmp_d_debug;
2923 extern int kmp_e_debug;
2924 extern int kmp_f_debug;
2928 #define KMP_DEBUG_BUF_LINES_INIT 512
2929 #define KMP_DEBUG_BUF_LINES_MIN 1
2931 #define KMP_DEBUG_BUF_CHARS_INIT 128
2932 #define KMP_DEBUG_BUF_CHARS_MIN 2
2936 extern int __kmp_debug_buf_lines;
2938 __kmp_debug_buf_chars;
2939 extern int __kmp_debug_buf_atomic;
2942 extern char *__kmp_debug_buffer;
2943 extern std::atomic<int> __kmp_debug_count;
2945 extern int __kmp_debug_buf_warn_chars;
2950 extern int __kmp_par_range;
2952 #define KMP_PAR_RANGE_ROUTINE_LEN 1024
2953 extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
2954 #define KMP_PAR_RANGE_FILENAME_LEN 1024
2955 extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
2956 extern int __kmp_par_range_lb;
2957 extern int __kmp_par_range_ub;
2963 extern int __kmp_storage_map_verbose;
2965 extern int __kmp_storage_map_verbose_specified;
2967 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2968 extern kmp_cpuinfo_t __kmp_cpuinfo;
2971 extern volatile int __kmp_init_serial;
2972 extern volatile int __kmp_init_gtid;
2973 extern volatile int __kmp_init_common;
2974 extern volatile int __kmp_init_middle;
2975 extern volatile int __kmp_init_parallel;
2977 extern volatile int __kmp_init_monitor;
2979 extern volatile int __kmp_init_user_locks;
2980 extern volatile int __kmp_init_hidden_helper_threads;
2981 extern int __kmp_init_counter;
2982 extern int __kmp_root_counter;
2983 extern int __kmp_version;
2986 extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
2989 extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
2990 extern kmp_uint32 __kmp_barrier_release_bb_dflt;
2991 extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
2992 extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
2993 extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
2994 extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
2995 extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
2996 extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
2997 extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
2998 extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
2999 extern char const *__kmp_barrier_type_name[bs_last_barrier];
3000 extern char const *__kmp_barrier_pattern_name[bp_last_bar];
3003 extern kmp_bootstrap_lock_t __kmp_initz_lock;
3004 extern kmp_bootstrap_lock_t __kmp_forkjoin_lock;
3005 extern kmp_bootstrap_lock_t __kmp_task_team_lock;
3006 extern kmp_bootstrap_lock_t
3009 extern kmp_bootstrap_lock_t
3012 extern kmp_bootstrap_lock_t
3013 __kmp_tp_cached_lock;
3016 extern kmp_lock_t __kmp_global_lock;
3017 extern kmp_queuing_lock_t __kmp_dispatch_lock;
3018 extern kmp_lock_t __kmp_debug_lock;
3020 extern enum library_type __kmp_library;
3026 extern int __kmp_chunk;
3027 extern int __kmp_force_monotonic;
3029 extern size_t __kmp_stksize;
3031 extern size_t __kmp_monitor_stksize;
3033 extern size_t __kmp_stkoffset;
3034 extern int __kmp_stkpadding;
3037 __kmp_malloc_pool_incr;
3038 extern int __kmp_env_stksize;
3039 extern int __kmp_env_blocktime;
3040 extern int __kmp_env_checks;
3041 extern int __kmp_env_consistency_check;
3042 extern int __kmp_generate_warnings;
3043 extern int __kmp_reserve_warn;
3045 #ifdef DEBUG_SUSPEND
3046 extern int __kmp_suspend_count;
3049 extern kmp_int32 __kmp_use_yield;
3050 extern kmp_int32 __kmp_use_yield_exp_set;
3051 extern kmp_uint32 __kmp_yield_init;
3052 extern kmp_uint32 __kmp_yield_next;
3055 extern int __kmp_allThreadsSpecified;
3057 extern size_t __kmp_align_alloc;
3059 extern int __kmp_xproc;
3060 extern int __kmp_avail_proc;
3061 extern size_t __kmp_sys_min_stksize;
3062 extern int __kmp_sys_max_nth;
3064 extern int __kmp_max_nth;
3066 extern int __kmp_cg_max_nth;
3067 extern int __kmp_teams_max_nth;
3068 extern int __kmp_threads_capacity;
3070 extern int __kmp_dflt_team_nth;
3072 extern int __kmp_dflt_team_nth_ub;
3074 extern int __kmp_tp_capacity;
3076 extern int __kmp_tp_cached;
3078 extern int __kmp_dflt_blocktime;
3082 __kmp_monitor_wakeups;
3083 extern int __kmp_bt_intervals;
3086 #ifdef KMP_ADJUST_BLOCKTIME
3087 extern int __kmp_zero_bt;
3089 #ifdef KMP_DFLT_NTH_CORES
3090 extern int __kmp_ncores;
3093 extern int __kmp_abort_delay;
3095 extern int __kmp_need_register_atfork_specified;
3096 extern int __kmp_need_register_atfork;
3098 extern int __kmp_gtid_mode;
3106 __kmp_adjust_gtid_mode;
3107 #ifdef KMP_TDATA_GTID
3108 extern KMP_THREAD_LOCAL
int __kmp_gtid;
3110 extern int __kmp_tls_gtid_min;
3111 extern int __kmp_foreign_tp;
3112 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3113 extern int __kmp_inherit_fp_control;
3114 extern kmp_int16 __kmp_init_x87_fpu_control_word;
3115 extern kmp_uint32 __kmp_init_mxcsr;
3120 extern int __kmp_dflt_max_active_levels;
3123 extern bool __kmp_dflt_max_active_levels_set;
3124 extern int __kmp_dispatch_num_buffers;
3126 #if KMP_NESTED_HOT_TEAMS
3127 extern int __kmp_hot_teams_mode;
3128 extern int __kmp_hot_teams_max_level;
3132 extern enum clock_function_type __kmp_clock_function;
3133 extern int __kmp_clock_function_param;
3136 #if KMP_MIC_SUPPORTED
3137 extern enum mic_type __kmp_mic_type;
3140 #ifdef USE_LOAD_BALANCE
3141 extern double __kmp_load_balance_interval;
3145 typedef struct kmp_nested_nthreads_t {
3149 } kmp_nested_nthreads_t;
3151 extern kmp_nested_nthreads_t __kmp_nested_nth;
3153 #if KMP_USE_ADAPTIVE_LOCKS
3156 struct kmp_adaptive_backoff_params_t {
3158 kmp_uint32 max_soft_retries;
3161 kmp_uint32 max_badness;
3164 extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3166 #if KMP_DEBUG_ADAPTIVE_LOCKS
3167 extern const char *__kmp_speculative_statsfile;
3172 extern int __kmp_display_env;
3173 extern int __kmp_display_env_verbose;
3174 extern int __kmp_omp_cancellation;
3175 extern int __kmp_nteams;
3176 extern int __kmp_teams_thread_limit;
/* The following are protected by the fork/join lock. */
extern kmp_info_t **__kmp_threads; /* descriptors for the registered threads */
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;

// total number of threads reachable from some root thread, including all
// root threads
extern volatile int __kmp_nth;
// as above, but also counting the threads sitting in the thread pool
extern volatile int __kmp_all_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;

extern kmp_root_t **__kmp_root; /* root of the thread hierarchy */
/* end of data protected by the fork/join lock */
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))

#define __kmp_get_team_num_threads(gtid)                                       \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}

// Asserts that the calling thread has a valid, registered gtid.
static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
  if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
    KMP_FATAL(ThreadIdentInvalid);
}
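// Illustrative sketch (not part of the original header; kept out of the build
// with #if 0): how the gtid/tid accessors above relate for a registered
// thread. The function name is hypothetical.
#if 0
static void __kmp_example_id_accessors() {
  int gtid = __kmp_get_gtid(); // global thread id of the calling thread
  __kmp_assert_valid_gtid(gtid);
  int tid = __kmp_tid_from_gtid(gtid); // id within the current team
  kmp_info_t *thr = __kmp_thread_from_gtid(gtid);
  kmp_team_t *team = __kmp_team_from_gtid(gtid);
  // The mappings round-trip by construction:
  KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid);
  KMP_DEBUG_ASSERT(__kmp_gtid_from_thread(thr) == gtid);
}
#endif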
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait; // TRUE or FALSE; from KMP_USER_LEVEL_MWAIT
extern int __kmp_umwait_enabled; // runtime check if user-level mwait enabled
extern int __kmp_mwait_enabled; // runtime check if ring3 mwait is enabled
extern int __kmp_mwait_hints; // hints to pass in to mwait
#endif

/* ------------------------------------------------------------------------- */

extern kmp_global_t __kmp_global; /* global status */

extern kmp_info_t __kmp_monitor;
// For debugging support library:
extern std::atomic<kmp_int32> __kmp_team_counter;
extern std::atomic<kmp_int32> __kmp_task_counter;

#if USE_DEBUGGER
#define _KMP_GEN_ID(counter)                                                   \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif /* USE_DEBUGGER */

#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
/* ------------------------------------------------------------------------- */

extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);

extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);

extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern void __kmp_unregister_library(void); // called by __kmp_internal_end()

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);

extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);

#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif

extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);
// Returns the current thread; __kmp_entry_gtid() registers the calling
// thread first if it is not registered yet.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();

  return __kmp_threads[gtid];
}
extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void __kmp_check_stksize(size_t *val);

extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)

#if USE_FAST_MEMORY
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size)                                    \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr)                                         \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#endif
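// Illustrative sketch (not part of the original header; disabled with #if 0):
// allocating and freeing a block from the calling thread's fast memory pool.
// Assumes fast memory is compiled in; the function name is hypothetical.
#if 0
static void __kmp_example_fast_alloc(size_t n) {
  kmp_info_t *this_thr = __kmp_entry_thread();
  void *p = __kmp_fast_allocate(this_thr, n); // pool-backed, source-located
  // ... use p ...
  __kmp_fast_free(this_thr, p); // return the block to the same thread's pool
}
#endif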
extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size)                                          \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize)                                 \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size)                                    \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr)                                             \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)

#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);

extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);

extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
extern void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb,
                                    int num_teams_ub, int num_threads);

extern void __kmp_yield();
/* schedule the work */
extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);

/* get the next work iteration */
extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);

/* set the done flag and release threads */
extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */
extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker,
                               kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                               void *obj);
extern void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
                             kmp_uint32 (*pred)(void *, kmp_uint32),
                             void *obj);

extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag,
                          int final_spin
#if USE_ITT_BUILD
                          ,
                          void *itt_sync_obj
#endif
);
extern void __kmp_release_64(kmp_flag_64<> *flag);
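// Illustrative sketch (not part of the original header; disabled with #if 0):
// spinning on a 32-bit location with __kmp_wait_4 until it becomes nonzero,
// using the __kmp_neq_4 predicate declared above. The last argument is an
// optional object for tracing tools and may be NULL. Names are hypothetical.
#if 0
static kmp_uint32 __kmp_example_spinner = 0;
static void __kmp_example_wait_until_set() {
  // Returns once __kmp_neq_4(__kmp_example_spinner, 0) holds.
  __kmp_wait_4(&__kmp_example_spinner, 0, __kmp_neq_4, NULL);
}
#endif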
extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);

#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif

extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);
extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && KMP_ARCH_AARCH64)
extern void
__kmp_initialize_system_tick(void); /* initialize timer tick value */
#endif

extern void
__kmp_runtime_initialize(void); /* machine-specific initialization */
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                                  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(void);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(
    int gtid, int isa_root); /* set thread's affinity */
extern void __kmp_affinity_set_place(int gtid);
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_OS_LINUX || KMP_OS_FREEBSD
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
  int gtid = __kmp_entry_gtid();
  kmp_root_t *r = __kmp_threads[gtid]->th.th_root;
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, TRUE);
    r->r.r_affinity_assigned = TRUE;
  }
}
#else /* KMP_AFFINITY_SUPPORTED */
#define __kmp_assign_root_init_mask() /* no-op */
#endif
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                         kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
#if KMP_USE_FUTEX
extern int __kmp_futex_determine_capable(void);
#endif

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);

extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

#if KMP_USE_MONITOR
extern void __kmp_create_monitor(kmp_info_t *th);
#endif

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

#if KMP_OS_WINDOWS
extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);
#endif

#if KMP_USE_MONITOR
extern void __kmp_reap_monitor(kmp_info_t *th);
#endif
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);
extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

#if KMP_OS_UNIX
extern void __kmp_register_atfork(void);
#endif
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_initialize_thread(kmp_info_t *th);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int tid);
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
                    ompt_data_t ompt_parallel_data,
#endif
                    kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);

/* ------------------------------------------------------------------------ */

extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);
KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);
/* declarations for internal use */

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
extern int __kmp_barrier_gomp_cancel(int gtid);
/* Tells the fork call which compiler generated the fork call, and therefore
   how to deal with the call. */
enum fork_context_e {
  fork_context_gnu, /* called from GNU generated code, so must not invoke the
                       microtask internally */
  fork_context_intel, /* called from Intel generated code */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
                           kmp_va_list ap);

extern void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
                            ,
                            enum fork_context_e fork_context
#endif
                            ,
                            int exit_teams = 0);
extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);
KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, size_t len);

/* functions called from the environment-initialization code in
   kmp_settings.cpp */
void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);
extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);

extern kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
                                                       int gtid,
                                                       kmp_task_t *task);
extern void __kmp_fulfill_event(kmp_event_t *event);
extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
                                  int always);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
#if USE_ITT_BUILD
                                 ,
                                 void *itt_sync_obj
#endif
                                 ,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

#if KMP_OS_UNIX
extern int __kmp_read_from_file(char const *path, char const *format, ...);
#endif

/* ------------------------------------------------------------------------ */
// Assembly routines that have no compiler intrinsic replacement

extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[]
#if OMPT_SUPPORT
                                  ,
                                  void **exit_frame_ptr
#endif
);

/* ------------------------------------------------------------------------ */

KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags);
KMP_EXPORT void __kmpc_end(ident_t *);

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);
KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);

KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *);

KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *);
KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs,
                                 kmpc_micro microtask, ...);

KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid);

KMP_EXPORT void __kmpc_flush(ident_t *);
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_masked(ident_t *, kmp_int32 global_tid,
                                   kmp_int32 filter);
KMP_EXPORT void __kmpc_end_masked(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);

KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
                                                  kmp_int32 global_tid);

KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);
KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);
extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);
/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT kmp_task_t *__kmpc_omp_target_task_alloc(
    ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
    size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);

#if TASK_UNUSED
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);
#endif // TASK_UNUSED
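// Illustrative sketch (not part of the original header; disabled with #if 0):
// how a caller typically pairs __kmpc_omp_task_alloc with __kmpc_omp_task.
// The flags value 1 (a tied task) and both function names are illustrative
// assumptions, not taken from this header.
#if 0
static kmp_int32 __kmp_example_task_entry(kmp_int32 gtid, void *task) {
  // ... body of the task ...
  return 0;
}
static void __kmp_example_spawn_task(ident_t *loc) {
  kmp_int32 gtid = __kmp_entry_gtid();
  kmp_task_t *task = __kmpc_omp_task_alloc(
      loc, gtid, /*flags=*/1, sizeof(kmp_task_t), /*sizeof_shareds=*/0,
      (kmp_routine_entry_t)__kmp_example_task_entry);
  __kmpc_omp_task(loc, gtid, task); // schedule (or immediately run) the task
}
#endif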
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);
extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid,
                                  kmp_task_t *task, kmp_int32 if_val,
                                  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                                  kmp_int32 nogroup, kmp_int32 sched,
                                  kmp_uint64 grainsize, kmp_int32 modifier,
                                  void *task_dup);

/* task reduction interface */
KMP_EXPORT void *__kmpc_task_reduction_init(int gtid, int num_data, void *data);
KMP_EXPORT void *__kmpc_taskred_init(int gtid, int num_data, void *data);
KMP_EXPORT void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d);
KMP_EXPORT void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid,
                                                     int is_ws, int num,
                                                     void *data);
KMP_EXPORT void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid,
                                              int is_ws, int num, void *data);
KMP_EXPORT void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid,
                                                    int is_ws);

KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
    kmp_task_affinity_info_t *affin_list);
KMP_EXPORT void __kmp_set_num_teams(int num_teams);
KMP_EXPORT int __kmp_get_max_teams(void);
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit);
KMP_EXPORT int __kmp_get_teams_thread_limit(void);
/* lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);

KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
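// Illustrative sketch (not part of the original header; disabled with #if 0):
// the usual init/set/unset/destroy lifecycle of a user lock through the
// __kmpc_ lock interface. Hint value 0 and the function name are illustrative;
// compiler-generated code passes the address of the user's lock variable.
#if 0
static void __kmp_example_lock_lifecycle(ident_t *loc) {
  kmp_int32 gtid = __kmp_entry_gtid();
  void *user_lock = NULL; // storage slot for the runtime's lock pointer
  __kmpc_init_lock_with_hint(loc, gtid, &user_lock, /*hint=*/0);
  __kmpc_set_lock(loc, gtid, &user_lock);
  // ... critical work ...
  __kmpc_unset_lock(loc, gtid, &user_lock);
  __kmpc_destroy_lock(loc, gtid, &user_lock);
}
#endif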
/* interface to fast scalable reduce methods routines */

KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
                                         kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
                                  kmp_critical_name *lck);

/* internal fast reduction routines */

extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);

// this function is for testing the set/get/determine reduce method
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);
KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();

KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 num_threads);

KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                                      int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                                      kmp_int32 num_teams,
                                      kmp_int32 num_threads);
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 num_teams_lb,
                                         kmp_int32 num_teams_ub,
                                         kmp_int32 num_threads);
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
                                     kmp_int32 num_dims,
                                     const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, size_t size,
                                             void ***cache);

/* symbols for mutual detection of the OpenMP library linked against */
extern int _You_must_link_with_exactly_one_OpenMP_library;
extern int _You_must_link_with_Intel_OpenMP_library;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
extern int _You_must_link_with_Microsoft_OpenMP_library;
#endif
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);
void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();
// ompc_, kmpc_ entries moved from omp.h.
#if KMP_OS_WINDOWS
#define KMPC_CONVENTION __cdecl
#else
#define KMPC_CONVENTION
#endif

#ifndef __OMP_H
typedef enum omp_sched_t {
  omp_sched_static = 1,
  omp_sched_dynamic = 2,
  omp_sched_guided = 3,
  omp_sched_auto = 4
} omp_sched_t;
typedef void *kmp_affinity_mask_t;
#endif
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);

KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);

void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format);
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size);
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format);
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size,
                                              char const *format);
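// Illustrative sketch (not part of the original header; disabled with #if 0):
// using the omp.h-style entry points declared above to pick a schedule and a
// worker stack size. The function name and values are illustrative.
#if 0
static void __kmp_example_configure_runtime() {
  ompc_set_schedule(omp_sched_dynamic, /*chunk=*/4);
  kmpc_set_stacksize_s((size_t)4 * 1024 * 1024); // 4 MiB worker stacks
}
#endif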
enum kmp_target_offload_kind {
  tgt_disabled = 0,
  tgt_default = 1,
  tgt_mandatory = 2
};
typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
// Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise.
extern kmp_target_offload_kind_t __kmp_target_offload;
extern int __kmpc_get_target_offload();

// Constants used in libomptarget
#define KMP_DEVICE_DEFAULT -1 // This is libomptarget's default device.
#define KMP_DEVICE_ALL -11 // This is libomptarget's "all devices" device.
// OMP pause resource support
typedef enum kmp_pause_status_t {
  kmp_not_paused = 0, // status is not paused, nor soon to be
  kmp_soft_paused = 1, // status is soft-paused, maybe restart soon
  kmp_hard_paused = 2 // status is hard-paused, restart via serial initialization
} kmp_pause_status_t;

// This stores the pause state of the runtime.
extern kmp_pause_status_t __kmp_pause_status;
extern int __kmpc_pause_resource(kmp_pause_status_t level);
extern int __kmp_pause_resource(kmp_pause_status_t level);
// Soft resume sets __kmp_pause_status and wakes up all threads.
extern void __kmp_resume_if_soft_paused();
// Hard resume simply resets the status to not paused. The library will behave
// normally once partially re-initialized.
static inline void __kmp_resume_if_hard_paused() {
  if (__kmp_pause_status == kmp_hard_paused) {
    __kmp_pause_status = kmp_not_paused;
  }
}
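// Illustrative sketch (not part of the original header; disabled with #if 0):
// a hard pause followed by a resume. Assumes, as an illustration only, that a
// zero return from __kmp_pause_resource indicates success.
#if 0
static void __kmp_example_pause_cycle() {
  if (__kmp_pause_resource(kmp_hard_paused) == 0) {
    // ... runtime resources are released here ...
    __kmp_resume_if_hard_paused(); // status back to kmp_not_paused
  }
}
#endif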
extern void __kmp_omp_display_env(int verbose);
// 1: it is initializing the hidden helper team
extern volatile int __kmp_init_hidden_helper;
// 1: the hidden helper team is done
extern volatile int __kmp_hidden_helper_team_done;
// 1: enable hidden helper tasks
extern kmp_int32 __kmp_enable_hidden_helper;
// Main thread of the hidden helper team
extern kmp_info_t *__kmp_hidden_helper_main_thread;
// Descriptors for the hidden helper threads
extern kmp_info_t **__kmp_hidden_helper_threads;
// Number of hidden helper threads
extern kmp_int32 __kmp_hidden_helper_threads_num;
// Number of hidden helper tasks that have not been executed yet
extern std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;

extern void __kmp_hidden_helper_initialize();
extern void __kmp_hidden_helper_threads_initz_routine();
extern void __kmp_do_initialize_hidden_helper_threads();
extern void __kmp_hidden_helper_threads_initz_wait();
extern void __kmp_hidden_helper_initz_release();
extern void __kmp_hidden_helper_threads_deinitz_wait();
extern void __kmp_hidden_helper_threads_deinitz_release();
extern void __kmp_hidden_helper_main_thread_wait();
extern void __kmp_hidden_helper_worker_thread_wait();
extern void __kmp_hidden_helper_worker_thread_signal();
extern void __kmp_hidden_helper_main_thread_release();

// Check whether a given gtid belongs to a hidden helper thread.
#define KMP_HIDDEN_HELPER_THREAD(gtid)                                         \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid)                                  \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_TEAM(team)                                           \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)

// Map a gtid to a hidden helper thread. The first hidden helper thread, i.e.
// the main thread of the hidden helper team, is skipped.
#define KMP_GTID_TO_SHADOW_GTID(gtid)                                          \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
// Return the adjusted gtid value by subtracting the number of hidden helper
// threads from gtid: the gtid the thread would receive if there were no
// hidden helper threads.
static inline int __kmp_adjust_gtid_for_hidden_helpers(int gtid) {
  int adjusted_gtid = gtid;
  if (__kmp_hidden_helper_threads_num > 0 && gtid > 0 &&
      gtid - __kmp_hidden_helper_threads_num >= 0) {
    adjusted_gtid -= __kmp_hidden_helper_threads_num;
  }
  return adjusted_gtid;
}
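// Worked example (added for illustration): with
// __kmp_hidden_helper_threads_num == 8, gtids 1..8 belong to hidden helper
// threads, so a regular thread with gtid 9 is adjusted to 9 - 8 == 1, the
// gtid it would have had if no hidden helper threads existed.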
// Support for error directive
typedef enum kmp_severity_t {
  severity_warning = 1,
  severity_fatal = 2
} kmp_severity_t;
extern void __kmpc_error(ident_t *loc, int severity, const char *message);
// Support for scope directive
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
template <bool C, bool S>
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_suspend_64(int th_gtid,
                                    kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
extern void __kmp_mwait_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_mwait_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_mwait_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_mwait_oncore(int th_gtid, kmp_flag_oncore *flag);
#endif
template <bool C, bool S>
extern void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_resume_64(int target_gtid,
                                   kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);
template <bool C, bool S>
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                                  kmp_atomic_flag_64<C, S> *flag,
                                  int final_spin, int *thread_finished,
#if USE_ITT_BUILD
                                  void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                                  kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished,
#if USE_ITT_BUILD
                               void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                               kmp_int32 is_constrained);
extern int __kmp_nesting_mode;
extern int __kmp_nesting_mode_nlevels;
extern int *__kmp_nesting_nth_level;
extern void __kmp_init_nesting_mode();
extern void __kmp_set_nesting_mode_threads();
// RAII wrapper that safely opens and closes a C-style FILE *; stdout and
// stderr are recognized and never closed.
class kmp_safe_raii_file_t {
  FILE *f;

  void close() {
    if (f && f != stdout && f != stderr) {
      fclose(f);
      f = nullptr;
    }
  }

public:
  kmp_safe_raii_file_t() : f(nullptr) {}
  kmp_safe_raii_file_t(const char *filename, const char *mode,
                       const char *env_var = nullptr)
      : f(nullptr) {
    open(filename, mode, env_var);
  }
  ~kmp_safe_raii_file_t() { close(); }

  /// Open filename using mode; the file is closed automatically in the
  /// destructor. If non-null, env_var names the environment variable the
  /// filename came from, so the error message can mention it.
  void open(const char *filename, const char *mode,
            const char *env_var = nullptr) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f) {
      int code = errno;
      if (env_var) {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    KMP_HNT(CheckEnvVar, env_var, filename), __kmp_msg_null);
      } else {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    __kmp_msg_null);
      }
    }
  }

  operator bool() { return bool(f); }
  operator FILE *() { return f; }
};
// Conversion routines between the kmp integer types. The primary template
// dispatches on relative size and signedness of the two types; each
// specialization asserts (in debug builds) that the value fits in the target.
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};

// Both types are signed; Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};

// Source signed, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, Target signed
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Both types are unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
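// Illustrative sketch (not part of the original header; disabled with #if 0):
// __kmp_type_convert selects the kmp_convert specialization from the sizes
// and signedness of the two types, asserting in debug builds when the value
// would not fit in the target. The function name is hypothetical.
#if 0
static void __kmp_example_type_convert() {
  kmp_int64 big = 42;
  kmp_int32 small = 0;
  __kmp_type_convert(big, &small); // asserts 42 fits in kmp_int32, then casts
}
#endif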