17 #include "kmp_config.h"
27 #ifndef KMP_STATIC_STEAL_ENABLED
28 #define KMP_STATIC_STEAL_ENABLED 1
31 #define TASK_CURRENT_NOT_QUEUED 0
32 #define TASK_CURRENT_QUEUED 1
34 #ifdef BUILD_TIED_TASK_STACK
35 #define TASK_STACK_EMPTY 0
36 #define TASK_STACK_BLOCK_BITS 5
38 #define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
40 #define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
43 #define TASK_NOT_PUSHED 1
44 #define TASK_SUCCESSFULLY_PUSHED 0
47 #define TASK_EXPLICIT 1
48 #define TASK_IMPLICIT 0
51 #define TASK_DETACHABLE 1
52 #define TASK_UNDETACHABLE 0
54 #define KMP_CANCEL_THREADS
55 #define KMP_THREAD_ATTR
59 #if defined(__ANDROID__)
60 #undef KMP_CANCEL_THREADS
70 #include <type_traits>
74 #include <sys/types.h>
83 #include "kmp_safe_c_api.h"
89 #if KMP_USE_HIER_SCHED
91 #undef KMP_USE_HIER_SCHED
92 #define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
95 #if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
97 #ifndef HWLOC_OBJ_NUMANODE
98 #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
100 #ifndef HWLOC_OBJ_PACKAGE
101 #define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
103 #if HWLOC_API_VERSION >= 0x00020000
105 typedef int kmp_hwloc_depth_t;
107 typedef unsigned int kmp_hwloc_depth_t;
111 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
112 #include <xmmintrin.h>
115 #include "kmp_debug.h"
116 #include "kmp_lock.h"
117 #include "kmp_version.h"
118 #include "kmp_barrier.h"
120 #include "kmp_debugger.h"
122 #include "kmp_i18n.h"
124 #define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
126 #include "kmp_wrapper_malloc.h"
129 #if !defined NSIG && defined _NSIG
135 #pragma weak clock_gettime
139 #include "ompt-internal.h"
143 #include "ompd-specific.h"
147 #define UNLIKELY(x) (x)
156 #ifndef USE_FAST_MEMORY
157 #define USE_FAST_MEMORY 3
160 #ifndef KMP_NESTED_HOT_TEAMS
161 #define KMP_NESTED_HOT_TEAMS 0
162 #define USE_NESTED_HOT_ARG(x)
164 #if KMP_NESTED_HOT_TEAMS
165 #define USE_NESTED_HOT_ARG(x) , x
167 #define USE_NESTED_HOT_ARG(x)
172 #ifndef USE_CMP_XCHG_FOR_BGET
173 #define USE_CMP_XCHG_FOR_BGET 1
181 #define KMP_NSEC_PER_SEC 1000000000L
182 #define KMP_USEC_PER_SEC 1000000L
206 KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
207 KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
208 KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
210 KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
211 KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
224 KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
225 KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
226 KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
227 KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
228 KMP_IDENT_OPENMP_SPEC_VERSION_MASK = 0xFF000000
249 kmp_int32 get_openmp_version() {
250 return (((flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24) & 0xFF);
258 typedef union kmp_team kmp_team_t;
259 typedef struct kmp_taskdata kmp_taskdata_t;
260 typedef union kmp_task_team kmp_task_team_t;
261 typedef union kmp_team kmp_team_p;
262 typedef union kmp_info kmp_info_p;
263 typedef union kmp_root kmp_root_p;
265 template <bool C = false, bool S = true> class kmp_flag_32;
266 template <bool C = false, bool S = true> class kmp_flag_64;
267 template <bool C = false, bool S = true> class kmp_atomic_flag_64;
268 class kmp_flag_oncore;
278 #define KMP_PACK_64(HIGH_32, LOW_32) \
279 ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
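// Illustrative sketch (not part of the header): how KMP_PACK_64 combines two
// 32-bit halves into a single kmp_int64 and how they can be recovered. The
// function and variable names below are hypothetical.
#if 0
void pack_64_example() {
  kmp_int64 packed = KMP_PACK_64(0x1234u, 0xABCDu); // high word, low word
  kmp_uint32 hi = (kmp_uint32)(((kmp_uint64)packed) >> 32);        // 0x1234
  kmp_uint32 lo = (kmp_uint32)((kmp_uint64)packed & 0xFFFFFFFFu);  // 0xABCD
  (void)hi;
  (void)lo;
}
#endif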
282 #define SKIP_WS(_x) \
284 while (*(_x) == ' ' || *(_x) == '\t') \
287 #define SKIP_DIGITS(_x) \
289 while (*(_x) >= '0' && *(_x) <= '9') \
292 #define SKIP_TOKEN(_x) \
294 while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
295 (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
298 #define SKIP_TO(_x, _c) \
300 while (*(_x) != '\0' && *(_x) != (_c)) \
306 #define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
307 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
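// Illustrative sketch (not part of the header): the SKIP_* helpers above
// advance a char pointer in place (their elided loop bodies increment the
// pointer), which is how the settings parser walks strings such as
// "  128,dynamic". Names below are hypothetical.
#if 0
void skip_macros_example() {
  const char *p = "  128,dynamic";
  SKIP_WS(p);      // p -> "128,dynamic"
  SKIP_DIGITS(p);  // p -> ",dynamic"
  SKIP_TO(p, ','); // p still points at the ',' delimiter
}
#endif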
312 enum kmp_state_timer {
322 #ifdef USE_LOAD_BALANCE
323 dynamic_load_balance,
326 dynamic_thread_limit,
332 #ifndef KMP_SCHED_TYPE_DEFINED
333 #define KMP_SCHED_TYPE_DEFINED
334 typedef enum kmp_sched {
337 kmp_sched_static = 1,
338 kmp_sched_dynamic = 2,
339 kmp_sched_guided = 3,
341 kmp_sched_upper_std = 5,
342 kmp_sched_lower_ext = 100,
343 kmp_sched_trapezoidal = 101,
344 #if KMP_STATIC_STEAL_ENABLED
345 kmp_sched_static_steal = 102,
348 kmp_sched_default = kmp_sched_static,
349 kmp_sched_monotonic = 0x80000000
359 kmp_sch_static_chunked = 33,
361 kmp_sch_dynamic_chunked = 35,
363 kmp_sch_runtime = 37,
365 kmp_sch_trapezoidal = 39,
368 kmp_sch_static_greedy = 40,
369 kmp_sch_static_balanced = 41,
371 kmp_sch_guided_iterative_chunked = 42,
372 kmp_sch_guided_analytical_chunked = 43,
374 kmp_sch_static_steal = 44,
377 kmp_sch_static_balanced_chunked = 45,
385 kmp_ord_static_chunked = 65,
387 kmp_ord_dynamic_chunked = 67,
388 kmp_ord_guided_chunked = 68,
389 kmp_ord_runtime = 69,
391 kmp_ord_trapezoidal = 71,
404 kmp_nm_static_chunked =
407 kmp_nm_dynamic_chunked = 163,
409 kmp_nm_runtime = 165,
411 kmp_nm_trapezoidal = 167,
414 kmp_nm_static_greedy = 168,
415 kmp_nm_static_balanced = 169,
417 kmp_nm_guided_iterative_chunked = 170,
418 kmp_nm_guided_analytical_chunked = 171,
419 kmp_nm_static_steal =
422 kmp_nm_ord_static_chunked = 193,
424 kmp_nm_ord_dynamic_chunked = 195,
425 kmp_nm_ord_guided_chunked = 196,
426 kmp_nm_ord_runtime = 197,
428 kmp_nm_ord_trapezoidal = 199,
450 #define SCHEDULE_WITHOUT_MODIFIERS(s) \
453 #define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
454 #define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
455 #define SCHEDULE_HAS_NO_MODIFIERS(s) \
456 (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
457 #define SCHEDULE_GET_MODIFIERS(s) \
458 ((enum sched_type)( \
459 (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
460 #define SCHEDULE_SET_MODIFIERS(s, m) \
461 (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
462 #define SCHEDULE_NONMONOTONIC 0
463 #define SCHEDULE_MONOTONIC 1
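// Illustrative sketch (not part of the header): inspecting and applying the
// monotonic/nonmonotonic schedule modifier bits with the SCHEDULE_* helpers.
// Assumes the kmp_sch_modifier_monotonic / kmp_sch_modifier_nonmonotonic
// members of enum sched_type, which this excerpt elides.
#if 0
void schedule_modifier_example(enum sched_type s) {
  if (SCHEDULE_HAS_NO_MODIFIERS(s)) {
    SCHEDULE_SET_MODIFIERS(s, kmp_sch_modifier_monotonic); // request monotonic
  }
  enum sched_type mods = SCHEDULE_GET_MODIFIERS(s);      // modifier bits only
  enum sched_type base = SCHEDULE_WITHOUT_MODIFIERS(s);  // plain schedule kind
  (void)mods;
  (void)base;
}
#endif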
470 __kmp_sched_apply_mods_stdkind(kmp_sched_t *kind,
472 if (SCHEDULE_HAS_MONOTONIC(internal_kind)) {
473 *kind = (kmp_sched_t)((int)*kind | (int)kmp_sched_monotonic);
479 __kmp_sched_apply_mods_intkind(kmp_sched_t kind,
481 if ((int)kind & (int)kmp_sched_monotonic) {
482 *internal_kind = (enum sched_type)((int)*internal_kind |
488 static inline kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind) {
489 return (kmp_sched_t)((int)kind & ~((int)kmp_sched_monotonic));
493 typedef union kmp_r_sched {
512 enum clock_function_type {
513 clock_function_gettimeofday,
514 clock_function_clock_gettime
518 #if KMP_MIC_SUPPORTED
519 enum mic_type { non_mic, mic1, mic2, mic3, dummy };
524 #undef KMP_FAST_REDUCTION_BARRIER
525 #define KMP_FAST_REDUCTION_BARRIER 1
527 #undef KMP_FAST_REDUCTION_CORE_DUO
528 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
529 #define KMP_FAST_REDUCTION_CORE_DUO 1
532 enum _reduction_method {
533 reduction_method_not_defined = 0,
534 critical_reduce_block = (1 << 8),
535 atomic_reduce_block = (2 << 8),
536 tree_reduce_block = (3 << 8),
537 empty_reduce_block = (4 << 8)
552 #if KMP_FAST_REDUCTION_BARRIER
553 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
554 ((reduction_method) | (barrier_type))
556 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
557 ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
559 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
560 ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
562 #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
565 #define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
566 (packed_reduction_method)
568 #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
571 #define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
572 ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
573 (which_reduction_block))
575 #if KMP_FAST_REDUCTION_BARRIER
576 #define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
577 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
579 #define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
580 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
583 typedef int PACKED_REDUCTION_METHOD_T;
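// Illustrative sketch (not part of the header): packing a reduction method
// together with its barrier type and unpacking both fields later, per the
// PACK/UNPACK macros above (barrier type in the low byte, method above it).
// Assumes KMP_FAST_REDUCTION_BARRIER is enabled.
#if 0
void reduction_packing_example() {
  PACKED_REDUCTION_METHOD_T packed =
      PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier);
  enum _reduction_method method = UNPACK_REDUCTION_METHOD(packed); // tree_reduce_block
  enum barrier_type bt = UNPACK_REDUCTION_BARRIER(packed);         // bs_reduction_barrier
  if (TEST_REDUCTION_METHOD(packed, tree_reduce_block)) {
    // take the tree-reduction path
  }
  (void)method;
  (void)bt;
}
#endif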
590 #pragma warning(push)
591 #pragma warning(disable : 271 310)
604 enum kmp_hw_t : int {
621 typedef enum kmp_hw_core_type_t {
622 KMP_HW_CORE_TYPE_UNKNOWN = 0x0,
623 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
624 KMP_HW_CORE_TYPE_ATOM = 0x20,
625 KMP_HW_CORE_TYPE_CORE = 0x40,
626 KMP_HW_MAX_NUM_CORE_TYPES = 3,
628 KMP_HW_MAX_NUM_CORE_TYPES = 1,
630 } kmp_hw_core_type_t;
632 #define KMP_HW_MAX_NUM_CORE_EFFS 8
634 #define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
635 KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
636 #define KMP_ASSERT_VALID_HW_TYPE(type) \
637 KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
639 #define KMP_FOREACH_HW_TYPE(type) \
640 for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
641 type = (kmp_hw_t)((int)type + 1))
643 const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural = false);
644 const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural = false);
645 const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type);
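// Illustrative sketch (not part of the header): iterating every topology layer
// with KMP_FOREACH_HW_TYPE and mapping each layer to its printable keyword.
#if 0
void hw_type_example() {
  KMP_FOREACH_HW_TYPE(type) {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
    const char *name = __kmp_hw_get_keyword(type, /*plural=*/false);
    (void)name; // e.g. "socket", "core", "thread"
  }
}
#endif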
648 #if KMP_AFFINITY_SUPPORTED
652 #if _MSC_VER < 1600 && KMP_MSVC_COMPAT
653 typedef struct GROUP_AFFINITY {
659 #if KMP_GROUP_AFFINITY
660 extern int __kmp_num_proc_groups;
662 static const int __kmp_num_proc_groups = 1;
664 typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
665 extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
667 typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
668 extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
670 typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
671 extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
673 typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
675 extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
679 extern hwloc_topology_t __kmp_hwloc_topology;
680 extern int __kmp_hwloc_error;
683 extern size_t __kmp_affin_mask_size;
684 #define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
685 #define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
686 #define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
687 #define KMP_CPU_SET_ITERATE(i, mask) \
688 for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
689 #define KMP_CPU_SET(i, mask) (mask)->set(i)
690 #define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
691 #define KMP_CPU_CLR(i, mask) (mask)->clear(i)
692 #define KMP_CPU_ZERO(mask) (mask)->zero()
693 #define KMP_CPU_COPY(dest, src) (dest)->copy(src)
694 #define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
695 #define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
696 #define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
697 #define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
698 #define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
699 #define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
700 #define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
701 #define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
702 #define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
703 #define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
704 #define KMP_CPU_ALLOC_ARRAY(arr, n) \
705 (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
706 #define KMP_CPU_FREE_ARRAY(arr, n) \
707 __kmp_affinity_dispatch->deallocate_mask_array(arr)
708 #define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
709 #define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
710 #define __kmp_get_system_affinity(mask, abort_bool) \
711 (mask)->get_system_affinity(abort_bool)
712 #define __kmp_set_system_affinity(mask, abort_bool) \
713 (mask)->set_system_affinity(abort_bool)
714 #define __kmp_get_proc_group(mask) (mask)->get_proc_group()
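// Illustrative sketch (not part of the header): typical use of the KMP_CPU_*
// wrappers, which forward to the KMPAffinity::Mask implementation selected at
// runtime by __kmp_affinity_dispatch. Names below are hypothetical.
#if 0
void affinity_mask_example() {
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(0, mask); // add logical CPU 0
  KMP_CPU_SET(2, mask); // add logical CPU 2
  int cpu;
  KMP_CPU_SET_ITERATE(cpu, mask) {
    if (KMP_CPU_ISSET(cpu, mask)) {
      // cpu is a member of the mask (0, then 2)
    }
  }
  KMP_CPU_FREE(mask);
}
#endif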
720 void *operator new(size_t n);
721 void operator delete(void *p);
722 void *operator new[](size_t n);
723 void operator delete[](void *p);
726 virtual void set(int i) {}
728 virtual bool is_set(int i) const { return false; }
730 virtual void clear(int i) {}
732 virtual void zero() {}
734 virtual void copy(const Mask *src) {}
736 virtual void bitwise_and(const Mask *rhs) {}
738 virtual void bitwise_or(const Mask *rhs) {}
740 virtual void bitwise_not() {}
743 virtual int begin() const { return 0; }
744 virtual int end() const { return 0; }
745 virtual int next(int previous) const { return 0; }
747 virtual int set_process_affinity(bool abort_on_error) const { return -1; }
750 virtual int set_system_affinity(bool abort_on_error) const { return -1; }
752 virtual int get_system_affinity(bool abort_on_error) { return -1; }
755 virtual int get_proc_group() const { return -1; }
757 void *operator new(size_t n);
758 void operator delete(void *p);
760 virtual ~KMPAffinity() = default;
762 virtual void determine_capable(const char *env_var) {}
764 virtual void bind_thread(int proc) {}
766 virtual Mask *allocate_mask() { return nullptr; }
767 virtual void deallocate_mask(Mask *m) {}
768 virtual Mask *allocate_mask_array(int num) { return nullptr; }
769 virtual void deallocate_mask_array(Mask *m) {}
770 virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
771 static void pick_api();
772 static void destroy_api();
780 virtual api_type get_api_type() const {
786 static bool picked_api;
789 typedef KMPAffinity::Mask kmp_affin_mask_t;
790 extern KMPAffinity *__kmp_affinity_dispatch;
794 #define KMP_AFFIN_MASK_PRINT_LEN 1024
808 enum affinity_top_method {
809 affinity_top_method_all = 0,
810 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
811 affinity_top_method_apicid,
812 affinity_top_method_x2apicid,
813 affinity_top_method_x2apicid_1f,
815 affinity_top_method_cpuinfo,
816 #if KMP_GROUP_AFFINITY
817 affinity_top_method_group,
819 affinity_top_method_flat,
821 affinity_top_method_hwloc,
823 affinity_top_method_default
826 #define affinity_respect_mask_default (-1)
828 extern enum affinity_type __kmp_affinity_type;
829 extern kmp_hw_t __kmp_affinity_gran;
830 extern int __kmp_affinity_gran_levels;
831 extern int __kmp_affinity_dups;
832 extern enum affinity_top_method __kmp_affinity_top_method;
833 extern int __kmp_affinity_compact;
834 extern int __kmp_affinity_offset;
835 extern int __kmp_affinity_verbose;
836 extern int __kmp_affinity_warnings;
837 extern int __kmp_affinity_respect_mask;
838 extern char *__kmp_affinity_proclist;
839 extern kmp_affin_mask_t *__kmp_affinity_masks;
840 extern unsigned __kmp_affinity_num_masks;
841 extern void __kmp_affinity_bind_thread(int which);
843 extern kmp_affin_mask_t *__kmp_affin_fullMask;
844 extern char *__kmp_cpuinfo_file;
849 typedef enum kmp_proc_bind_t {
859 typedef struct kmp_nested_proc_bind_t {
860 kmp_proc_bind_t *bind_types;
863 } kmp_nested_proc_bind_t;
865 extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
866 extern kmp_proc_bind_t __kmp_teams_proc_bind;
868 extern int __kmp_display_affinity;
869 extern char *__kmp_affinity_format;
870 static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
872 extern int __kmp_tool;
873 extern char *__kmp_tool_libraries;
876 #if KMP_AFFINITY_SUPPORTED
877 #define KMP_PLACE_ALL (-1)
878 #define KMP_PLACE_UNDEFINED (-2)
880 #define KMP_AFFINITY_NON_PROC_BIND \
881 ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
882 __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
883 (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
886 extern int __kmp_affinity_num_places;
888 typedef enum kmp_cancel_kind_t {
897 typedef struct kmp_hws_item {
902 extern kmp_hws_item_t __kmp_hws_socket;
903 extern kmp_hws_item_t __kmp_hws_die;
904 extern kmp_hws_item_t __kmp_hws_node;
905 extern kmp_hws_item_t __kmp_hws_tile;
906 extern kmp_hws_item_t __kmp_hws_core;
907 extern kmp_hws_item_t __kmp_hws_proc;
908 extern int __kmp_hws_requested;
909 extern int __kmp_hws_abs_flag;
913 #define KMP_PAD(type, sz) \
914 (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
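// Illustrative note (not part of the header): KMP_PAD(type, sz) rounds
// sizeof(type) up to the next multiple of sz, e.g. a hypothetical 20-byte
// payload padded to a 64-byte cache line yields 64.
#if 0
struct pad_example_t { char data[20]; }; // hypothetical 20-byte payload
static_assert(KMP_PAD(pad_example_t, 64) == 64, "rounded up to one cache line");
static_assert(KMP_PAD(pad_example_t, 16) == 32, "rounded up to two 16-byte units");
#endif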
918 #define KMP_GTID_DNE (-2)
919 #define KMP_GTID_SHUTDOWN (-3)
920 #define KMP_GTID_MONITOR (-4)
921 #define KMP_GTID_UNKNOWN (-5)
922 #define KMP_GTID_MIN (-6)
928 typedef uintptr_t omp_uintptr_t;
931 omp_atk_sync_hint = 1,
932 omp_atk_alignment = 2,
934 omp_atk_pool_size = 4,
935 omp_atk_fallback = 5,
938 omp_atk_partition = 8
939 } omp_alloctrait_key_t;
944 omp_atv_contended = 3,
945 omp_atv_uncontended = 4,
946 omp_atv_serialized = 5,
947 omp_atv_sequential = omp_atv_serialized,
953 omp_atv_default_mem_fb = 11,
954 omp_atv_null_fb = 12,
955 omp_atv_abort_fb = 13,
956 omp_atv_allocator_fb = 14,
957 omp_atv_environment = 15,
958 omp_atv_nearest = 16,
959 omp_atv_blocked = 17,
960 omp_atv_interleaved = 18
961 } omp_alloctrait_value_t;
962 #define omp_atv_default ((omp_uintptr_t)-1)
964 typedef void *omp_memspace_handle_t;
965 extern omp_memspace_handle_t const omp_default_mem_space;
966 extern omp_memspace_handle_t const omp_large_cap_mem_space;
967 extern omp_memspace_handle_t const omp_const_mem_space;
968 extern omp_memspace_handle_t const omp_high_bw_mem_space;
969 extern omp_memspace_handle_t const omp_low_lat_mem_space;
971 extern omp_memspace_handle_t const llvm_omp_target_host_mem_space;
972 extern omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
973 extern omp_memspace_handle_t const llvm_omp_target_device_mem_space;
976 omp_alloctrait_key_t key;
980 typedef void *omp_allocator_handle_t;
981 extern omp_allocator_handle_t const omp_null_allocator;
982 extern omp_allocator_handle_t const omp_default_mem_alloc;
983 extern omp_allocator_handle_t const omp_large_cap_mem_alloc;
984 extern omp_allocator_handle_t const omp_const_mem_alloc;
985 extern omp_allocator_handle_t const omp_high_bw_mem_alloc;
986 extern omp_allocator_handle_t const omp_low_lat_mem_alloc;
987 extern omp_allocator_handle_t const omp_cgroup_mem_alloc;
988 extern omp_allocator_handle_t const omp_pteam_mem_alloc;
989 extern omp_allocator_handle_t const omp_thread_mem_alloc;
991 extern omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
992 extern omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
993 extern omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
994 extern omp_allocator_handle_t const kmp_max_mem_alloc;
995 extern omp_allocator_handle_t __kmp_def_allocator;
1000 extern int __kmp_memkind_available;
1002 typedef omp_memspace_handle_t kmp_memspace_t;
1004 typedef struct kmp_allocator_t {
1005 omp_memspace_handle_t memspace;
1008 omp_alloctrait_value_t fb;
1009 kmp_allocator_t *fb_data;
1010 kmp_uint64 pool_size;
1011 kmp_uint64 pool_used;
1014 extern omp_allocator_handle_t __kmpc_init_allocator(int gtid,
1015 omp_memspace_handle_t,
1017 omp_alloctrait_t traits[]);
1018 extern void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al);
1019 extern void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al);
1020 extern omp_allocator_handle_t __kmpc_get_default_allocator(int gtid);
1022 extern void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
1023 extern void *__kmpc_aligned_alloc(int gtid, size_t align, size_t sz,
1024 omp_allocator_handle_t al);
1025 extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
1026 omp_allocator_handle_t al);
1027 extern void *__kmpc_realloc(int gtid, void *ptr, size_t sz,
1028 omp_allocator_handle_t al,
1029 omp_allocator_handle_t free_al);
1030 extern void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
1032 extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
1033 omp_allocator_handle_t al);
1034 extern void *__kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz,
1035 omp_allocator_handle_t al);
1036 extern void *__kmp_realloc(int gtid, void *ptr, size_t sz,
1037 omp_allocator_handle_t al,
1038 omp_allocator_handle_t free_al);
1039 extern void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
1041 extern void __kmp_init_memkind();
1042 extern void __kmp_fini_memkind();
1043 extern void __kmp_init_target_mem();
1047 #define KMP_UINT64_MAX \
1048 (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
1050 #define KMP_MIN_NTH 1
1053 #if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
1054 #define KMP_MAX_NTH PTHREAD_THREADS_MAX
1056 #define KMP_MAX_NTH INT_MAX
1060 #ifdef PTHREAD_STACK_MIN
1061 #define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
1063 #define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
1066 #define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1069 #define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
1070 #elif KMP_ARCH_X86_64
1071 #define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1072 #define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
1074 #define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
1077 #define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
1078 #define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
1079 #define KMP_MAX_MALLOC_POOL_INCR \
1080 (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1082 #define KMP_MIN_STKOFFSET (0)
1083 #define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
1085 #define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
1087 #define KMP_DEFAULT_STKOFFSET CACHE_LINE
1090 #define KMP_MIN_STKPADDING (0)
1091 #define KMP_MAX_STKPADDING (2 * 1024 * 1024)
1093 #define KMP_BLOCKTIME_MULTIPLIER \
1095 #define KMP_MIN_BLOCKTIME (0)
1096 #define KMP_MAX_BLOCKTIME \
1100 #define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200))
1103 #define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
1104 #define KMP_MIN_MONITOR_WAKEUPS (1)
1105 #define KMP_MAX_MONITOR_WAKEUPS (1000)
1109 #define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1110 (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
1111 : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
1112 : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
1113 ? (monitor_wakeups) \
1114 : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
1118 #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1119 (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
1120 (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
1122 #define KMP_BLOCKTIME(team, tid) \
1123 (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
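// Illustrative sketch (not part of the header): how the monitor-thread macros
// above relate. Assuming KMP_BLOCKTIME_MULTIPLIER is the number of blocktime
// units per second (its definition is elided here), a hypothetical 200 ms
// blocktime with a multiplier of 1000 gives 1000/200 = 5 wakeups per second,
// and KMP_INTERVALS_FROM_BLOCKTIME converts the blocktime back into a count of
// monitor wakeup intervals, rounded up.
#if 0
void blocktime_example() {
  int blocktime = 200; // milliseconds, hypothetical value
  int wakeups = KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, KMP_MIN_MONITOR_WAKEUPS);
  int intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, wakeups);
  (void)intervals;
}
#endif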
1124 #if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1126 extern kmp_uint64 __kmp_ticks_per_msec;
1127 #if KMP_COMPILER_ICC
1128 #define KMP_NOW() ((kmp_uint64)_rdtsc())
1130 #define KMP_NOW() __kmp_hardware_timestamp()
1132 #define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
1133 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
1134 (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
1135 #define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
1138 extern kmp_uint64 __kmp_now_nsec();
1139 #define KMP_NOW() __kmp_now_nsec()
1140 #define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
1141 #define KMP_BLOCKTIME_INTERVAL(team, tid) \
1142 (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
1143 #define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
1147 #define KMP_MIN_STATSCOLS 40
1148 #define KMP_MAX_STATSCOLS 4096
1149 #define KMP_DEFAULT_STATSCOLS 80
1151 #define KMP_MIN_INTERVAL 0
1152 #define KMP_MAX_INTERVAL (INT_MAX - 1)
1153 #define KMP_DEFAULT_INTERVAL 0
1155 #define KMP_MIN_CHUNK 1
1156 #define KMP_MAX_CHUNK (INT_MAX - 1)
1157 #define KMP_DEFAULT_CHUNK 1
1159 #define KMP_MIN_DISP_NUM_BUFF 1
1160 #define KMP_DFLT_DISP_NUM_BUFF 7
1161 #define KMP_MAX_DISP_NUM_BUFF 4096
1163 #define KMP_MAX_ORDERED 8
1165 #define KMP_MAX_FIELDS 32
1167 #define KMP_MAX_BRANCH_BITS 31
1169 #define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1171 #define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1173 #define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1178 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1179 #define KMP_TLS_GTID_MIN 5
1181 #define KMP_TLS_GTID_MIN INT_MAX
1184 #define KMP_MASTER_TID(tid) (0 == (tid))
1185 #define KMP_WORKER_TID(tid) (0 != (tid))
1187 #define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
1188 #define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
1189 #define KMP_INITIAL_GTID(gtid) (0 == (gtid))
1193 #define TRUE (!FALSE)
1199 #define KMP_INIT_WAIT 64U
1200 #define KMP_NEXT_WAIT 32U
1202 #define KMP_INIT_WAIT 1024U
1203 #define KMP_NEXT_WAIT 512U
1206 #define KMP_INIT_WAIT 1024U
1207 #define KMP_NEXT_WAIT 512U
1208 #elif KMP_OS_DRAGONFLY
1210 #define KMP_INIT_WAIT 1024U
1211 #define KMP_NEXT_WAIT 512U
1212 #elif KMP_OS_FREEBSD
1214 #define KMP_INIT_WAIT 1024U
1215 #define KMP_NEXT_WAIT 512U
1218 #define KMP_INIT_WAIT 1024U
1219 #define KMP_NEXT_WAIT 512U
1222 #define KMP_INIT_WAIT 1024U
1223 #define KMP_NEXT_WAIT 512U
1224 #elif KMP_OS_OPENBSD
1226 #define KMP_INIT_WAIT 1024U
1227 #define KMP_NEXT_WAIT 512U
1230 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1231 typedef struct kmp_cpuid {
1238 typedef struct kmp_cpuinfo_flags_t {
1241 unsigned hybrid : 1;
1242 unsigned reserved : 29;
1243 } kmp_cpuinfo_flags_t;
1245 typedef struct kmp_cpuinfo {
1252 kmp_cpuinfo_flags_t flags;
1256 kmp_uint64 frequency;
1257 char name[3 * sizeof(kmp_cpuid_t)];
1260 extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
1265 static inline void __kmp_x86_cpuid(int leaf, int subleaf, struct kmp_cpuid *p) {
1266 __asm__ __volatile__("cpuid"
1267 : "=a"(p->eax), "=b"(p->ebx), "=c"(p->ecx), "=d"(p->edx)
1268 : "a"(leaf), "c"(subleaf));
1271 static inline void __kmp_load_x87_fpu_control_word(const kmp_int16 *p) {
1272 __asm__ __volatile__("fldcw %0" : : "m"(*p));
1275 static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
1276 __asm__ __volatile__("fstcw %0" : "=m"(*p));
1278 static inline void __kmp_clear_x87_fpu_status_word() {
1281 struct x87_fpu_state {
1290 struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
1291 __asm__ __volatile__("fstenv %0\n\t"
1292 "andw $0x7f00, %1\n\t"
1294 : "+m"(fpu_state), "+m"(fpu_state.sw));
1296 __asm__ __volatile__("fnclex");
1300 static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
1301 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1303 static inline void __kmp_load_mxcsr(const kmp_uint32 *p) {}
1304 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
1308 extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
1309 extern void __kmp_load_x87_fpu_control_word(const kmp_int16 *p);
1310 extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
1311 extern void __kmp_clear_x87_fpu_status_word();
1312 static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
1313 static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
1316 #define KMP_X86_MXCSR_MASK 0xffffffc0
1319 extern void __kmp_x86_pause(void);
1325 static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
1327 static inline void __kmp_x86_pause(void) { _mm_pause(); }
1329 #define KMP_CPU_PAUSE() __kmp_x86_pause()
1330 #elif KMP_ARCH_PPC64
1331 #define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1332 #define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1333 #define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1334 #define KMP_CPU_PAUSE() \
1336 KMP_PPC64_PRI_LOW(); \
1337 KMP_PPC64_PRI_MED(); \
1338 KMP_PPC64_PRI_LOC_MB(); \
1341 #define KMP_CPU_PAUSE()
1344 #define KMP_INIT_YIELD(count) \
1345 { (count) = __kmp_yield_init; }
1347 #define KMP_OVERSUBSCRIBED \
1348 (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
1350 #define KMP_TRY_YIELD \
1351 ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
1353 #define KMP_TRY_YIELD_OVERSUB \
1354 ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
1356 #define KMP_YIELD(cond) \
1359 if ((cond) && (KMP_TRY_YIELD)) \
1363 #define KMP_YIELD_OVERSUB() \
1366 if ((KMP_TRY_YIELD_OVERSUB)) \
1372 #define KMP_YIELD_SPIN(count) \
1375 if (KMP_TRY_YIELD) { \
1379 (count) = __kmp_yield_next; \
1384 #define KMP_YIELD_OVERSUB_ELSE_SPIN(count) \
1387 if ((KMP_TRY_YIELD_OVERSUB)) \
1389 else if (__kmp_use_yield == 1) { \
1393 (count) = __kmp_yield_next; \
1401 #if KMP_HAVE_WAITPKG_INTRINSICS
1402 #if KMP_HAVE_IMMINTRIN_H
1403 #include <immintrin.h>
1404 #elif KMP_HAVE_INTRIN_H
1408 KMP_ATTRIBUTE_TARGET_WAITPKG
1409 static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
1410 #if !KMP_HAVE_WAITPKG_INTRINSICS
1411 uint32_t timeHi = uint32_t(counter >> 32);
1412 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1414 __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
1417 : "a"(timeLo), "d"(timeHi), "c"(hint)
1421 return _tpause(hint, counter);
1424 KMP_ATTRIBUTE_TARGET_WAITPKG
1425 static inline void __kmp_umonitor(void *cacheline) {
1426 #if !KMP_HAVE_WAITPKG_INTRINSICS
1427 __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
1432 _umonitor(cacheline);
1435 KMP_ATTRIBUTE_TARGET_WAITPKG
1436 static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
1437 #if !KMP_HAVE_WAITPKG_INTRINSICS
1438 uint32_t timeHi = uint32_t(counter >> 32);
1439 uint32_t timeLo = uint32_t(counter & 0xffffffff);
1441 __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
1444 : "a"(timeLo), "d"(timeHi), "c"(hint)
1448 return _umwait(hint, counter);
1451 #elif KMP_HAVE_MWAIT
1453 #include <pmmintrin.h>
1458 __attribute__((target("sse3")))
1461 __kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
1462 _mm_monitor(cacheline, extensions, hints);
1465 __attribute__((target("sse3")))
1468 __kmp_mm_mwait(unsigned extensions, unsigned hints) {
1469 _mm_mwait(extensions, hints);
1487 ct_ordered_in_parallel,
1495 #define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
1499 enum cons_type type;
1505 struct cons_header {
1506 int p_top, w_top, s_top;
1507 int stack_size, stack_top;
1508 struct cons_data *stack_data;
1511 struct kmp_region_info {
1513 int offset[KMP_MAX_FIELDS];
1514 int length[KMP_MAX_FIELDS];
1521 typedef HANDLE kmp_thread_t;
1522 typedef DWORD kmp_key_t;
1526 typedef pthread_t kmp_thread_t;
1527 typedef pthread_key_t kmp_key_t;
1530 extern kmp_key_t __kmp_gtid_threadprivate_key;
1532 typedef struct kmp_sys_info {
1546 typedef int kmp_itt_mark_t;
1547 #define KMP_ITT_DEBUG 0
1550 typedef kmp_int32 kmp_critical_name[8];
1561 typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
1562 typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
1579 typedef void *(*kmpc_ctor)(void *);
1592 typedef void *(*kmpc_cctor)(void *, void *);
1602 typedef void *(*kmpc_ctor_vec)(void *, size_t);
1614 typedef void *(*kmpc_cctor_vec)(void *, void *,
1622 typedef struct kmp_cached_addr {
1624 void ***compiler_cache;
1626 struct kmp_cached_addr *next;
1627 } kmp_cached_addr_t;
1629 struct private_data {
1630 struct private_data *next;
1636 struct private_common {
1637 struct private_common *next;
1638 struct private_common *link;
1644 struct shared_common {
1645 struct shared_common *next;
1646 struct private_data *pod_init;
1666 #define KMP_HASH_TABLE_LOG2 9
1667 #define KMP_HASH_TABLE_SIZE \
1668 (1 << KMP_HASH_TABLE_LOG2)
1669 #define KMP_HASH_SHIFT 3
1670 #define KMP_HASH(x) \
1671 ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
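// Illustrative sketch (not part of the header): KMP_HASH drops the low
// KMP_HASH_SHIFT address bits and masks the result into one of the
// KMP_HASH_TABLE_SIZE (512) buckets used by the threadprivate tables below.
#if 0
void hash_example(void *gbl_addr) {
  size_t bucket = KMP_HASH(gbl_addr); // always < KMP_HASH_TABLE_SIZE
  (void)bucket;
}
#endif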
1673 struct common_table {
1674 struct private_common *data[KMP_HASH_TABLE_SIZE];
1677 struct shared_table {
1678 struct shared_common *data[KMP_HASH_TABLE_SIZE];
1683 #if KMP_USE_HIER_SCHED
1686 typedef struct kmp_hier_private_bdata_t {
1687 kmp_int32 num_active;
1689 kmp_uint64 wait_val[2];
1690 } kmp_hier_private_bdata_t;
1693 typedef struct kmp_sched_flags {
1694 unsigned ordered : 1;
1695 unsigned nomerge : 1;
1696 unsigned contains_last : 1;
1697 #if KMP_USE_HIER_SCHED
1698 unsigned use_hier : 1;
1699 unsigned unused : 28;
1701 unsigned unused : 29;
1703 } kmp_sched_flags_t;
1705 KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
1707 #if KMP_STATIC_STEAL_ENABLED
1708 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1715 kmp_lock_t *steal_lock;
1722 struct KMP_ALIGN(32) {
1729 kmp_uint32 ordered_lower;
1730 kmp_uint32 ordered_upper;
1732 kmp_int32 last_upper;
1734 } dispatch_private_info32_t;
1736 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1743 kmp_lock_t *steal_lock;
1752 struct KMP_ALIGN(32) {
1759 kmp_uint64 ordered_lower;
1760 kmp_uint64 ordered_upper;
1762 kmp_int64 last_upper;
1764 } dispatch_private_info64_t;
1766 typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
1779 kmp_uint32 ordered_lower;
1780 kmp_uint32 ordered_upper;
1782 kmp_int32 last_upper;
1784 } dispatch_private_info32_t;
1786 typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
1800 kmp_uint64 ordered_lower;
1801 kmp_uint64 ordered_upper;
1803 kmp_int64 last_upper;
1805 } dispatch_private_info64_t;
1808 typedef struct KMP_ALIGN_CACHE dispatch_private_info {
1809 union private_info {
1810 dispatch_private_info32_t p32;
1811 dispatch_private_info64_t p64;
1814 kmp_sched_flags_t flags;
1815 std::atomic<kmp_uint32> steal_flag;
1816 kmp_int32 ordered_bumped;
1818 struct dispatch_private_info *next;
1819 kmp_int32 type_size;
1820 #if KMP_USE_HIER_SCHED
1824 enum cons_type pushed_ws;
1825 } dispatch_private_info_t;
1827 typedef struct dispatch_shared_info32 {
1830 volatile kmp_uint32 iteration;
1831 volatile kmp_int32 num_done;
1832 volatile kmp_uint32 ordered_iteration;
1834 kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
1835 } dispatch_shared_info32_t;
1837 typedef struct dispatch_shared_info64 {
1840 volatile kmp_uint64 iteration;
1841 volatile kmp_int64 num_done;
1842 volatile kmp_uint64 ordered_iteration;
1844 kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
1845 } dispatch_shared_info64_t;
1847 typedef struct dispatch_shared_info {
1849 dispatch_shared_info32_t s32;
1850 dispatch_shared_info64_t s64;
1852 volatile kmp_uint32 buffer_index;
1853 volatile kmp_int32 doacross_buf_idx;
1854 volatile kmp_uint32 *doacross_flags;
1855 kmp_int32 doacross_num_done;
1856 #if KMP_USE_HIER_SCHED
1865 } dispatch_shared_info_t;
1867 typedef struct kmp_disp {
1869 void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
1871 void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
1873 dispatch_shared_info_t *th_dispatch_sh_current;
1874 dispatch_private_info_t *th_dispatch_pr_current;
1876 dispatch_private_info_t *th_disp_buffer;
1877 kmp_uint32 th_disp_index;
1878 kmp_int32 th_doacross_buf_idx;
1879 volatile kmp_uint32 *th_doacross_flags;
1880 kmp_int64 *th_doacross_info;
1881 #if KMP_USE_INTERNODE_ALIGNMENT
1882 char more_padding[INTERNODE_CACHE_LINE];
1890 #define KMP_INIT_BARRIER_STATE 0
1891 #define KMP_BARRIER_SLEEP_BIT 0
1892 #define KMP_BARRIER_UNUSED_BIT 1
1893 #define KMP_BARRIER_BUMP_BIT 2
1895 #define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
1896 #define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
1897 #define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
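// Illustrative sketch (not part of the header): the sleep bit is OR-ed into a
// thread's barrier flag word so a releasing thread can tell whether the waiter
// went to sleep and must be woken, while KMP_BARRIER_STATE_BUMP is the unit by
// which the actual barrier state counter advances. Names are hypothetical.
#if 0
void barrier_state_example(kmp_uint64 flag_value) {
  if (flag_value & KMP_BARRIER_SLEEP_STATE) {
    // waiter is (or is about to be) sleeping; a wake-up is required
  }
  kmp_uint64 next_state =
      (flag_value & ~(kmp_uint64)KMP_BARRIER_SLEEP_STATE) + KMP_BARRIER_STATE_BUMP;
  (void)next_state;
}
#endif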
1899 #if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
1900 #error "Barrier sleep bit must be smaller than barrier bump bit"
1902 #if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
1903 #error "Barrier unused bit must be smaller than barrier bump bit"
1907 #define KMP_BARRIER_NOT_WAITING 0
1908 #define KMP_BARRIER_OWN_FLAG \
1910 #define KMP_BARRIER_PARENT_FLAG \
1912 #define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
1914 #define KMP_BARRIER_SWITCHING \
1917 #define KMP_NOT_SAFE_TO_REAP \
1919 #define KMP_SAFE_TO_REAP 1
1931 bs_plain_barrier = 0,
1933 bs_forkjoin_barrier,
1934 #if KMP_FAST_REDUCTION_BARRIER
1935 bs_reduction_barrier,
1941 #if !KMP_FAST_REDUCTION_BARRIER
1942 #define bs_reduction_barrier bs_plain_barrier
1945 typedef enum kmp_bar_pat {
1952 bp_hierarchical_bar = 3,
1957 #define KMP_BARRIER_ICV_PUSH 1
1960 typedef struct kmp_internal_control {
1961 int serial_nesting_level;
1974 int max_active_levels;
1977 kmp_proc_bind_t proc_bind;
1978 kmp_int32 default_device;
1979 struct kmp_internal_control *next;
1980 } kmp_internal_control_t;
1982 static inline void copy_icvs(kmp_internal_control_t *dst,
1983 kmp_internal_control_t *src) {
1988 typedef struct KMP_ALIGN_CACHE kmp_bstate {
1993 kmp_internal_control_t th_fixed_icvs;
1996 volatile kmp_uint64 b_go;
1997 KMP_ALIGN_CACHE volatile kmp_uint64
1999 kmp_uint32 *skip_per_level;
2000 kmp_uint32 my_level;
2001 kmp_int32 parent_tid;
2004 struct kmp_bstate *parent_bar;
2006 kmp_uint64 leaf_state;
2008 kmp_uint8 base_leaf_kids;
2009 kmp_uint8 leaf_kids;
2011 kmp_uint8 wait_flag;
2012 kmp_uint8 use_oncore_barrier;
2017 KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
2021 union KMP_ALIGN_CACHE kmp_barrier_union {
2023 char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
2027 typedef union kmp_barrier_union kmp_balign_t;
2030 union KMP_ALIGN_CACHE kmp_barrier_team_union {
2032 char b_pad[CACHE_LINE];
2034 kmp_uint64 b_arrived;
2040 kmp_uint b_master_arrived;
2041 kmp_uint b_team_arrived;
2046 typedef union kmp_barrier_team_union kmp_balign_team_t;
2053 typedef struct kmp_win32_mutex {
2055 CRITICAL_SECTION cs;
2056 } kmp_win32_mutex_t;
2058 typedef struct kmp_win32_cond {
2063 kmp_win32_mutex_t waiters_count_lock_;
2070 int wait_generation_count_;
2079 union KMP_ALIGN_CACHE kmp_cond_union {
2081 char c_pad[CACHE_LINE];
2082 pthread_cond_t c_cond;
2085 typedef union kmp_cond_union kmp_cond_align_t;
2087 union KMP_ALIGN_CACHE kmp_mutex_union {
2089 char m_pad[CACHE_LINE];
2090 pthread_mutex_t m_mutex;
2093 typedef union kmp_mutex_union kmp_mutex_align_t;
2097 typedef struct kmp_desc_base {
2099 size_t ds_stacksize;
2101 kmp_thread_t ds_thread;
2102 volatile int ds_tid;
2105 volatile int ds_alive;
2122 typedef union KMP_ALIGN_CACHE kmp_desc {
2124 char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
2128 typedef struct kmp_local {
2129 volatile int this_construct;
2134 #if !USE_CMP_XCHG_FOR_BGET
2135 #ifdef USE_QUEUING_LOCK_FOR_BGET
2136 kmp_lock_t bget_lock;
2138 kmp_bootstrap_lock_t bget_lock;
2145 PACKED_REDUCTION_METHOD_T
2146 packed_reduction_method;
2151 #define KMP_CHECK_UPDATE(a, b) \
2154 #define KMP_CHECK_UPDATE_SYNC(a, b) \
2156 TCW_SYNC_PTR((a), (b))
2158 #define get__blocktime(xteam, xtid) \
2159 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2160 #define get__bt_set(xteam, xtid) \
2161 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2163 #define get__bt_intervals(xteam, xtid) \
2164 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2167 #define get__dynamic_2(xteam, xtid) \
2168 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2169 #define get__nproc_2(xteam, xtid) \
2170 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2171 #define get__sched_2(xteam, xtid) \
2172 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2174 #define set__blocktime_team(xteam, xtid, xval) \
2175 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2179 #define set__bt_intervals_team(xteam, xtid, xval) \
2180 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
2184 #define set__bt_set_team(xteam, xtid, xval) \
2185 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2187 #define set__dynamic(xthread, xval) \
2188 (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
2189 #define get__dynamic(xthread) \
2190 (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2192 #define set__nproc(xthread, xval) \
2193 (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2195 #define set__thread_limit(xthread, xval) \
2196 (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
2198 #define set__max_active_levels(xthread, xval) \
2199 (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2201 #define get__max_active_levels(xthread) \
2202 ((xthread)->th.th_current_task->td_icvs.max_active_levels)
2204 #define set__sched(xthread, xval) \
2205 (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2207 #define set__proc_bind(xthread, xval) \
2208 (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2209 #define get__proc_bind(xthread) \
2210 ((xthread)->th.th_current_task->td_icvs.proc_bind)
2214 typedef enum kmp_tasking_mode {
2215 tskm_immediate_exec = 0,
2216 tskm_extra_barrier = 1,
2217 tskm_task_teams = 2,
2219 } kmp_tasking_mode_t;
2221 extern kmp_tasking_mode_t
2223 extern int __kmp_task_stealing_constraint;
2224 extern int __kmp_enable_task_throttling;
2225 extern kmp_int32 __kmp_default_device;
2228 extern kmp_int32 __kmp_max_task_priority;
2230 extern kmp_uint64 __kmp_taskloop_min_tasks;
2234 #define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2235 #define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
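// Illustrative note (not part of the header): a task is allocated as one block
// with the kmp_taskdata_t bookkeeping immediately in front of the kmp_task_t
// seen by compiler-generated code, so the two macros above are simple pointer
// arithmetic in opposite directions. Names below are hypothetical.
#if 0
void task_layout_example(kmp_task_t *task) {
  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); // step back one taskdata
  kmp_task_t *same = KMP_TASKDATA_TO_TASK(td);     // and forward again
  KMP_DEBUG_ASSERT(same == task);
}
#endif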
2239 #define KMP_TASKING_ENABLED(task_team) \
2240 (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
2248 typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
2250 typedef union kmp_cmplrdata {
2261 typedef struct kmp_task {
2268 kmp_cmplrdata_t data2;
2277 typedef struct kmp_taskgroup {
2278 std::atomic<kmp_int32> count;
2279 std::atomic<kmp_int32>
2281 struct kmp_taskgroup *parent;
2284 kmp_int32 reduce_num_data;
2285 uintptr_t *gomp_data;
2289 typedef union kmp_depnode kmp_depnode_t;
2290 typedef struct kmp_depnode_list kmp_depnode_list_t;
2291 typedef struct kmp_dephash_entry kmp_dephash_entry_t;
2294 #define KMP_DEP_IN 0x1
2295 #define KMP_DEP_OUT 0x2
2296 #define KMP_DEP_INOUT 0x3
2297 #define KMP_DEP_MTX 0x4
2298 #define KMP_DEP_SET 0x8
2299 #define KMP_DEP_ALL 0x80
2301 typedef struct kmp_depend_info {
2302 kmp_intptr_t base_addr;
2311 unsigned unused : 3;
2315 } kmp_depend_info_t;
2318 struct kmp_depnode_list {
2319 kmp_depnode_t *node;
2320 kmp_depnode_list_t *next;
2324 #define MAX_MTX_DEPS 4
2326 typedef struct kmp_base_depnode {
2327 kmp_depnode_list_t *successors;
2329 kmp_lock_t *mtx_locks[MAX_MTX_DEPS];
2330 kmp_int32 mtx_num_locks;
2332 #if KMP_SUPPORT_GRAPH_OUTPUT
2335 std::atomic<kmp_int32> npredecessors;
2336 std::atomic<kmp_int32> nrefs;
2337 } kmp_base_depnode_t;
2339 union KMP_ALIGN_CACHE kmp_depnode {
2341 char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
2342 kmp_base_depnode_t dn;
2345 struct kmp_dephash_entry {
2347 kmp_depnode_t *last_out;
2348 kmp_depnode_list_t *last_set;
2349 kmp_depnode_list_t *prev_set;
2350 kmp_uint8 last_flag;
2351 kmp_lock_t *mtx_lock;
2352 kmp_dephash_entry_t *next_in_bucket;
2355 typedef struct kmp_dephash {
2356 kmp_dephash_entry_t **buckets;
2358 kmp_depnode_t *last_all;
2360 kmp_uint32 nelements;
2361 kmp_uint32 nconflicts;
2364 typedef struct kmp_task_affinity_info {
2365 kmp_intptr_t base_addr;
2370 kmp_int32 reserved : 30;
2372 } kmp_task_affinity_info_t;
2374 typedef enum kmp_event_type_t {
2375 KMP_EVENT_UNINITIALIZED = 0,
2376 KMP_EVENT_ALLOW_COMPLETION = 1
2380 kmp_event_type_t type;
2381 kmp_tas_lock_t lock;
2387 #ifdef BUILD_TIED_TASK_STACK
2390 typedef struct kmp_stack_block {
2391 kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
2392 struct kmp_stack_block *sb_next;
2393 struct kmp_stack_block *sb_prev;
2394 } kmp_stack_block_t;
2396 typedef struct kmp_task_stack {
2397 kmp_stack_block_t ts_first_block;
2398 kmp_taskdata_t **ts_top;
2399 kmp_int32 ts_entries;
2404 typedef struct kmp_tasking_flags {
2406 unsigned tiedness : 1;
2408 unsigned merged_if0 : 1;
2410 unsigned destructors_thunk : 1;
2414 unsigned priority_specified : 1;
2416 unsigned detachable : 1;
2417 unsigned hidden_helper : 1;
2418 unsigned reserved : 8;
2421 unsigned tasktype : 1;
2422 unsigned task_serial : 1;
2423 unsigned tasking_ser : 1;
2425 unsigned team_serial : 1;
2429 unsigned started : 1;
2430 unsigned executing : 1;
2431 unsigned complete : 1;
2433 unsigned native : 1;
2434 unsigned reserved31 : 7;
2436 } kmp_tasking_flags_t;
2438 struct kmp_taskdata {
2439 kmp_int32 td_task_id;
2440 kmp_tasking_flags_t td_flags;
2441 kmp_team_t *td_team;
2442 kmp_info_p *td_alloc_thread;
2444 kmp_taskdata_t *td_parent;
2446 std::atomic<kmp_int32> td_untied_count;
2450 kmp_uint32 td_taskwait_counter;
2451 kmp_int32 td_taskwait_thread;
2452 KMP_ALIGN_CACHE kmp_internal_control_t
2454 KMP_ALIGN_CACHE std::atomic<kmp_int32>
2455 td_allocated_child_tasks;
2457 std::atomic<kmp_int32>
2458 td_incomplete_child_tasks;
2465 kmp_task_team_t *td_task_team;
2466 size_t td_size_alloc;
2467 #if defined(KMP_GOMP_COMPAT)
2469 kmp_int32 td_size_loop_bounds;
2471 kmp_taskdata_t *td_last_tied;
2472 #if defined(KMP_GOMP_COMPAT)
2474 void (*td_copy_func)(void *, void *);
2476 kmp_event_t td_allow_completion_event;
2478 ompt_task_info_t ompt_task_info;
2483 KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
2486 typedef struct kmp_base_thread_data {
2490 kmp_bootstrap_lock_t td_deque_lock;
2493 kmp_int32 td_deque_size;
2494 kmp_uint32 td_deque_head;
2495 kmp_uint32 td_deque_tail;
2496 kmp_int32 td_deque_ntasks;
2498 kmp_int32 td_deque_last_stolen;
2499 #ifdef BUILD_TIED_TASK_STACK
2500 kmp_task_stack_t td_susp_tied_tasks;
2503 } kmp_base_thread_data_t;
2505 #define TASK_DEQUE_BITS 8
2506 #define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
2508 #define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
2509 #define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
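// Illustrative sketch (not part of the header): the per-thread task deque is a
// power-of-two ring buffer, so head/tail indices wrap with TASK_DEQUE_MASK
// instead of a modulo by the current size. Names below are hypothetical.
#if 0
void deque_index_example(kmp_base_thread_data_t *td) {
  kmp_uint32 slot = td->td_deque_tail;
  // advance the tail by one entry, wrapping at TASK_DEQUE_SIZE(*td)
  td->td_deque_tail = (slot + 1) & TASK_DEQUE_MASK(*td);
}
#endif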
2511 typedef union KMP_ALIGN_CACHE kmp_thread_data {
2512 kmp_base_thread_data_t td;
2514 char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2515 } kmp_thread_data_t;
2518 typedef struct kmp_base_task_team {
2519 kmp_bootstrap_lock_t
2522 kmp_task_team_t *tt_next;
2526 kmp_int32 tt_found_tasks;
2530 kmp_int32 tt_max_threads;
2531 kmp_int32 tt_found_proxy_tasks;
2532 kmp_int32 tt_untied_task_encountered;
2535 kmp_int32 tt_hidden_helper_task_encountered;
2538 std::atomic<kmp_int32> tt_unfinished_threads;
2543 } kmp_base_task_team_t;
2545 union KMP_ALIGN_CACHE kmp_task_team {
2546 kmp_base_task_team_t tt;
2548 char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2551 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2554 typedef struct kmp_free_list {
2555 void *th_free_list_self;
2556 void *th_free_list_sync;
2558 void *th_free_list_other;
2562 #if KMP_NESTED_HOT_TEAMS
2565 typedef struct kmp_hot_team_ptr {
2566 kmp_team_p *hot_team;
2567 kmp_int32 hot_team_nth;
2568 } kmp_hot_team_ptr_t;
2570 typedef struct kmp_teams_size {
2586 typedef struct kmp_cg_root {
2587 kmp_info_p *cg_root;
2590 kmp_int32 cg_thread_limit;
2591 kmp_int32 cg_nthreads;
2592 struct kmp_cg_root *up;
2597 typedef struct KMP_ALIGN_CACHE kmp_base_info {
2603 kmp_team_p *th_team;
2604 kmp_root_p *th_root;
2605 kmp_info_p *th_next_pool;
2606 kmp_disp_t *th_dispatch;
2612 kmp_info_p *th_team_master;
2613 int th_team_serialized;
2614 microtask_t th_teams_microtask;
2623 int th_team_bt_intervals;
2626 kmp_uint64 th_team_bt_intervals;
2629 #if KMP_AFFINITY_SUPPORTED
2630 kmp_affin_mask_t *th_affin_mask;
2632 omp_allocator_handle_t th_def_allocator;
2636 #if KMP_NESTED_HOT_TEAMS
2637 kmp_hot_team_ptr_t *th_hot_teams;
2643 #if KMP_AFFINITY_SUPPORTED
2644 int th_current_place;
2650 int th_prev_num_threads;
2652 kmp_uint64 th_bar_arrive_time;
2653 kmp_uint64 th_bar_min_time;
2654 kmp_uint64 th_frame_time;
2656 kmp_local_t th_local;
2657 struct private_common *th_pri_head;
2662 KMP_ALIGN_CACHE kmp_team_p
2666 ompt_thread_info_t ompt_thread_info;
2670 struct common_table *th_pri_common;
2672 volatile kmp_uint32 th_spin_here;
2675 volatile void *th_sleep_loc;
2676 flag_type th_sleep_loc_type;
2683 kmp_task_team_t *th_task_team;
2684 kmp_taskdata_t *th_current_task;
2685 kmp_uint8 th_task_state;
2686 kmp_uint8 *th_task_state_memo_stack;
2688 kmp_uint32 th_task_state_top;
2689 kmp_uint32 th_task_state_stack_sz;
2690 kmp_uint32 th_reap_state;
2695 kmp_uint8 th_active_in_pool;
2697 std::atomic<kmp_uint32> th_used_in_team;
2700 struct cons_header *th_cons;
2701 #if KMP_USE_HIER_SCHED
2703 kmp_hier_private_bdata_t *th_hier_bar_data;
2707 KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
2709 KMP_ALIGN_CACHE volatile kmp_int32
2712 #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2714 kmp_free_list_t th_free_lists[NUM_LISTS];
2719 kmp_win32_cond_t th_suspend_cv;
2720 kmp_win32_mutex_t th_suspend_mx;
2721 std::atomic<int> th_suspend_init;
2724 kmp_cond_align_t th_suspend_cv;
2725 kmp_mutex_align_t th_suspend_mx;
2726 std::atomic<int> th_suspend_init_count;
2730 kmp_itt_mark_t th_itt_mark_single;
2733 #if KMP_STATS_ENABLED
2734 kmp_stats_list *th_stats;
2737 std::atomic<bool> th_blocking;
2739 kmp_cg_root_t *th_cg_roots;
2742 typedef union KMP_ALIGN_CACHE kmp_info {
2744 char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
2750 typedef struct kmp_base_data {
2751 volatile kmp_uint32 t_value;
2754 typedef union KMP_ALIGN_CACHE kmp_sleep_team {
2756 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2760 typedef union KMP_ALIGN_CACHE kmp_ordered_team {
2762 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2764 } kmp_ordered_team_t;
2766 typedef int (*launch_t)(int gtid);
2769 #define KMP_MIN_MALLOC_ARGV_ENTRIES 100
2775 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2776 #define KMP_INLINE_ARGV_BYTES \
2778 ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
2779 sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
2782 #define KMP_INLINE_ARGV_BYTES \
2783 (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
2785 #define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
2787 typedef struct KMP_ALIGN_CACHE kmp_base_team {
2790 KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
2791 kmp_balign_team_t t_bar[bs_last_barrier];
2792 std::atomic<int> t_construct;
2793 char pad[sizeof(kmp_lock_t)];
2796 std::atomic<void *> t_tg_reduce_data[2];
2797 std::atomic<int> t_tg_fini_counter[2];
2801 KMP_ALIGN_CACHE int t_master_tid;
2802 int t_master_this_cons;
2806 kmp_team_p *t_parent;
2807 kmp_team_p *t_next_pool;
2808 kmp_disp_t *t_dispatch;
2809 kmp_task_team_t *t_task_team[2];
2810 kmp_proc_bind_t t_proc_bind;
2812 kmp_uint64 t_region_time;
2817 KMP_ALIGN_CACHE void **t_argv;
2824 ompt_team_info_t ompt_team_info;
2825 ompt_lw_taskteam_t *ompt_serialized_team_info;
2828 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2829 kmp_int8 t_fp_control_saved;
2831 kmp_int16 t_x87_fpu_control_word;
2835 void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
2837 KMP_ALIGN_CACHE kmp_info_t **t_threads;
2839 *t_implicit_task_taskdata;
2842 KMP_ALIGN_CACHE int t_max_argc;
2845 dispatch_shared_info_t *t_disp_buffer;
2848 kmp_r_sched_t t_sched;
2849 #if KMP_AFFINITY_SUPPORTED
2853 int t_display_affinity;
2856 omp_allocator_handle_t t_def_allocator;
2859 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
2864 char dummy_padding[1024];
2867 KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
2870 std::atomic<kmp_int32> t_cancel_request;
2871 int t_master_active;
2872 void *t_copypriv_data;
2874 std::atomic<kmp_uint32> t_copyin_counter;
2879 distributedBarrier *b;
2882 union KMP_ALIGN_CACHE kmp_team {
2885 char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
2888 typedef union KMP_ALIGN_CACHE kmp_time_global {
2890 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
2892 } kmp_time_global_t;
2894 typedef struct kmp_base_global {
2896 kmp_time_global_t g_time;
2899 volatile int g_abort;
2900 volatile int g_done;
2903 enum dynamic_mode g_dynamic_mode;
2904 } kmp_base_global_t;
2906 typedef union KMP_ALIGN_CACHE kmp_global {
2907 kmp_base_global_t g;
2909 char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
2912 typedef struct kmp_base_root {
2917 volatile int r_active;
2919 std::atomic<int> r_in_parallel;
2921 kmp_team_t *r_root_team;
2922 kmp_team_t *r_hot_team;
2923 kmp_info_t *r_uber_thread;
2924 kmp_lock_t r_begin_lock;
2925 volatile int r_begin;
2927 #if KMP_AFFINITY_SUPPORTED
2928 int r_affinity_assigned;
2932 typedef union KMP_ALIGN_CACHE kmp_root {
2935 char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
2938 struct fortran_inx_info {
2944 extern int __kmp_settings;
2945 extern int __kmp_duplicate_library_ok;
2947 extern int __kmp_forkjoin_frames;
2948 extern int __kmp_forkjoin_frames_mode;
2950 extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
2951 extern int __kmp_determ_red;
2954 extern int kmp_a_debug;
2955 extern int kmp_b_debug;
2956 extern int kmp_c_debug;
2957 extern int kmp_d_debug;
2958 extern int kmp_e_debug;
2959 extern int kmp_f_debug;
2963 #define KMP_DEBUG_BUF_LINES_INIT 512
2964 #define KMP_DEBUG_BUF_LINES_MIN 1
2966 #define KMP_DEBUG_BUF_CHARS_INIT 128
2967 #define KMP_DEBUG_BUF_CHARS_MIN 2
2971 extern int __kmp_debug_buf_lines;
2973 __kmp_debug_buf_chars;
2974 extern int __kmp_debug_buf_atomic;
2977 extern char *__kmp_debug_buffer;
2978 extern std::atomic<int> __kmp_debug_count;
2980 extern int __kmp_debug_buf_warn_chars;
2985 extern int __kmp_par_range;
2987 #define KMP_PAR_RANGE_ROUTINE_LEN 1024
2988 extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
2989 #define KMP_PAR_RANGE_FILENAME_LEN 1024
2990 extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
2991 extern int __kmp_par_range_lb;
2992 extern int __kmp_par_range_ub;
2998 extern int __kmp_storage_map_verbose;
3000 extern int __kmp_storage_map_verbose_specified;
3002 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3003 extern kmp_cpuinfo_t __kmp_cpuinfo;
3004 static inline bool __kmp_is_hybrid_cpu() { return __kmp_cpuinfo.flags.hybrid; }
3006 static inline bool __kmp_is_hybrid_cpu() { return false; }
3009 extern volatile int __kmp_init_serial;
3010 extern volatile int __kmp_init_gtid;
3011 extern volatile int __kmp_init_common;
3012 extern volatile int __kmp_init_middle;
3013 extern volatile int __kmp_init_parallel;
3015 extern volatile int __kmp_init_monitor;
3017 extern volatile int __kmp_init_user_locks;
3018 extern volatile int __kmp_init_hidden_helper_threads;
3019 extern int __kmp_init_counter;
3020 extern int __kmp_root_counter;
3021 extern int __kmp_version;
3024 extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
3027 extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
3028 extern kmp_uint32 __kmp_barrier_release_bb_dflt;
3029 extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
3030 extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
3031 extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
3032 extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
3033 extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
3034 extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
3035 extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
3036 extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
3037 extern char const *__kmp_barrier_type_name[bs_last_barrier];
3038 extern char const *__kmp_barrier_pattern_name[bp_last_bar];
3041 extern kmp_bootstrap_lock_t __kmp_initz_lock;
3042 extern kmp_bootstrap_lock_t __kmp_forkjoin_lock;
3043 extern kmp_bootstrap_lock_t __kmp_task_team_lock;
3044 extern kmp_bootstrap_lock_t
3047 extern kmp_bootstrap_lock_t
3050 extern kmp_bootstrap_lock_t
3051 __kmp_tp_cached_lock;
3054 extern kmp_lock_t __kmp_global_lock;
3055 extern kmp_queuing_lock_t __kmp_dispatch_lock;
3056 extern kmp_lock_t __kmp_debug_lock;
3058 extern enum library_type __kmp_library;
3064 extern int __kmp_chunk;
3065 extern int __kmp_force_monotonic;
3067 extern size_t __kmp_stksize;
3069 extern size_t __kmp_monitor_stksize;
3071 extern size_t __kmp_stkoffset;
3072 extern int __kmp_stkpadding;
3075 __kmp_malloc_pool_incr;
3076 extern int __kmp_env_stksize;
3077 extern int __kmp_env_blocktime;
3078 extern int __kmp_env_checks;
3079 extern int __kmp_env_consistency_check;
3080 extern int __kmp_generate_warnings;
3081 extern int __kmp_reserve_warn;
3083 #ifdef DEBUG_SUSPEND
3084 extern int __kmp_suspend_count;
3087 extern kmp_int32 __kmp_use_yield;
3088 extern kmp_int32 __kmp_use_yield_exp_set;
3089 extern kmp_uint32 __kmp_yield_init;
3090 extern kmp_uint32 __kmp_yield_next;
3093 extern int __kmp_allThreadsSpecified;
3095 extern size_t __kmp_align_alloc;
3097 extern int __kmp_xproc;
3098 extern int __kmp_avail_proc;
3099 extern size_t __kmp_sys_min_stksize;
3100 extern int __kmp_sys_max_nth;
3102 extern int __kmp_max_nth;
3104 extern int __kmp_cg_max_nth;
3105 extern int __kmp_teams_max_nth;
3106 extern int __kmp_threads_capacity;
3108 extern int __kmp_dflt_team_nth;
3110 extern int __kmp_dflt_team_nth_ub;
3112 extern int __kmp_tp_capacity;
3114 extern int __kmp_tp_cached;
3116 extern int __kmp_dflt_blocktime;
3120 __kmp_monitor_wakeups;
3121 extern int __kmp_bt_intervals;
3124 #ifdef KMP_ADJUST_BLOCKTIME
3125 extern int __kmp_zero_bt;
3127 #ifdef KMP_DFLT_NTH_CORES
3128 extern int __kmp_ncores;
3131 extern int __kmp_abort_delay;
3133 extern int __kmp_need_register_atfork_specified;
3134 extern int __kmp_need_register_atfork;
3136 extern int __kmp_gtid_mode;
3144 __kmp_adjust_gtid_mode;
3145 #ifdef KMP_TDATA_GTID
3146 extern KMP_THREAD_LOCAL int __kmp_gtid;
3148 extern int __kmp_tls_gtid_min;
3149 extern int __kmp_foreign_tp;
3150 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
3151 extern int __kmp_inherit_fp_control;
3152 extern kmp_int16 __kmp_init_x87_fpu_control_word;
3153 extern kmp_uint32 __kmp_init_mxcsr;
3158 extern int __kmp_dflt_max_active_levels;
3161 extern bool __kmp_dflt_max_active_levels_set;
3162 extern int __kmp_dispatch_num_buffers;
3164 #if KMP_NESTED_HOT_TEAMS
3165 extern int __kmp_hot_teams_mode;
3166 extern int __kmp_hot_teams_max_level;
3170 extern enum clock_function_type __kmp_clock_function;
3171 extern int __kmp_clock_function_param;
3174 #if KMP_MIC_SUPPORTED
3175 extern enum mic_type __kmp_mic_type;
3178 #ifdef USE_LOAD_BALANCE
3179 extern double __kmp_load_balance_interval;
3183 typedef struct kmp_nested_nthreads_t {
3187 } kmp_nested_nthreads_t;
3189 extern kmp_nested_nthreads_t __kmp_nested_nth;
3191 #if KMP_USE_ADAPTIVE_LOCKS
3194 struct kmp_adaptive_backoff_params_t {
3196 kmp_uint32 max_soft_retries;
3199 kmp_uint32 max_badness;
3202 extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3204 #if KMP_DEBUG_ADAPTIVE_LOCKS
3205 extern const char *__kmp_speculative_statsfile;
extern int __kmp_display_env;
extern int __kmp_display_env_verbose;
extern int __kmp_omp_cancellation;
extern int __kmp_nteams;
extern int __kmp_teams_thread_limit;

/* The following are protected by the fork/join lock. */
extern kmp_info_t **__kmp_threads;
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;

extern volatile int __kmp_nth;
extern volatile int __kmp_all_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;

extern kmp_root_t **__kmp_root;

#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))

#define __kmp_get_team_num_threads(gtid) \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}

static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
  if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
    KMP_FATAL(ThreadIdentInvalid);
}
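// Illustrative only (not part of the original header): the accessors above map
// a global thread id (gtid) to its per-team id and descriptor. A typical
// runtime-internal usage pattern looks like:
//
//   int gtid = __kmp_entry_gtid();             // register/look up this thread
//   kmp_info_t *thr = __kmp_thread_from_gtid(gtid);
//   int tid = __kmp_tid_from_gtid(gtid);       // 0 == primary thread of team
//   kmp_team_t *team = __kmp_team_from_gtid(gtid);
//   KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid);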
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait;
extern int __kmp_umwait_enabled;
extern int __kmp_mwait_enabled;
extern int __kmp_mwait_hints;
#endif
extern kmp_global_t __kmp_global;

extern kmp_info_t __kmp_monitor;
extern std::atomic<kmp_int32> __kmp_team_counter;
extern std::atomic<kmp_int32> __kmp_task_counter;
#if USE_DEBUGGER
#define _KMP_GEN_ID(counter) \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif // USE_DEBUGGER

#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
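// Illustrative only (not in the original header): with debugger support
// enabled these counters hand out unique ids, e.g.
//   int id = KMP_GEN_TEAM_ID();
// otherwise the macro expands to ~0 and no atomic increment happens.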
extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);

extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);

extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern void __kmp_unregister_library(void);

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);

extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif
extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);
// Returns the current thread's kmp_info_t; the thread must already be
// registered with the runtime.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();
  return __kmp_threads[gtid];
}
extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void __kmp_check_stksize(size_t *val);

extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
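// Illustrative only (not in the original header): the __kmp_allocate/__kmp_free
// wrappers forward the caller's source location when KMP_SRC_LOC_DECL expands
// to file/line parameters, e.g.
//   void *buf = __kmp_allocate(128); // call site is recorded in debug builds
//   __kmp_free(buf);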
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size) \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr) \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)

extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size) \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize) \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size) \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr) \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)

#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);

extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
extern void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb,
                                    int num_teams_ub, int num_threads);
extern void __kmp_yield();
extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);
extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);
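// Illustrative only (not in the original header): the dispatch_init/next pair
// drives dynamically scheduled loops; lb, ub, st and chunk stand in for the
// compiler-computed loop bounds. A lowered loop is roughly:
//
//   __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked, lb, ub, st, chunk);
//   kmp_int32 last, lo, hi, stride;
//   while (__kmpc_dispatch_next_4(loc, gtid, &last, &lo, &hi, &stride)) {
//     for (kmp_int32 i = lo; i <= hi; i += st) {
//       /* loop body */
//     }
//   }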
#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */
extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker,
                               kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                               void *obj);
extern void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
                             kmp_uint32 (*pred)(void *, kmp_uint32), void *obj);

extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag,
                          int final_spin);
extern void __kmp_release_64(kmp_flag_64<> *flag);

extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);

#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif
extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);

extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);
#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && KMP_ARCH_AARCH64)
extern void __kmp_initialize_system_tick(void);
#endif

extern void __kmp_runtime_initialize(void);
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                                  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(void);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(int gtid, int isa_root);
extern void __kmp_affinity_set_place(int gtid);
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_OS_LINUX || KMP_OS_FREEBSD
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
  int gtid = __kmp_entry_gtid();
  kmp_root_t *r = __kmp_threads[gtid]->th.th_root;
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, TRUE);
    r->r.r_affinity_assigned = TRUE;
  }
}
#else /* KMP_AFFINITY_SUPPORTED */
#define __kmp_assign_root_init_mask() /* Nothing */
#endif
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                         kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);

extern int __kmp_futex_determine_capable(void);

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);

extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

extern void __kmp_create_monitor(kmp_info_t *th);

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);

extern void __kmp_reap_monitor(kmp_info_t *th);
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);

extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

extern void __kmp_register_atfork(void);
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_initialize_thread(kmp_info_t *th);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int new_tid);
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
                    ompt_data_t ompt_parallel_data,
                    kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);

KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
extern int __kmp_barrier_gomp_cancel(int gtid);
enum fork_context_e {
  fork_context_gnu, /* called from GNU generated code */
  fork_context_intel, /* called from Intel generated code */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
                           kmp_va_list ap);

extern void __kmp_join_call(ident_t *loc, int gtid,
                            enum fork_context_e fork_context,
                            int exit_teams = 0);
extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);
KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();

extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, size_t len);

void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);
extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);

extern kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
                                                       int gtid,
                                                       kmp_task_t *task);
extern void __kmp_fulfill_event(kmp_event_t *event);

extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
                                  int always);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

extern int __kmp_read_from_file(char const *path, char const *format, ...);
extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[], void **exit_frame_ptr);

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);
KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);
KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
                                                  kmp_int32 global_tid);
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);
extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);
/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT kmp_task_t *__kmpc_omp_target_task_alloc(
    ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
    size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id);
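// Illustrative only (not in the original header): a "#pragma omp task" is
// lowered roughly into allocate-then-enqueue, where task_entry stands in for
// the compiler-generated outlined routine:
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1,
//                                         sizeof(kmp_task_t) /* + privates */,
//                                         /*sizeof_shareds=*/0, task_entry);
//   /* copy firstprivate data into the task, set up t->shareds */
//   __kmpc_omp_task(loc, gtid, t);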
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);

void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);
extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);
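// Illustrative only (not in the original header): a task with
// depend(in: x) depend(out: y) registers its dependences through a
// kmp_depend_info_t array before enqueueing, along the lines of:
//
//   kmp_depend_info_t deps[2];
//   deps[0].base_addr = (kmp_intptr_t)&x; deps[0].len = sizeof(x);
//   deps[0].flags.in = 1;                       // read dependence
//   deps[1].base_addr = (kmp_intptr_t)&y; deps[1].len = sizeof(y);
//   deps[1].flags.in = 1; deps[1].flags.out = 1; // write dependence
//   __kmpc_omp_task_with_deps(loc, gtid, t, 2, deps, 0, nullptr);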
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid,
                                  kmp_task_t *task, kmp_int32 if_val,
                                  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                                  kmp_int32 nogroup, kmp_int32 sched,
                                  kmp_uint64 grainsize, kmp_int32 modifier,
                                  void *task_dup);
KMP_EXPORT void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws,
                                              int num, void *data);
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
    kmp_task_affinity_info_t *affin_list);
KMP_EXPORT void __kmp_set_num_teams(int num_teams);
KMP_EXPORT int __kmp_get_max_teams(void);
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit);
KMP_EXPORT int __kmp_get_teams_thread_limit(void);
/* Lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);

KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
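// Illustrative only (not in the original header): a user lock goes through the
// init/set/unset/destroy entry points in that order, e.g.
//
//   void *lck = nullptr;                 // storage backing the user lock
//   __kmpc_init_lock(loc, gtid, &lck);
//   __kmpc_set_lock(loc, gtid, &lck);
//   /* critical section */
//   __kmpc_unset_lock(loc, gtid, &lck);
//   __kmpc_destroy_lock(loc, gtid, &lck);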
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
                                         kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
                                  kmp_critical_name *lck);
/* Internal fast reduction routines */
extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);

KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);

KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 num_threads);

KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                                      int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                                      kmp_int32 num_teams,
                                      kmp_int32 num_threads);
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 num_teams_lb,
                                         kmp_int32 num_teams_ub,
                                         kmp_int32 num_threads);

KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
                                     const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
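// Illustrative only (not in the original header): for a loop with
// "ordered(1)" doacross dependences, each thread calls __kmpc_doacross_init
// once, then per iteration waits on its sink vector and posts its own:
//
//   kmp_int64 vec[1] = {i - 1};
//   __kmpc_doacross_wait(loc, gtid, vec);   // wait for iteration i-1
//   /* ordered body */
//   vec[0] = i;
//   __kmpc_doacross_post(loc, gtid, vec);   // signal iteration i complete
//
// and __kmpc_doacross_fini when the loop is done.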
KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, size_t size,
                                             void ***cache);

/* Symbols used to detect linking with more than one OpenMP runtime. */
extern int _You_must_link_with_exactly_one_OpenMP_library;
extern int _You_must_link_with_Intel_OpenMP_library;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
extern int _You_must_link_with_Microsoft_OpenMP_library;
#endif

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);
void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();

// ompc_ and kmpc_ entries moved from omp.h.
#if KMP_OS_WINDOWS
#define KMPC_CONVENTION __cdecl
#else
#define KMPC_CONVENTION
#endif

typedef enum omp_sched_t {
  omp_sched_static = 1,
  omp_sched_dynamic = 2,
  omp_sched_guided = 3,
  omp_sched_auto = 4
} omp_sched_t;

typedef void *kmp_affinity_mask_t;
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);

KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);

void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format);
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size);
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format);
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size,
                                              char const *format);
enum kmp_target_offload_kind {
  tgt_disabled = 0,
  tgt_default = 1,
  tgt_mandatory = 2
};
typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
// Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise.
extern kmp_target_offload_kind_t __kmp_target_offload;
extern int __kmpc_get_target_offload();

// Constants used in libomptarget.
#define KMP_DEVICE_DEFAULT -1
#define KMP_DEVICE_ALL -11

// OMP pause resource support
typedef enum kmp_pause_status_t {
  kmp_not_paused = 0,
  kmp_soft_paused = 1,
  kmp_hard_paused = 2
} kmp_pause_status_t;

extern kmp_pause_status_t __kmp_pause_status;
extern int __kmpc_pause_resource(kmp_pause_status_t level);
extern int __kmp_pause_resource(kmp_pause_status_t level);
// Soft resume updates __kmp_pause_status and wakes up all threads.
extern void __kmp_resume_if_soft_paused();
// Hard resume simply resets the status to not paused.
static inline void __kmp_resume_if_hard_paused() {
  if (__kmp_pause_status == kmp_hard_paused) {
    __kmp_pause_status = kmp_not_paused;
  }
}

extern void __kmp_omp_display_env(int verbose);
extern volatile int __kmp_init_hidden_helper;
extern volatile int __kmp_hidden_helper_team_done;
extern kmp_int32 __kmp_enable_hidden_helper;
extern kmp_info_t *__kmp_hidden_helper_main_thread;
extern kmp_info_t **__kmp_hidden_helper_threads;
extern kmp_int32 __kmp_hidden_helper_threads_num;
extern std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;

extern void __kmp_hidden_helper_initialize();
extern void __kmp_hidden_helper_threads_initz_routine();
extern void __kmp_do_initialize_hidden_helper_threads();
extern void __kmp_hidden_helper_threads_initz_wait();
extern void __kmp_hidden_helper_initz_release();
extern void __kmp_hidden_helper_threads_deinitz_wait();
extern void __kmp_hidden_helper_threads_deinitz_release();
extern void __kmp_hidden_helper_main_thread_wait();
extern void __kmp_hidden_helper_worker_thread_wait();
extern void __kmp_hidden_helper_worker_thread_signal();
extern void __kmp_hidden_helper_main_thread_release();

#define KMP_HIDDEN_HELPER_THREAD(gtid) \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid) \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_TEAM(team) \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)

#define KMP_GTID_TO_SHADOW_GTID(gtid) \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
// Return gtid adjusted so that hidden helper threads do not affect the
// numbering of regular worker threads.
static inline int __kmp_adjust_gtid_for_hidden_helpers(int gtid) {
  int adjusted_gtid = gtid;
  if (__kmp_hidden_helper_threads_num > 0 && gtid > 0 &&
      gtid - __kmp_hidden_helper_threads_num >= 0) {
    adjusted_gtid -= __kmp_hidden_helper_threads_num;
  }
  return adjusted_gtid;
}
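// Illustrative only (not in the original header): with
// __kmp_hidden_helper_threads_num == 8, gtid 0 is the initial thread, gtids
// 1..8 are the hidden helper threads, and the first regular worker created
// after them gets gtid 9; __kmp_adjust_gtid_for_hidden_helpers(9) == 1, which
// matches the numbering visible through the regular OpenMP entry points.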
typedef enum kmp_severity_t {
  severity_warning = 1,
  severity_fatal = 2
} kmp_severity_t;
extern void __kmpc_error(ident_t *loc, int severity, const char *message);
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved);

template <bool C, bool S>
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_suspend_64(int th_gtid,
                                    kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
extern void __kmp_mwait_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_mwait_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_mwait_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_mwait_oncore(int th_gtid, kmp_flag_oncore *flag);
#endif
template <bool C, bool S>
extern void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_resume_64(int target_gtid,
                                   kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);

template <bool C, bool S>
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32<C, S> *flag, int final_spin,
                           int *thread_finished, kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64<C, S> *flag, int final_spin,
                           int *thread_finished, kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                                  kmp_atomic_flag_64<C, S> *flag,
                                  int final_spin, int *thread_finished,
                                  kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished, kmp_int32 is_constrained);

extern int __kmp_nesting_mode;
extern int __kmp_nesting_mode_nlevels;
extern int *__kmp_nesting_nth_level;
extern void __kmp_init_nesting_mode();
extern void __kmp_set_nesting_mode_threads();
/// This class safely opens and closes a C-style FILE* object using RAII
/// semantics.
class kmp_safe_raii_file_t {
  FILE *f;

  void close() {
    if (f && f != stdout && f != stderr) {
      fclose(f);
      f = nullptr;
    }
  }

public:
  kmp_safe_raii_file_t() : f(nullptr) {}
  kmp_safe_raii_file_t(const char *filename, const char *mode,
                       const char *env_var = nullptr)
      : f(nullptr) {
    open(filename, mode, env_var);
  }
  ~kmp_safe_raii_file_t() { close(); }

  /// Open filename using mode; issue a fatal message on failure. The env_var
  /// parameter names the environment variable the filename came from, if any.
  void open(const char *filename, const char *mode,
            const char *env_var = nullptr) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f) {
      int code = errno;
      if (env_var)
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    KMP_HNT(CheckEnvVar, env_var, filename), __kmp_msg_null);
      else
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    __kmp_msg_null);
    }
  }

  /// Like open(), but return errno instead of aborting when fopen() fails.
  int try_open(const char *filename, const char *mode) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f)
      return errno;
    return 0;
  }

  operator bool() { return bool(f); }
  operator FILE *() { return f; }
};
// kmp_convert<Source, Target, ...> selects a specialization based on the
// relative size and signedness of the two types and range-checks narrowing
// conversions with KMP_ASSERT.
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};

// Both types signed; source smaller.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Both types signed; same size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Both types signed; source bigger.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};

// Source signed, target unsigned; source smaller.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source signed, target unsigned; same size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source signed, target unsigned; source bigger.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, target signed; source smaller.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source unsigned, target signed; same size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source unsigned, target signed; source bigger.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Both types unsigned; source smaller.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Both types unsigned; same size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Both types unsigned; source bigger.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
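// Illustrative only (not in the original header): narrowing conversions inside
// the runtime go through __kmp_type_convert so that out-of-range values trip
// KMP_ASSERT in debug builds, e.g.
//
//   kmp_int64 trip_count = compute_trip_count(); // hypothetical helper
//   kmp_int32 chunk;
//   __kmp_type_convert(trip_count, &chunk); // asserts the value fits in 32 bits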