#include "ompt-specific.h"
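// printf-style format specifiers (d, u, lld, llu, ld) used to build the
// KD_TRACE debug strings for each instantiation of the templates below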
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                              \
  {                                                                           \
    kmp_int64 t;                                                              \
    kmp_int64 u = (kmp_int64)(*pupper);                                       \
    kmp_int64 l = (kmp_int64)(*plower);                                       \
    kmp_int64 i = (kmp_int64)incr;                                            \
    if (i == 1) {                                                             \
      t = u - l + 1;                                                          \
    } else if (i == -1) {                                                     \
      t = l - u + 1;                                                          \
    } else if (i > 0) {                                                       \
      t = (u - l) / i + 1;                                                    \
    } else {                                                                  \
      t = (l - u) / (-i) + 1;                                                 \
    }                                                                         \
    KMP_COUNT_VALUE(stat, t);                                                 \
    KMP_POP_PARTITIONED_TIMER();                                              \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
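// KMP_STATS_LOOP_END computes, from the final *plower/*pupper/incr values,
// how many iterations the calling thread was assigned and records it; the
// second definition makes it compile away when stats are disabled.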
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;
  static kmp_int8 warn = 0;
  if (ompt_enabled.ompt_callback_work) {
    // only fully initialize OMPT data if the tool registered a callback
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // determine workshare type from the location flags
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // warn only once per process about outdated workshare flags
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in a DISTRIBUTE construct; convert to the usual schedule type
    schedtype += kmp_sch_static - kmp_distribute_static;
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  if (team->t.t_serialized) {
    /* serialized parallel: each thread executes the whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
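  // record the team's total iteration count once (on the primary thread only)
  // for the stats gatherer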
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(OMP_loop_static_total_iterations, trip_count);
  }
#endif

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }

  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
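      // Two strategies: "balanced" gives the first (trip_count % nth) threads
      // one extra iteration each; "greedy" gives every thread
      // ceil(trip_count / nth) iterations and clips the overhang below.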
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower) // overflow occurred
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower) // underflow occurred
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
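  // Worked example: trip_count = 10, chunk = 3, nth = 2 gives nchunks = 4 and
  // span = 3 * incr. Thread 0 starts on chunk 0, thread 1 on chunk 1; the
  // compiler-generated loop reaches chunks 2 and 3 by advancing both bounds
  // by *pstride = span * nth, clipping against the original upper bound.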
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;
    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
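  // Note: the chunk adjustment above, (span + chunk - 1) & ~(chunk - 1),
  // rounds span up to the next multiple of chunk, which is only valid when
  // chunk is a power of two (e.g. a simd width).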
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // calculate chunk in case it was not specified (kmp_sch_static)
    if (schedtype == kmp_sch_static)
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // the loop is illegal: bounds and increment are inconsistent
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
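  // Two-level decomposition: first carve the global iteration space into
  // per-team chunks (team bounds returned via *plower/*pupperDist), then
  // split this team's chunk among its threads (thread bounds via *pupper).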
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration,
    // other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip the loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower) // overflow occurred
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          // no iterations available for this team
          *pupper = *pupperDist;
          goto end;
        }
      } else {
        if (*pupperDist > *plower) // underflow occurred
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          // no iterations available for this team
          *pupper = *pupperDist;
          goto end;
        }
      }
    }
  }
  // compute trip count for this team's chunk
  if (incr == 1) {
    trip_count = *pupperDist - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupperDist + 1;
  } else if (incr > 1) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupperDist - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
  }
  KMP_DEBUG_ASSERT(trip_count);
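  // now split this team's chunk among the team's threads, mirroring the
  // logic of __kmp_for_static_init above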
  switch (schedule) {
  case kmp_sch_static: {
    if (trip_count <= nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count)
        *pupper = *plower = *plower + tid * incr;
      else
        *plower = *pupper + incr; // compiler should skip the loop body
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == trip_count - 1))
          *plastiter = 0;
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT chunkL = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
        *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == nth - 1))
            *plastiter = 0;
      } else {
        T chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T upper = *pupperDist;
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        *plower += tid * chunk_inc_count;
        *pupper = *plower + chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower) // overflow occurred
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            if (*plastiter != 0 &&
                !(*plower <= upper && *pupper > upper - incr))
              *plastiter = 0;
          if (*pupper > upper)
            *pupper = upper; // tracker C73258
        } else {
          if (*pupper > *plower) // underflow occurred
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            if (*plastiter != 0 &&
                !(*plower >= upper && *pupper < upper - incr))
              *plastiter = 0;
          if (*pupper < upper)
            *pupper = upper; // tracker C73258
        }
      }
    }
    break;
  }
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1)
      chunk = 1;
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
        *plastiter = 0;
    break;
  }
661 "__kmpc_dist_for_static_init: unknown loop scheduling type");
670 buff = __kmp_str_format(
671 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
672 "stride=%%%s signed?<%s>\n",
673 traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
674 traits_t<ST>::spec, traits_t<T>::spec);
675 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
676 __kmp_str_free(&buff);
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
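  // The routine returns the first chunk distributed to the calling team; the
  // compiler-generated code reaches subsequent chunks by advancing the bounds
  // by *p_st (a description inferred from the chunked logic below).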
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // the loop is illegal: bounds and increment are inconsistent
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow occurred
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb) // underflow occurred
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
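/* Illustrative usage (not part of this file): a compiler lowering
   "#pragma omp for schedule(static)" over kmp_int32 bounds typically emits a
   call of the following shape. The exact codegen varies by compiler; this is
   a hedged sketch only.

     kmp_int32 last = 0, lower = 0, upper = n - 1, stride = 1;
     __kmpc_for_static_init_4(loc, gtid, kmp_sch_static, &last, &lower,
                              &upper, &stride, 1, 0);
     for (kmp_int32 i = lower; i <= upper; ++i)
       body(i);
     __kmpc_for_static_fini(loc, gtid);

   For chunked schedules an outer loop advances lower/upper by stride until
   the original upper bound is passed. */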