#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);
struct shared_table __kmp_threadprivate_d_table;
static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr) {
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}
static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}
/* Create a template for the data initialized storage. Either the template is
   NULL indicating zero fill, or the template is a copy of the original data. */
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p = (char *)pc_addr;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /* d->data and d->next are zeroed by __kmp_allocate */
  d->size = pc_size;
  d->more = 1;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') { /* non-zero byte found: keep a copy as the template */
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
/* Initialize the data area from the template. */
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;

  for (size_t offset = 0; d != 0; d = d->next) {
    for (int i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size); /* NULL template: zero fill */
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size); /* copy the template */
      offset += d->size;
    }
  }
}
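
/* Illustration (not part of the runtime, kept under "#if 0"): how the two
   helpers above round-trip a template. "example_template_round_trip" and its
   locals are hypothetical names used only for this sketch. */
#if 0
static void example_template_round_trip(void) {
  int serial_blob[4] = {1, 2, 3, 4}; /* original (serial) copy of the data */
  int thread_blob[4];                /* one thread's private copy */
  /* Build the template once from the serial copy; since serial_blob has a
     non-zero byte, tmpl->data holds a real copy rather than NULL. */
  struct private_data *tmpl =
      __kmp_init_common_data(serial_blob, sizeof(serial_blob));
  /* Stamp the template into the private copy. Had serial_blob been all zero
     bytes, tmpl->data would be NULL and this would zero-fill instead. */
  __kmp_copy_common_data(thread_blob, tmpl);
}
#endif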
/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif
    __kmp_threadpriv_cache_list = NULL;
#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
      }
#endif /* KMP_DEBUG */
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;
    TCW_4(__kmp_init_common, TRUE);
  }
}
/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for the primary thread though unless we used
         copy constructor */
      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}
/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    // This is possible when one of multiple roots initiates early library
    // termination in a sequential region while other teams are active, and its
    // child threads are about to end.
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {
    if (TCR_4(__kmp_init_common)) {
      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {
        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);
        if (d_tn == NULL)
          continue;
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}
#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */
/* NOTE: this routine is to be called only from the serial part of the
   program. */
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /* remaining fields are zeroed by __kmp_allocate */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */
    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /* remaining fields are zeroed by __kmp_allocate */

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */
  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* If C++ object with copy constructor, use it; else if C++ object with
     constructor, use it for the global object, then memcpy; else use memcpy. */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }

  return tn;
}
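
/* Illustration (not part of the runtime, kept under "#if 0"): the
   initialization precedence applied above to each new per-thread copy, shown
   standalone for the scalar case. "example_init_copy" is a hypothetical name
   used only for this sketch. */
#if 0
static void example_init_copy(void *copy, struct shared_common *d_tn) {
  if (d_tn->ct.ctor != 0)
    (void)(*d_tn->ct.ctor)(copy); /* default-construct in place */
  else if (d_tn->cct.cctor != 0)
    (void)(*d_tn->cct.cctor)(copy, d_tn->obj_init); /* copy from prototype */
  else
    __kmp_copy_common_data(copy, d_tn->pod_init); /* byte copy / zero fill */
}
#endif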
/* Register constructors and destructors for thread private data. */
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /* remaining fields are zeroed by __kmp_allocate */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
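
/* Illustration (not part of the runtime, kept under "#if 0"): roughly what a
   compiler might emit for "static T obj; #pragma omp threadprivate(obj)" when
   T has a default constructor and a destructor. "T", "obj" and the wrapper
   names are hypothetical; placement new assumes <new>. The copy constructor
   is passed as 0, matching the USE_CHECKS_COMMON assert above. */
#if 0
static void *obj_ctor(void *mem) { return new (mem) T(); } /* construct */
static void obj_dtor(void *mem) { ((T *)mem)->~T(); }      /* destroy */
static void register_obj(ident_t *loc) {
  __kmpc_threadprivate_register(loc, (void *)&obj, obj_ctor, /*cctor=*/0,
                                obj_dtor);
}
#endif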
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address; use
       data_address = data */
    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}
static kmp_cached_addr_t *__kmp_find_cache(void *data) {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
  while (ptr && ptr->data != data)
    ptr = ptr->next;
  return ptr;
}
/* Allocate private storage for threadprivate data. */
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid
                            void *data, // pointer to original global variable
                            size_t size, // size of original global variable
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) { // double-checked under the lock
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      // Compiler often passes in NULL cache, even if it's already been created
      void **my_cache;
      kmp_cached_addr_t *tp_cache_addr;
      // Look for an existing cache
      tp_cache_addr = __kmp_find_cache(data);
      if (!tp_cache_addr) { // Cache was never created; do it now
        __kmp_tp_cached = 1;
        KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
                           sizeof(void *) * __kmp_tp_capacity +
                           sizeof(kmp_cached_addr_t)););
        // No need to zero the allocated memory; __kmp_allocate does that.
        KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
                      "address %p\n",
                      global_tid, my_cache));
        // Add address of my_cache to linked list for cleanup later
        tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
        tp_cache_addr->addr = my_cache;
        tp_cache_addr->data = data;
        tp_cache_addr->compiler_cache = cache;
        tp_cache_addr->next = __kmp_threadpriv_cache_list;
        __kmp_threadpriv_cache_list = tp_cache_addr;
      } else { // A cache was already created; use it
        my_cache = tp_cache_addr->addr;
        tp_cache_addr->compiler_cache = cache;
      }
      KMP_MB();

      TCW_PTR(*cache, my_cache);
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

      KMP_MB();
    }
    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));
  return ret;
}
// This function should only be called when both __kmp_tp_cached_lock and
// kmp_forkjoin_lock are held.
void __kmp_threadprivate_resize_cache(int newCapacity) {
  KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
                newCapacity));

  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    if (ptr->data) { // this location has an active cache; resize it
      void **my_cache;
      KMP_ITT_IGNORE(my_cache =
                         (void **)__kmp_allocate(sizeof(void *) * newCapacity +
                                                 sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
                    my_cache));
      // Now copy old cache into new cache
      void **old_cache = ptr->addr;
      for (int i = 0; i < __kmp_tp_capacity; ++i) {
        my_cache[i] = old_cache[i];
      }

      // Add address of new my_cache to linked list for cleanup later
      kmp_cached_addr_t *tp_cache_addr;
      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->data = ptr->data;
      tp_cache_addr->compiler_cache = ptr->compiler_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Copy the new cache to the compiler's location, but only if the
      // compiler is still pointing at the old cache; otherwise the compiler
      // will call __kmpc_threadprivate_cached again with the new location.
      (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
                                      my_cache);

      // Nullify old cache's data pointer so we skip it next time
      ptr->data = NULL;
    }
    ptr = ptr->next;
  }
  // After all caches are resized, update __kmp_tp_capacity to the new size
  *(volatile int *)&__kmp_tp_capacity = newCapacity;
}
/* Register vector constructors and destructors for thread private data. */
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    /* remaining fields are zeroed by __kmp_allocate */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
void __kmp_cleanup_threadprivate_caches() {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    void **cache = ptr->addr;
    __kmp_threadpriv_cache_list = ptr->next;
    if (*ptr->compiler_cache)
      *ptr->compiler_cache = NULL;
    ptr->compiler_cache = NULL;
    ptr->data = NULL;
    ptr->addr = NULL;
    ptr->next = NULL;
    __kmp_free(cache); // implicitly frees ptr too, which lives in this block
    ptr = __kmp_threadpriv_cache_list;
  }
}
/* Public compiler entry points defined in this file, with their callback
   typedefs (declared in kmp.h):

   typedef void *(*kmpc_ctor)(void *);
   typedef void *(*kmpc_cctor)(void *, void *);
   typedef void (*kmpc_dtor)(void *);
   typedef void *(*kmpc_ctor_vec)(void *, size_t);
   typedef void *(*kmpc_cctor_vec)(void *, void *, size_t);
   typedef void (*kmpc_dtor_vec)(void *, size_t);

   void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                      kmpc_cctor cctor, kmpc_dtor dtor);
   void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                     void *data, size_t size, void ***cache);
   void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                          kmpc_ctor_vec ctor,
                                          kmpc_cctor_vec cctor,
                                          kmpc_dtor_vec dtor,
                                          size_t vector_length);
*/
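
/* Illustration (not part of the runtime, kept under "#if 0"): for an array
   such as "static T arr[N]; #pragma omp threadprivate(arr)" a compiler would
   use the _vec entry points, passing the element count as vector_length.
   "T", "arr", "N" and the wrapper names are hypothetical; placement new
   assumes <new>. */
#if 0
static void *arr_ctor_vec(void *mem, size_t n) {
  T *p = (T *)mem;
  for (size_t i = 0; i < n; ++i)
    new (&p[i]) T(); /* construct each element in place */
  return mem;
}
static void arr_dtor_vec(void *mem, size_t n) {
  T *p = (T *)mem;
  for (size_t i = 0; i < n; ++i)
    p[i].~T(); /* destroy each element */
}
static void register_arr(ident_t *loc) {
  __kmpc_threadprivate_register_vec(loc, (void *)arr, arr_ctor_vec,
                                    /*cctor=*/0, arr_dtor_vec, N);
}
#endif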