#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct shared_table __kmp_threadprivate_d_table;
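/* Each thread keeps a hash table (th.th_pri_common) mapping the address of a
   threadprivate variable to that thread's private copy, while the single global
   table __kmp_threadprivate_d_table records per-variable metadata: size,
   constructors/destructors, and the recorded initialization image. */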
#ifdef KMP_INLINE_SUBR
static
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}
#ifdef KMP_INLINE_SUBR
static
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}
static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t  i;
    char   *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
    d->size = pc_size;
    d->more = 1;

    p = (char *) pc_addr;
    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            memcpy( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}
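/* Replay a recorded initialization image onto the destination block, zero-filling
   segments that were recorded as all zeroes. */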
static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char  *addr = (char *) pc_addr;
    size_t i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                memcpy( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}
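/* One-time startup of the threadprivate machinery: reset the cache list and
   clear the global shared table. */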
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q, gtid;

        __kmp_threadpriv_cache_list = NULL;

        /* In debug builds, verify that the uber masters' private tables are still empty. */
        for (gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q )
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
            }

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}
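/* Library shutdown: invoke any remaining threadprivate destructors for every
   worker thread (and for the registered prototype objects), then clear the
   global table. */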
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;
        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /* C++ destructors need to be called once per thread before exiting;
               don't call destructors for the master thread unless a copy constructor was used. */
            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}
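/* Per-thread teardown: walk the exiting thread's list of private copies
   (th.th_pri_head) and run the registered destructor for each one. */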
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ( "__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ( "__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                            gtid ) );
        }
    }
}
#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */
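/* Called only from the serial part of the program: pre-register a threadprivate
   block in the global table and record its initialization data. */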
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
                      __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (!d_tn) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
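/* Allocate and initialize the calling thread's private copy of pc_addr,
   registering global metadata on first use; returns the new private_common node
   linked into the thread's hash table and destructor list. */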
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common *d_tn;

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */
    if (d_tn) {
        /* The variable is already registered; fill in any missing initialization info. */
        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch, so no initialization prototype is kept. */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Data-initialize the prototype since it was previously registered. */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );
#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */
    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;
#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
#endif
    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;
    /* If this is a C++ object with a copy constructor, use it; else if it has a
       constructor, run it for the non-master copies; otherwise copy the POD image. */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        }
        else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        }
        else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        }
        else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        }
        else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }

    return tn;
}
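/* Compiler/runtime entry point: register constructor, copy constructor, and
   destructor routines for a scalar threadprivate variable. */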
void
__kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
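/* Compiler/runtime entry point: return the calling thread's private copy of
   'data', creating and initializing it on first use. */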
void *
__kmpc_threadprivate( ident_t *loc, kmp_int32 global_tid, void *data, size_t size )
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* Serial part of the program: the parallel address never overlaps the data address. */
        KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ( "__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                        global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
void *
__kmpc_threadprivate_cached( ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache )
{
    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                    global_tid, *cache, data, size ) );
    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            __kmp_acquire_bootstrap_lock( &__kmp_tp_cached_lock );
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock( &__kmp_tp_cached_lock );

            void ** my_cache;
            my_cache = (void **)
                __kmp_allocate( sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t ));
            /* No need to zero the allocated memory; __kmp_allocate does that. */
            KC_TRACE( 50, ( "__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                            global_tid, my_cache ) );

            /* Add the address of my_cache to the linked list for later cleanup. */
            kmp_cached_addr_t *tp_cache_addr;

            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    void *ret;
    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                   kmpc_dtor_vec dtor, size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );        /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
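/* Illustrative sketch (not part of the runtime): roughly how compiler-generated
   code might use the cached entry point for "int counter;" declared
   threadprivate. The names counter, counter_cache, get_tp_counter, and loc are
   hypothetical and exist only for this example. */
#if 0
static int    counter;          /* original global; the serial thread's copy    */
static void **counter_cache;    /* per-variable cache, one slot per gtid        */

static int *
get_tp_counter( ident_t *loc, kmp_int32 gtid )
{
    /* Returns the calling thread's private copy, creating it on first use. */
    return (int *) __kmpc_threadprivate_cached( loc, gtid, (void *) &counter,
                                                sizeof( counter ), &counter_cache );
}
#endif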