ARGOBOTS  66b1c39742507d8df30e8d28c54839b961a14814
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups
thread.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 ABTU_ret_err static inline int
9 ythread_create(ABTI_local *p_local, ABTI_pool *p_pool,
10  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
11  ABTI_thread_type thread_type, ABTI_sched *p_sched,
12  ABT_bool push_pool, ABTI_ythread **pp_newthread);
13 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread);
14 static inline void thread_free(ABTI_local *p_local, ABTI_thread *p_thread,
15  ABT_bool free_unit);
16 static void thread_root_func(void *arg);
17 static void thread_main_sched_func(void *arg);
18 #ifndef ABT_CONFIG_DISABLE_MIGRATION
20  ABTI_thread *p_thread,
21  ABTI_xstream *p_xstream);
23  ABTI_thread *p_thread,
24  ABTI_pool *p_pool);
25 #endif
26 static inline ABT_unit_id thread_get_new_id(void);
27 
28 static void thread_key_destructor_stackable_sched(void *p_value);
32 static void thread_key_destructor_migration(void *p_value);
36 
63 int ABT_thread_create(ABT_pool pool, void (*thread_func)(void *), void *arg,
64  ABT_thread_attr attr, ABT_thread *newthread)
65 {
66  ABTI_local *p_local = ABTI_local_get_local();
67  ABTI_ythread *p_newthread;
68 
69  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
71 
72  ABTI_thread_type unit_type =
73  (newthread != NULL)
76  int abt_errno = ythread_create(p_local, p_pool, thread_func, arg,
77  ABTI_thread_attr_get_ptr(attr), unit_type,
78  NULL, ABT_TRUE, &p_newthread);
79  ABTI_CHECK_ERROR(abt_errno);
80 
81  /* Return value */
82  if (newthread)
83  *newthread = ABTI_ythread_get_handle(p_newthread);
84  return ABT_SUCCESS;
85 }
86 
128  void (*thread_func)(void *), void *arg,
129  ABT_thread_attr attr, ABT_thread *newthread)
130 {
131  ABTI_local *p_local = ABTI_local_get_local();
132  ABTI_ythread *p_newthread;
133 
134  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
135  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
136 
137  /* TODO: need to consider the access type of target pool */
138  ABTI_pool *p_pool = ABTI_xstream_get_main_pool(p_xstream);
139  ABTI_thread_type unit_type =
140  (newthread != NULL)
143  int abt_errno = ythread_create(p_local, p_pool, thread_func, arg,
144  ABTI_thread_attr_get_ptr(attr), unit_type,
145  NULL, ABT_TRUE, &p_newthread);
146  ABTI_CHECK_ERROR(abt_errno);
147 
148  /* Return value */
149  if (newthread)
150  *newthread = ABTI_ythread_get_handle(p_newthread);
151 
152  return ABT_SUCCESS;
153 }
154 
178 int ABT_thread_create_many(int num, ABT_pool *pool_list,
179  void (**thread_func_list)(void *), void **arg_list,
180  ABT_thread_attr attr, ABT_thread *newthread_list)
181 {
182  ABTI_local *p_local = ABTI_local_get_local();
183  int i;
184 
185  if (attr != ABT_THREAD_ATTR_NULL) {
186  /* This implies that the stack is given by a user. Since threads
187  * cannot use the same stack region, this is illegal. */
188  ABTI_CHECK_TRUE(!(ABTI_thread_attr_get_ptr(attr)->thread_type &
192  }
193 
194  if (newthread_list == NULL) {
195  for (i = 0; i < num; i++) {
196  ABTI_ythread *p_newthread;
197  ABT_pool pool = pool_list[i];
198  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
199  ABTI_CHECK_NULL_POOL_PTR(p_pool);
200 
201  void (*thread_f)(void *) = thread_func_list[i];
202  void *arg = arg_list ? arg_list[i] : NULL;
203  int abt_errno = ythread_create(p_local, p_pool, thread_f, arg,
206  ABT_TRUE, &p_newthread);
207  ABTI_CHECK_ERROR(abt_errno);
208  }
209  } else {
210  for (i = 0; i < num; i++) {
211  ABTI_ythread *p_newthread;
212  ABT_pool pool = pool_list[i];
213  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
214  ABTI_CHECK_NULL_POOL_PTR(p_pool);
215 
216  void (*thread_f)(void *) = thread_func_list[i];
217  void *arg = arg_list ? arg_list[i] : NULL;
218  int abt_errno = ythread_create(p_local, p_pool, thread_f, arg,
222  NULL, ABT_TRUE, &p_newthread);
223  newthread_list[i] = ABTI_ythread_get_handle(p_newthread);
224  /* TODO: Release threads that have been already created. */
225  ABTI_CHECK_ERROR(abt_errno);
226  }
227  }
228 
229  return ABT_SUCCESS;
230 }
231 
251 int ABT_thread_revive(ABT_pool pool, void (*thread_func)(void *), void *arg,
252  ABT_thread *thread)
253 {
254  ABTI_local *p_local = ABTI_local_get_local();
255 
256  ABTI_thread *p_thread = ABTI_thread_get_ptr(*thread);
257  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
258 
262 
263  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
264  ABTI_CHECK_NULL_POOL_PTR(p_pool);
265 
266  ABTI_thread_revive(p_local, p_pool, thread_func, arg, p_thread);
267 
268  return ABT_SUCCESS;
269 }
270 
285 {
286  ABTI_local *p_local = ABTI_local_get_local();
287  ABT_thread h_thread = *thread;
288 
289  ABTI_thread *p_thread = ABTI_thread_get_ptr(h_thread);
290  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
291 
292  /* We first need to check whether p_local_xstream is NULL because external
293  * threads might call this routine. */
295  p_thread !=
296  ABTI_local_get_xstream(p_local)->p_thread,
298  "The current thread cannot be freed.");
299 
303  "The main thread cannot be freed explicitly.");
304 
305  /* Wait until the thread terminates */
306  thread_join(&p_local, p_thread);
307  /* Free the ABTI_thread structure */
308  ABTI_thread_free(p_local, p_thread);
309 
310  /* Return value */
311  *thread = ABT_THREAD_NULL;
312 
313  return ABT_SUCCESS;
314 }
315 
329 int ABT_thread_free_many(int num, ABT_thread *thread_list)
330 {
331  ABTI_local *p_local = ABTI_local_get_local();
332  int i;
333 
334  for (i = 0; i < num; i++) {
335  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread_list[i]);
336  /* TODO: check input */
337  thread_join(&p_local, p_thread);
338  ABTI_thread_free(p_local, p_thread);
339  }
340  return ABT_SUCCESS;
341 }
342 
354 {
355  ABTI_local *p_local = ABTI_local_get_local();
356  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
357  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
358 
360  p_thread !=
361  ABTI_local_get_xstream(p_local)->p_thread,
363  "The current thread cannot be freed.");
364 
368  "The main thread cannot be freed explicitly.");
369 
370  thread_join(&p_local, p_thread);
371  return ABT_SUCCESS;
372 }
373 
386 int ABT_thread_join_many(int num_threads, ABT_thread *thread_list)
387 {
388  ABTI_local *p_local = ABTI_local_get_local();
389  int i;
390  for (i = 0; i < num_threads; i++) {
391  /* TODO: check input */
392  thread_join(&p_local, ABTI_thread_get_ptr(thread_list[i]));
393  }
394  return ABT_SUCCESS;
395 }
396 
410 {
411  ABTI_xstream *p_local_xstream;
412  ABTI_ythread *p_ythread;
413  ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(&p_local_xstream, &p_ythread);
414 
415  ABTI_ythread_exit(p_local_xstream, p_ythread);
416  return ABT_SUCCESS;
417 }
418 
428 {
429 #ifdef ABT_CONFIG_DISABLE_THREAD_CANCEL
431 #else
432  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
433  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
437  "The main thread cannot be canceled.");
438 
439  /* Set the cancel request */
441  return ABT_SUCCESS;
442 #endif
443 }
444 
465 {
466  *thread = ABT_THREAD_NULL;
467 
468  ABTI_xstream *p_local_xstream;
470  ABTI_thread *p_thread = p_local_xstream->p_thread;
471  if (!(p_thread->type & ABTI_THREAD_TYPE_YIELDABLE)) {
472  /* This is checked even if an error check is disabled. */
474  }
475 
476  *thread = ABTI_thread_get_handle(p_thread);
477  return ABT_SUCCESS;
478 }
479 
494 {
495  ABTI_ythread *p_self;
497 
498  *id = ABTI_thread_get_id(&p_self->thread);
499  return ABT_SUCCESS;
500 }
501 
516 {
517  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
518  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
519 
520  *xstream = ABTI_xstream_get_handle(p_thread->p_last_xstream);
521  return ABT_SUCCESS;
522 }
523 
534 {
535  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
536  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
537 
539  return ABT_SUCCESS;
540 }
541 
555 {
556  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
557  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
558 
559  *pool = ABTI_pool_get_handle(p_thread->p_pool);
560  return ABT_SUCCESS;
561 }
562 
578 {
579  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
580  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
581  ABTI_ASSERT(p_thread->p_pool);
582 
583  *id = (int)(p_thread->p_pool->id);
584  return ABT_SUCCESS;
585 }
586 
605 {
606  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
607  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
608  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
609  ABTI_CHECK_NULL_POOL_PTR(p_pool);
610 
611  p_thread->p_pool = p_pool;
612  return ABT_SUCCESS;
613 }
614 
630 {
631  ABTI_ythread *p_tar_ythread = ABTI_ythread_get_ptr(thread);
632  ABTI_CHECK_NULL_YTHREAD_PTR(p_tar_ythread);
633 
634  ABTI_xstream *p_local_xstream;
635  ABTI_ythread *p_cur_ythread;
636  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_cur_ythread);
637 
638  LOG_DEBUG("[U%" PRIu64 ":E%d] yield_to -> U%" PRIu64 "\n",
639  ABTI_thread_get_id(&p_cur_ythread->thread),
640  p_cur_ythread->thread.p_last_xstream->rank,
641  ABTI_thread_get_id(&p_tar_ythread->thread));
642 
643  /* The target ULT must be different from the caller ULT. */
644  ABTI_CHECK_TRUE_MSG(p_cur_ythread != p_tar_ythread, ABT_ERR_INV_THREAD,
645  "The caller and target ULTs are the same.");
646 
648  &p_tar_ythread->thread.state) !=
651  "Cannot yield to the terminated thread");
652 
653  /* Both threads must be associated with the same pool. */
654  /* FIXME: instead of same pool, runnable by the same ES */
655  ABTI_CHECK_TRUE_MSG(p_cur_ythread->thread.p_pool ==
656  p_tar_ythread->thread.p_pool,
658  "The target thread's pool is not the same as mine.");
659 
660  /* If the target thread is not in READY, we don't yield. Note that ULT can
661  * be regarded as 'ready' only if its state is READY and it has been
662  * pushed into a pool. Since we set ULT's state to READY and then push it
663  * into a pool, we check them in the reverse order, i.e., check if the ULT
664  * is inside a pool and the its state. */
665  if (!(p_tar_ythread->thread.p_pool->u_is_in_pool(
666  p_tar_ythread->thread.unit) == ABT_TRUE &&
667  ABTD_atomic_acquire_load_int(&p_tar_ythread->thread.state) ==
669  return ABT_SUCCESS;
670  }
671 
672  /* Remove the target ULT from the pool */
674  /* This is necessary to prevent the size of this pool from 0. */
675  ABTI_pool_inc_num_blocked(p_tar_ythread->thread.p_pool);
676  }
677  int abt_errno = ABTI_pool_remove(p_tar_ythread->thread.p_pool,
678  p_tar_ythread->thread.unit);
680  ABTI_pool_dec_num_blocked(p_tar_ythread->thread.p_pool);
681  ABTI_CHECK_ERROR(abt_errno);
682  }
683 
686 
687  /* This operation is corresponding to yield */
688  ABTI_tool_event_ythread_yield(p_local_xstream, p_cur_ythread,
689  p_cur_ythread->thread.p_parent,
691 
692  /* Add the current thread to the pool again. */
693  ABTI_pool_push(p_cur_ythread->thread.p_pool, p_cur_ythread->thread.unit);
694 
695  /* We set the last ES */
696  p_tar_ythread->thread.p_last_xstream = p_local_xstream;
697 
698  /* Switch the context */
701  ABTI_ythread *p_prev =
702  ABTI_ythread_context_switch_to_sibling(&p_local_xstream, p_cur_ythread,
703  p_tar_ythread);
704  ABTI_tool_event_thread_run(p_local_xstream, &p_cur_ythread->thread,
705  &p_prev->thread, p_cur_ythread->thread.p_parent);
706  return ABT_SUCCESS;
707 }
708 
724 {
725  ABTI_xstream *p_local_xstream;
726  ABTI_ythread *p_ythread;
727  ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(&p_local_xstream, &p_ythread);
728 
729  ABTI_ythread_yield(&p_local_xstream, p_ythread, ABT_SYNC_EVENT_TYPE_USER,
730  NULL);
731  return ABT_SUCCESS;
732 }
733 
751 {
752  ABTI_local *p_local = ABTI_local_get_local();
753 
754  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
755  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
756  ABTI_ythread *p_ythread;
757  ABTI_CHECK_YIELDABLE(p_thread, &p_ythread, ABT_ERR_INV_THREAD);
758 
759  /* The ULT must be in BLOCKED state. */
763 
764  ABTI_ythread_set_ready(p_local, p_ythread);
765  return ABT_SUCCESS;
766 }
767 
792 {
793 #ifndef ABT_CONFIG_DISABLE_MIGRATION
794  ABTI_local *p_local = ABTI_local_get_local();
795  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
796  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
797  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
798  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
799 
800  int abt_errno = thread_migrate_to_xstream(&p_local, p_thread, p_xstream);
801  ABTI_CHECK_ERROR(abt_errno);
802  return ABT_SUCCESS;
803 #else
805 #endif
806 }
807 
831 {
832 #ifndef ABT_CONFIG_DISABLE_MIGRATION
833  ABTI_local *p_local = ABTI_local_get_local();
834  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
835  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
836  ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
837  ABTI_CHECK_NULL_SCHED_PTR(p_sched);
838 
839  /* checking for cases when migration is not allowed */
840  ABTI_CHECK_TRUE(!(p_thread->type &
846 
847  /* Find a pool */
848  ABTI_pool *p_pool;
849  int abt_errno;
850  abt_errno =
851  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
852  ABTI_CHECK_ERROR(abt_errno);
853 
854  abt_errno = thread_migrate_to_pool(&p_local, p_thread, p_pool);
855  ABTI_CHECK_ERROR(abt_errno);
856 
858  return ABT_SUCCESS;
859 #else
861 #endif
862 }
863 
884 {
885 #ifndef ABT_CONFIG_DISABLE_MIGRATION
886  ABTI_local *p_local = ABTI_local_get_local();
887  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
888  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
889  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
890  ABTI_CHECK_NULL_POOL_PTR(p_pool);
891 
892  int abt_errno = thread_migrate_to_pool(&p_local, p_thread, p_pool);
893  ABTI_CHECK_ERROR(abt_errno);
894 
896  return ABT_SUCCESS;
897 #else
899 #endif
900 }
901 
924 {
925 #ifndef ABT_CONFIG_DISABLE_MIGRATION
926  /* TODO: fix the bug(s) */
927  ABTI_local *p_local = ABTI_local_get_local();
928 
929  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
930  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
932 
933  /* Choose the destination xstream */
934  /* FIXME: Currently, the target xstream is linearly chosen. We need a
935  * better selection strategy. */
936  /* TODO: handle better when no pool accepts migration */
937 
939  while (p_xstream) {
940  if (p_xstream != p_thread->p_last_xstream) {
941  if (ABTD_atomic_acquire_load_int(&p_xstream->state) ==
943  int abt_errno =
944  thread_migrate_to_xstream(&p_local, p_thread, p_xstream);
945  if (abt_errno != ABT_ERR_INV_XSTREAM &&
946  abt_errno != ABT_ERR_MIGRATION_TARGET) {
947  ABTI_CHECK_ERROR(abt_errno);
948  break;
949  }
950  }
951  }
952  p_xstream = p_xstream->p_next;
953  }
954  return ABT_SUCCESS;
955 #else
957 #endif
958 }
959 
974  void (*cb_func)(ABT_thread thread, void *cb_arg),
975  void *cb_arg)
976 {
977 #ifndef ABT_CONFIG_DISABLE_MIGRATION
978  ABTI_local *p_local = ABTI_local_get_local();
979  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
980  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
981 
982  ABTI_thread_mig_data *p_mig_data;
983  int abt_errno = ABTI_thread_get_mig_data(p_local, p_thread, &p_mig_data);
984  ABTI_CHECK_ERROR(abt_errno);
985 
986  p_mig_data->f_migration_cb = cb_func;
987  p_mig_data->p_migration_cb_arg = cb_arg;
988  return ABT_SUCCESS;
989 #else
991 #endif
992 }
993 
1010 {
1011 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1012  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1013  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1014 
1015  if (!(p_thread->type &
1017  if (flag) {
1018  p_thread->type |= ABTI_THREAD_TYPE_MIGRATABLE;
1019  } else {
1020  p_thread->type &= ~ABTI_THREAD_TYPE_MIGRATABLE;
1021  }
1022  }
1023  return ABT_SUCCESS;
1024 #else
1026 #endif
1027 }
1028 
1044 {
1045 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1046  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1047  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1048 
1049  *flag =
1051  return ABT_SUCCESS;
1052 #else
1054 #endif
1055 }
1056 
1074 {
1075  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1076  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1077 
1078  *flag = (p_thread->type & ABTI_THREAD_TYPE_MAIN) ? ABT_TRUE : ABT_FALSE;
1079  return ABT_SUCCESS;
1080 }
1081 
1097 {
1098  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1099  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1100 
1101  *flag = (p_thread->type & ABTI_THREAD_TYPE_NAMED) ? ABT_FALSE : ABT_TRUE;
1102  return ABT_SUCCESS;
1103 }
1104 
1120 int ABT_thread_equal(ABT_thread thread1, ABT_thread thread2, ABT_bool *result)
1121 {
1122  ABTI_thread *p_thread1 = ABTI_thread_get_ptr(thread1);
1123  ABTI_thread *p_thread2 = ABTI_thread_get_ptr(thread2);
1124  *result = (p_thread1 == p_thread2) ? ABT_TRUE : ABT_FALSE;
1125  return ABT_SUCCESS;
1126 }
1127 
1139 int ABT_thread_get_stacksize(ABT_thread thread, size_t *stacksize)
1140 {
1141  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1142  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1143  ABTI_ythread *p_ythread;
1144  ABTI_CHECK_YIELDABLE(p_thread, &p_ythread, ABT_ERR_INV_THREAD);
1145 
1146  *stacksize = p_ythread->stacksize;
1147  return ABT_SUCCESS;
1148 }
1149 
1162 {
1163  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1164  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1165 
1166  *thread_id = ABTI_thread_get_id(p_thread);
1167  return ABT_SUCCESS;
1168 }
1169 
1181 int ABT_thread_set_arg(ABT_thread thread, void *arg)
1182 {
1183  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1184  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1185 
1186  p_thread->p_arg = arg;
1187  return ABT_SUCCESS;
1188 }
1189 
1203 int ABT_thread_get_arg(ABT_thread thread, void **arg)
1204 {
1205  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1206  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1207 
1208  *arg = p_thread->p_arg;
1209  return ABT_SUCCESS;
1210 }
1211 
1225 int ABT_thread_set_specific(ABT_thread thread, ABT_key key, void *value)
1226 {
1227  ABTI_local *p_local = ABTI_local_get_local();
1228 
1229  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1230  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1231 
1232  ABTI_key *p_key = ABTI_key_get_ptr(key);
1233  ABTI_CHECK_NULL_KEY_PTR(p_key);
1234 
1235  /* Set the value. */
1236  int abt_errno =
1237  ABTI_ktable_set(p_local, &p_thread->p_keytable, p_key, value);
1238  ABTI_CHECK_ERROR(abt_errno);
1239  return ABT_SUCCESS;
1240 }
1241 
1257 int ABT_thread_get_specific(ABT_thread thread, ABT_key key, void **value)
1258 {
1259  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1260  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1261 
1262  ABTI_key *p_key = ABTI_key_get_ptr(key);
1263  ABTI_CHECK_NULL_KEY_PTR(p_key);
1264 
1265  /* Get the value. */
1266  *value = ABTI_ktable_get(&p_thread->p_keytable, p_key);
1267  return ABT_SUCCESS;
1268 }
1269 
1286 {
1287  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1288  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1289 
1290  ABTI_thread_attr thread_attr, *p_attr;
1291  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1292  if (p_ythread) {
1293  thread_attr.p_stack = p_ythread->p_stack;
1294  thread_attr.stacksize = p_ythread->stacksize;
1295  } else {
1296  thread_attr.p_stack = NULL;
1297  thread_attr.stacksize = 0;
1298  }
1299  thread_attr.thread_type = p_thread->type;
1300 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1301  thread_attr.migratable =
1303  ABTI_thread_mig_data *p_mig_data =
1305  &g_thread_mig_data_key);
1306  if (p_mig_data) {
1307  thread_attr.f_cb = p_mig_data->f_migration_cb;
1308  thread_attr.p_cb_arg = p_mig_data->p_migration_cb_arg;
1309  } else {
1310  thread_attr.f_cb = NULL;
1311  thread_attr.p_cb_arg = NULL;
1312  }
1313 #endif
1314  int abt_errno = ABTI_thread_attr_dup(&thread_attr, &p_attr);
1315  ABTI_CHECK_ERROR(abt_errno);
1316 
1317  *attr = ABTI_thread_attr_get_handle(p_attr);
1318  return ABT_SUCCESS;
1319 }
1320 
1321 /*****************************************************************************/
1322 /* Private APIs */
1323 /*****************************************************************************/
1324 
1325 void ABTI_thread_revive(ABTI_local *p_local, ABTI_pool *p_pool,
1326  void (*thread_func)(void *), void *arg,
1327  ABTI_thread *p_thread)
1328 {
1331  p_thread->f_thread = thread_func;
1332  p_thread->p_arg = arg;
1333 
1336  p_thread->p_last_xstream = NULL;
1337  p_thread->p_parent = NULL;
1338 
1339  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1340  if (p_thread->p_pool != p_pool) {
1341  /* Free the unit for the old pool */
1342  p_thread->p_pool->u_free(&p_thread->unit);
1343 
1344  /* Set the new pool */
1345  p_thread->p_pool = p_pool;
1346 
1347  /* Create a wrapper unit */
1348  if (p_ythread) {
1349  ABT_thread h_thread = ABTI_ythread_get_handle(p_ythread);
1350  p_thread->unit = p_pool->u_create_from_thread(h_thread);
1351  } else {
1352  ABT_task task = ABTI_thread_get_handle(p_thread);
1353  p_thread->unit = p_pool->u_create_from_task(task);
1354  }
1355  }
1356 
1357  if (p_ythread) {
1358  /* Create a ULT context */
1359  size_t stacksize = p_ythread->stacksize;
1360  ABTD_ythread_context_create(NULL, stacksize, p_ythread->p_stack,
1361  &p_ythread->ctx);
1362  }
1363 
1364  /* Invoke a thread revive event. */
1365  ABTI_tool_event_thread_revive(p_local, p_thread,
1367  ? ABTI_local_get_xstream(p_local)
1368  ->p_thread
1369  : NULL,
1370  p_pool);
1371 
1372  LOG_DEBUG("[U%" PRIu64 "] revived\n", ABTI_thread_get_id(p_thread));
1373 
1374  /* Add this thread to the pool */
1375  ABTI_pool_push(p_pool, p_thread->unit);
1376 }
1377 
1379  ABTI_xstream *p_xstream,
1380  ABTI_ythread **p_ythread)
1381 {
1382  ABTI_thread_attr attr;
1383  ABTI_pool *p_pool;
1384 
1385  /* Get the first pool of ES */
1386  p_pool = ABTI_pool_get_ptr(p_xstream->p_main_sched->pools[0]);
1387 
1388  /* Allocate a ULT object */
1389 
1390  /* TODO: Need to set the actual stack address and size for the main ULT */
1392  ABT_FALSE);
1393 
1394  /* Although this main ULT is running now, we add this main ULT to the pool
1395  * so that the scheduler can schedule the main ULT when the main ULT is
1396  * context switched to the scheduler for the first time. */
1397  ABT_bool push_pool = ABT_TRUE;
1398  int abt_errno =
1399  ythread_create(p_local, p_pool, NULL, NULL, &attr,
1401  push_pool, p_ythread);
1402  ABTI_CHECK_ERROR(abt_errno);
1403  return ABT_SUCCESS;
1404 }
1405 
1407  ABTI_xstream *p_xstream,
1408  ABTI_ythread **pp_root_ythread)
1409 {
1410  ABTI_thread_attr attr;
1411  /* Create a ULT context */
1412  if (p_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
1413  /* Create a thread with its stack */
1416  ABT_FALSE);
1417  } else {
1418  /* For secondary ESs, the stack of an OS thread is used. */
1420  ABT_FALSE);
1421  }
1422  ABTI_ythread *p_root_ythread;
1423  int abt_errno =
1424  ythread_create(p_local, NULL, thread_root_func, NULL, &attr,
1426  ABT_FALSE, &p_root_ythread);
1427  ABTI_CHECK_ERROR(abt_errno);
1428  *pp_root_ythread = p_root_ythread;
1429  return ABT_SUCCESS;
1430 }
1431 
1433  ABTI_xstream *p_xstream,
1434  ABTI_sched *p_sched)
1435 {
1436  ABTI_thread_attr attr;
1437 
1438  /* Allocate a ULT object and its stack */
1441  int abt_errno =
1442  ythread_create(p_local, p_xstream->p_root_pool, thread_main_sched_func,
1443  NULL, &attr,
1446  p_sched, ABT_TRUE, &p_sched->p_ythread);
1447  ABTI_CHECK_ERROR(abt_errno);
1448  return ABT_SUCCESS;
1449 }
1450 
1451 /* This routine is to create a ULT for the scheduler. */
1453  ABTI_pool *p_pool,
1454  ABTI_sched *p_sched)
1455 {
1456  ABTI_thread_attr attr;
1457 
1458  /* Allocate a ULT object and its stack */
1461  int abt_errno =
1462  ythread_create(p_local, p_pool, (void (*)(void *))p_sched->run,
1463  (void *)ABTI_sched_get_handle(p_sched), &attr,
1465  &p_sched->p_ythread);
1466  ABTI_CHECK_ERROR(abt_errno);
1467  return ABT_SUCCESS;
1468 }
1469 
1470 void ABTI_thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
1471 {
1472  thread_join(pp_local, p_thread);
1473 }
1474 
1475 void ABTI_thread_free(ABTI_local *p_local, ABTI_thread *p_thread)
1476 {
1477  LOG_DEBUG("[U%" PRIu64 ":E%d] freed\n", ABTI_thread_get_id(p_thread),
1479  ? ABTI_local_get_xstream(p_local)->rank
1480  : -1);
1481  thread_free(p_local, p_thread, ABT_TRUE);
1482 }
1483 
1485 {
1486  ABTI_thread *p_thread = &p_ythread->thread;
1487  LOG_DEBUG("[U%" PRIu64 ":E%d] main ULT freed\n",
1488  ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
1489  thread_free(p_local, p_thread, ABT_FALSE);
1490 }
1491 
1493 {
1494  thread_free(p_local, &p_ythread->thread, ABT_FALSE);
1495 }
1496 
1498  ABTI_ythread *p_ythread)
1499 {
1500  /* Set the exit request */
1502 
1503  /* Terminate this ULT */
1504  ABTD_ythread_exit(p_local_xstream, p_ythread);
1505  ABTU_unreachable();
1506 }
1507 
1509  ABTI_thread *p_thread,
1510  ABTI_thread_mig_data **pp_mig_data)
1511 {
1512  ABTI_thread_mig_data *p_mig_data =
1514  &g_thread_mig_data_key);
1515  if (!p_mig_data) {
1516  int abt_errno;
1517  abt_errno =
1518  ABTU_calloc(1, sizeof(ABTI_thread_mig_data), (void **)&p_mig_data);
1519  ABTI_CHECK_ERROR(abt_errno);
1520  abt_errno = ABTI_ktable_set(p_local, &p_thread->p_keytable,
1521  &g_thread_mig_data_key, (void *)p_mig_data);
1522  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
1523  /* Failed to add p_mig_data to p_thread's keytable. */
1524  ABTU_free(p_mig_data);
1525  return abt_errno;
1526  }
1527  }
1528  *pp_mig_data = p_mig_data;
1529  return ABT_SUCCESS;
1530 }
1531 
1532 void ABTI_thread_print(ABTI_thread *p_thread, FILE *p_os, int indent)
1533 {
1534  if (p_thread == NULL) {
1535  fprintf(p_os, "%*s== NULL thread ==\n", indent, "");
1536  } else {
1537  ABTI_xstream *p_xstream = p_thread->p_last_xstream;
1538  int xstream_rank = p_xstream ? p_xstream->rank : 0;
1539  char *type, *yieldable, *state;
1540 
1541  if (p_thread->type & ABTI_THREAD_TYPE_MAIN) {
1542  type = "MAIN";
1543  } else if (p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED) {
1544  type = "MAIN_SCHED";
1545  } else {
1546  type = "USER";
1547  }
1548  if (p_thread->type & ABTI_THREAD_TYPE_YIELDABLE) {
1549  yieldable = "yes";
1550  } else {
1551  yieldable = "no";
1552  }
1553  switch (ABTD_atomic_acquire_load_int(&p_thread->state)) {
1555  state = "READY";
1556  break;
1558  state = "RUNNING";
1559  break;
1561  state = "BLOCKED";
1562  break;
1564  state = "TERMINATED";
1565  break;
1566  default:
1567  state = "UNKNOWN";
1568  break;
1569  }
1570 
1571  fprintf(p_os,
1572  "%*s== Thread (%p) ==\n"
1573  "%*sid : %" PRIu64 "\n"
1574  "%*stype : %s\n"
1575  "%*syieldable : %s\n"
1576  "%*sstate : %s\n"
1577  "%*slast_ES : %p (%d)\n"
1578  "%*sp_arg : %p\n"
1579  "%*spool : %p\n"
1580  "%*srequest : 0x%x\n"
1581  "%*skeytable : %p\n",
1582  indent, "", (void *)p_thread, indent, "",
1583  ABTI_thread_get_id(p_thread), indent, "", type, indent, "",
1584  yieldable, indent, "", state, indent, "", (void *)p_xstream,
1585  xstream_rank, indent, "", p_thread->p_arg, indent, "",
1586  (void *)p_thread->p_pool, indent, "",
1587  ABTD_atomic_acquire_load_uint32(&p_thread->request), indent, "",
1589  }
1590  fflush(p_os);
1591 }
1592 
1596 {
1597  ABTD_atomic_release_store_uint64(&g_thread_id, 0);
1598 }
1599 
1601 {
1602  if (p_thread == NULL)
1603  return ABTI_THREAD_INIT_ID;
1604 
1605  if (p_thread->id == ABTI_THREAD_INIT_ID) {
1606  p_thread->id = thread_get_new_id();
1607  }
1608  return p_thread->id;
1609 }
1610 
1611 /*****************************************************************************/
1612 /* Internal static functions */
1613 /*****************************************************************************/
1614 
1615 ABTU_ret_err static inline int
1617  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
1618  ABTI_thread_type thread_type, ABTI_sched *p_sched,
1619  ABT_bool push_pool, ABTI_ythread **pp_newthread)
1620 {
1621  int abt_errno;
1622  ABTI_ythread *p_newthread;
1623  ABT_thread h_newthread;
1624  ABTI_ktable *p_keytable = NULL;
1625 
1626  /* Allocate a ULT object and its stack, then create a thread context. */
1627  if (!p_attr) {
1628  abt_errno = ABTI_mem_alloc_ythread_default(p_local, &p_newthread);
1629  ABTI_CHECK_ERROR(abt_errno);
1630 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1631  thread_type |= ABTI_THREAD_TYPE_MIGRATABLE;
1632 #endif
1633  } else {
1634  ABTI_thread_type attr_type = p_attr->thread_type;
1635  if (attr_type & ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC_STACK) {
1636 #ifdef ABT_CONFIG_USE_MEM_POOL
1637  abt_errno =
1638  ABTI_mem_alloc_ythread_mempool_desc_stack(p_local, p_attr,
1639  &p_newthread);
1640  ABTI_CHECK_ERROR(abt_errno);
1641 #else
1642  abt_errno =
1643  ABTI_mem_alloc_ythread_malloc_desc_stack(p_attr, &p_newthread);
1644 #endif
1645  ABTI_CHECK_ERROR(abt_errno);
1646  } else if (attr_type & ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK) {
1647  abt_errno =
1648  ABTI_mem_alloc_ythread_malloc_desc_stack(p_attr, &p_newthread);
1649  ABTI_CHECK_ERROR(abt_errno);
1650  } else {
1653  /* Let's try to use mempool first since it performs better. */
1654  abt_errno = ABTI_mem_alloc_ythread_mempool_desc(p_local, p_attr,
1655  &p_newthread);
1656  ABTI_CHECK_ERROR(abt_errno);
1657  }
1658 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1659  thread_type |= p_attr->migratable ? ABTI_THREAD_TYPE_MIGRATABLE : 0;
1660  if (ABTU_unlikely(p_attr->f_cb)) {
1661  ABTI_thread_mig_data *p_mig_data;
1662  abt_errno = ABTU_calloc(1, sizeof(ABTI_thread_mig_data),
1663  (void **)&p_mig_data);
1665  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1666  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1667  return abt_errno;
1668  }
1669  p_mig_data->f_migration_cb = p_attr->f_cb;
1670  p_mig_data->p_migration_cb_arg = p_attr->p_cb_arg;
1671  abt_errno = ABTI_ktable_set_unsafe(p_local, &p_keytable,
1672  &g_thread_mig_data_key,
1673  (void *)p_mig_data);
1675  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1676  if (p_keytable)
1677  ABTI_ktable_free(p_local, p_keytable);
1678  ABTU_free(p_mig_data);
1679  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1680  return abt_errno;
1681  }
1682  }
1683 #endif
1684  }
1685 
1686  if (thread_type & (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_ROOT)) {
1687  if (p_newthread->p_stack == NULL) {
1688  /* We don't need to initialize the context if a thread will run on
1689  * OS-level threads. Invalidate the context here. */
1690  ABTD_ythread_context_invalidate(&p_newthread->ctx);
1691  } else {
1692  /* Create the context. This thread is special, so dynamic promotion
1693  * is not supported. */
1694  size_t stack_size = p_newthread->stacksize;
1695  void *p_stack = p_newthread->p_stack;
1696  ABTD_ythread_context_create(NULL, stack_size, p_stack,
1697  &p_newthread->ctx);
1698  }
1699  } else {
1700 #if ABT_CONFIG_THREAD_TYPE != ABT_THREAD_TYPE_DYNAMIC_PROMOTION
1701  size_t stack_size = p_newthread->stacksize;
1702  void *p_stack = p_newthread->p_stack;
1703  ABTD_ythread_context_create(NULL, stack_size, p_stack,
1704  &p_newthread->ctx);
1705 #else
1706  /* The context is not fully created now. */
1707  ABTD_ythread_context_init(NULL, &p_newthread->ctx);
1708 #endif
1709  }
1710  p_newthread->thread.f_thread = thread_func;
1711  p_newthread->thread.p_arg = arg;
1712 
1716  p_newthread->thread.p_last_xstream = NULL;
1717  p_newthread->thread.p_parent = NULL;
1718  p_newthread->thread.p_pool = p_pool;
1719  p_newthread->thread.type |= thread_type;
1720  p_newthread->thread.id = ABTI_THREAD_INIT_ID;
1721  if (p_sched && !(thread_type &
1723  /* Set a destructor for p_sched. */
1724  abt_errno = ABTI_ktable_set_unsafe(p_local, &p_keytable,
1725  &g_thread_sched_key, p_sched);
1727  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1728  if (p_keytable)
1729  ABTI_ktable_free(p_local, p_keytable);
1730  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1731  return abt_errno;
1732  }
1733  }
1734  ABTD_atomic_relaxed_store_ptr(&p_newthread->thread.p_keytable, p_keytable);
1735 
1736 #ifdef ABT_CONFIG_USE_DEBUG_LOG
1737  ABT_unit_id thread_id = ABTI_thread_get_id(&p_newthread->thread);
1738  if (thread_type & ABTI_THREAD_TYPE_MAIN) {
1739  LOG_DEBUG("[U%" PRIu64 "] main ULT created\n", thread_id);
1740  } else if (thread_type & ABTI_THREAD_TYPE_MAIN_SCHED) {
1741  LOG_DEBUG("[U%" PRIu64 "] main sched ULT created\n", thread_id);
1742  } else {
1743  LOG_DEBUG("[U%" PRIu64 "] created\n", thread_id);
1744  }
1745 #endif
1746 
1747  /* Invoke a thread creation event. */
1748  ABTI_tool_event_thread_create(p_local, &p_newthread->thread,
1750  ? ABTI_local_get_xstream(p_local)
1751  ->p_thread
1752  : NULL,
1753  push_pool ? p_pool : NULL);
1754 
1755  /* Create a wrapper unit */
1756  h_newthread = ABTI_ythread_get_handle(p_newthread);
1757  if (push_pool) {
1758  p_newthread->thread.unit = p_pool->u_create_from_thread(h_newthread);
1759  /* Add this thread to the pool */
1760  ABTI_pool_push(p_pool, p_newthread->thread.unit);
1761  } else {
1762  p_newthread->thread.unit = ABT_UNIT_NULL;
1763  }
1764 
1765  /* Return value */
1766  *pp_newthread = p_newthread;
1767  return ABT_SUCCESS;
1768 }
1769 
/* thread_migrate_to_pool(): request migration of p_thread to p_pool (this is
 * the static helper forward-declared near the top of this file).  Rejects
 * disallowed thread types and migration to the thread's current pool, records
 * the destination in the thread's migration data, and, if the caller is the
 * target thread itself, yields so the migration can take effect.  Returns
 * ABT_SUCCESS or an Argobots error code.
 * NOTE(review): Doxygen listing — the signature line (1771) and several
 * continuation lines (1777-1781, 1794, 1797, 1806) were elided by the
 * extractor, so some statements below are visibly truncated; confirm the
 * missing fragments against the upstream repository. */
1770 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1772  ABTI_thread *p_thread,
1773  ABTI_pool *p_pool)
1774 {
1775  /* checking for cases when migration is not allowed */
1776  ABTI_CHECK_TRUE(!(p_thread->type &
1782 
1783  /* checking for migration to the same pool */
1784  ABTI_CHECK_TRUE(p_thread->p_pool != p_pool, ABT_ERR_MIGRATION_TARGET);
1785 
1786  /* adding request to the thread. p_migration_pool must be updated before
1787  * setting the request since the target thread would read p_migration_pool
1788  * after ABTI_THREAD_REQ_MIGRATE. The update must be "atomic" (but does not
1789  * require acq-rel) since two threads can update the pointer value
1790  * simultaneously. */
1791  ABTI_thread_mig_data *p_mig_data;
1792  int abt_errno = ABTI_thread_get_mig_data(*pp_local, p_thread, &p_mig_data);
1793  ABTI_CHECK_ERROR(abt_errno);
 /* (elided line 1794: presumably the atomic store of p_pool into
  * p_mig_data->p_migration_pool — TODO confirm upstream) */
1795  (void *)p_pool);
1796 
1798 
1799  /* yielding if it is the same thread */
1800  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
1801  if ((!ABTI_IS_EXT_THREAD_ENABLED || p_local_xstream) &&
1802  p_thread == p_local_xstream->p_thread) {
1803  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1804  if (p_ythread) {
1805  ABTI_ythread_yield(&p_local_xstream, p_ythread,
1807  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1808  }
1809  }
1810  return ABT_SUCCESS;
1811 }
1812 #endif
1813 
/* thread_free(): release all resources owned by p_thread.  Emits the
 * thread-free tool event, frees the pool unit wrapper (only when free_unit
 * is true), frees the thread-specific key-value table, and finally releases
 * the descriptor (and its stack) via ABTI_mem_free_thread().
 * NOTE(review): Doxygen listing — continuation line 1819 (part of the
 * caller-thread expression passed to the tool-event macro) was elided by
 * the extractor. */
1814 static inline void thread_free(ABTI_local *p_local, ABTI_thread *p_thread,
1815  ABT_bool free_unit)
1816 {
1817  /* Invoke a thread freeing event. */
1818  ABTI_tool_event_thread_free(p_local, p_thread,
1820  ? ABTI_local_get_xstream(p_local)->p_thread
1821  : NULL);
1822 
1823  /* Free the unit */
1824  if (free_unit) {
1825  p_thread->p_pool->u_free(&p_thread->unit);
1826  }
1827 
1828  /* Free the key-value table */
1829  ABTI_ktable *p_ktable = ABTD_atomic_acquire_load_ptr(&p_thread->p_keytable);
1830  /* No parallel access to TLS is allowed. */
1831  ABTI_ASSERT(p_ktable != ABTI_KTABLE_LOCKED);
1832  if (p_ktable) {
1833  ABTI_ktable_free(p_local, p_ktable);
1834  }
1835 
1836  /* Free ABTI_thread (stack will also be freed) */
1837  ABTI_mem_free_thread(p_local, p_thread);
1838 }
1839 
/* Key destructor for a stackable scheduler attached to a dying ULT: mark the
 * scheduler as no longer in use and, for automatic schedulers, detach its
 * ULT pointer.
 * NOTE(review): Doxygen listing — line 1848 inside the if-branch was elided
 * by the extractor (presumably the call that frees an automatic scheduler —
 * TODO confirm upstream). */
1840 static void thread_key_destructor_stackable_sched(void *p_value)
1841 {
1842  /* This destructor should be called in ABTI_ythread_free(), so it should not
1843  * free the thread again. */
1844  ABTI_sched *p_sched = (ABTI_sched *)p_value;
1845  p_sched->used = ABTI_SCHED_NOT_USED;
1846  if (p_sched->automatic == ABT_TRUE) {
1847  p_sched->p_ythread = NULL;
1849  }
1850 }
1851 
1852 static void thread_key_destructor_migration(void *p_value)
1853 {
1854  ABTI_thread_mig_data *p_mig_data = (ABTI_thread_mig_data *)p_value;
1855  ABTU_free(p_mig_data);
1856 }
1857 
/* thread_join(): wait until p_thread terminates.  One of three strategies is
 * chosen based on the calling context:
 *   1. direct context switch to the target ULT when caller and target share
 *      the same pool with a single-consumer access mode (PRIV/MPSC/SPSC),
 *   2. a join request plus suspend, or a yield loop, otherwise,
 *   3. a busy-wait loop when the caller is an external (non-Argobots) thread
 *      or a non-yieldable unit.
 * NOTE(review): Doxygen listing — many continuation lines (e.g. 1861, 1890,
 * 1906, 1912, 1921, 1925-1926, 1929-1930, 1961, 1974, 1979, 1991-1994, 2003,
 * 2012, 2021-2022, 2027) were elided by the extractor; several conditions
 * and calls below are visibly truncated. */
1858 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
1859 {
 /* Fast path: target already terminated (comparand line 1861 elided). */
1860  if (ABTD_atomic_acquire_load_int(&p_thread->state) ==
1862  goto fn_exit;
1863  }
1864  /* The main ULT cannot be joined. */
1865  ABTI_ASSERT(!(p_thread->type & ABTI_THREAD_TYPE_MAIN));
1866 
1867  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
1868  if (ABTI_IS_EXT_THREAD_ENABLED && !p_local_xstream)
1869  goto busywait_based;
1870 
1871  ABTI_thread *p_self_thread = p_local_xstream->p_thread;
1872 
1873  ABTI_ythread *p_self = ABTI_thread_get_ythread_or_null(p_self_thread);
1874  if (!p_self)
1875  goto busywait_based;
1876 
1877  /* The target ULT should be different. */
1878  ABTI_ASSERT(p_thread != p_self_thread);
1879 
1880  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1881  if (!p_ythread)
1882  goto yield_based_task;
1883 
1884  ABT_pool_access access = p_self->thread.p_pool->access;
1885 
 /* Strategy 1: same pool, single-consumer access mode, and the target is
  * in the state elided at line 1890 (per the comment below, presumably
  * READY — confirm upstream): switch directly to the target. */
1886  if ((p_self->thread.p_pool == p_ythread->thread.p_pool) &&
1887  (access == ABT_POOL_ACCESS_PRIV || access == ABT_POOL_ACCESS_MPSC ||
1888  access == ABT_POOL_ACCESS_SPSC) &&
1889  (ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
1891 
1892  ABTI_xstream *p_xstream = p_self->thread.p_last_xstream;
1893 
1894  /* If other ES is calling ABTI_ythread_set_ready(), p_ythread may not
1895  * have been added to the pool yet because ABTI_ythread_set_ready()
1896  * changes the state first followed by pushing p_ythread to the pool.
1897  * Therefore, we have to check whether p_ythread is in the pool, and if
1898  * not, we need to wait until it is added. */
1899  while (p_ythread->thread.p_pool->u_is_in_pool(p_ythread->thread.unit) !=
1900  ABT_TRUE) {
1901  }
1902 
1903  /* This is corresponding to suspension. */
1904  ABTI_tool_event_ythread_suspend(p_local_xstream, p_self,
1905  p_self->thread.p_parent,
1907  (void *)p_ythread);
1908 
1909  /* Increase the number of blocked units. Be sure to execute
1910  * ABTI_pool_inc_num_blocked before ABTI_POOL_REMOVE in order not to
1911  * underestimate the number of units in a pool. */
1913  /* Remove the target ULT from the pool */
1914  int abt_errno =
1915  ABTI_pool_remove(p_ythread->thread.p_pool, p_ythread->thread.unit);
1916  /* This failure is fatal. */
1917  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
1918 
1919  /* Set the link in the context for the target ULT. Since p_link will be
1920  * referenced by p_self, this update does not require release store. */
1922  &p_self->ctx);
1923  /* Set the last ES */
1924  p_ythread->thread.p_last_xstream = p_xstream;
1927 
1928  /* Make the current ULT BLOCKED */
1931 
1932  LOG_DEBUG("[U%" PRIu64 ":E%d] blocked to join U%" PRIu64 "\n",
1933  ABTI_thread_get_id(&p_self->thread),
1934  p_self->thread.p_last_xstream->rank,
1935  ABTI_thread_get_id(&p_ythread->thread));
1936  LOG_DEBUG("[U%" PRIu64 ":E%d] start running\n",
1937  ABTI_thread_get_id(&p_ythread->thread),
1938  p_ythread->thread.p_last_xstream->rank);
1939 
1940  /* Switch the context */
1941  ABTI_ythread *p_prev =
1942  ABTI_ythread_context_switch_to_sibling(&p_local_xstream, p_self,
1943  p_ythread);
1944  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1945  ABTI_tool_event_thread_run(p_local_xstream, &p_self->thread,
1946  &p_prev->thread, p_self->thread.p_parent);
1947 
1948  } else if ((p_self->thread.p_pool != p_ythread->thread.p_pool) &&
1949  (access == ABT_POOL_ACCESS_PRIV ||
1950  access == ABT_POOL_ACCESS_SPSC)) {
1951  /* FIXME: once we change the suspend/resume mechanism (i.e., asking the
1952  * scheduler to wake up the blocked ULT), we will be able to handle all
1953  * access modes. */
1954  goto yield_based;
1955 
1956  } else {
1957  /* Tell p_ythread that there has been a join request. */
1958  /* If request already has ABTI_THREAD_REQ_JOIN, p_ythread is
1959  * terminating. We can't block p_self in this case. */
1960  uint32_t req = ABTD_atomic_fetch_or_uint32(&p_ythread->thread.request,
1962  if (req & ABTI_THREAD_REQ_JOIN)
1963  goto yield_based;
1964 
1965  ABTI_ythread_set_blocked(p_self);
1966  LOG_DEBUG("[U%" PRIu64 ":E%d] blocked to join U%" PRIu64 "\n",
1967  ABTI_thread_get_id(&p_self->thread),
1968  p_self->thread.p_last_xstream->rank,
1969  ABTI_thread_get_id(&p_ythread->thread));
1970 
1971  /* Set the link in the context of the target ULT. This p_link might be
1972  * read by p_ythread running on another ES in parallel, so release-store
1973  * is needed here. */
1975  &p_self->ctx);
1976 
1977  /* Suspend the current ULT */
1978  ABTI_ythread_suspend(&p_local_xstream, p_self,
1980  (void *)p_ythread);
1981  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1982  }
1983 
1984  /* Resume */
1985  /* If p_self's state is BLOCKED, the target ULT has terminated on the same
1986  * ES as p_self's ES and the control has come from the target ULT.
1987  * Otherwise, the target ULT had been migrated to a different ES, p_self
1988  * has been resumed by p_self's scheduler. In the latter case, we don't
1989  * need to change p_self's state. */
1990  if (ABTD_atomic_relaxed_load_int(&p_self->thread.state) ==
1995  LOG_DEBUG("[U%" PRIu64 ":E%d] resume after join\n",
1996  ABTI_thread_get_id(&p_self->thread),
1997  p_self->thread.p_last_xstream->rank);
1998  goto fn_exit;
1999  }
2000 
 /* Cooperatively yield until the target yieldable thread leaves its current
  * state (the loop's comparand at line 2003 was elided by the extractor). */
2001 yield_based:
2002  while (ABTD_atomic_acquire_load_int(&p_ythread->thread.state) !=
2004  ABTI_ythread_yield(&p_local_xstream, p_self,
2005  ABT_SYNC_EVENT_TYPE_THREAD_JOIN, (void *)p_ythread);
2006  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2007  }
2008  goto fn_exit;
2009 
 /* Same yield loop, but for a non-yieldable target (tasklet). */
2010 yield_based_task:
2011  while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
2013  ABTI_ythread_yield(&p_local_xstream, p_self,
2014  ABT_SYNC_EVENT_TYPE_TASK_JOIN, (void *)p_thread);
2015  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2016  }
2017  goto fn_exit;
2018 
 /* Caller cannot yield (external thread or non-yieldable unit): spin on the
  * target's state (loop body lines 2021-2022 elided; presumably an
  * ABTD_atomic_pause() — confirm upstream). */
2019 busywait_based:
2020  while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
2023  }
2024 
2025 fn_exit:
 /* Emit the join tool event on every exit path. */
2026  ABTI_tool_event_thread_join(*pp_local, p_thread,
2028  ? ABTI_local_get_xstream(*pp_local)
2029  ->p_thread
2030  : NULL);
2031 }
2032 
/* thread_root_func(): body of the per-ES root ULT.  Pops units from the root
 * pool and runs them until the main scheduler's thread terminates, then marks
 * the ES terminated; on the primary ES it finishes its context to jump back
 * to the main thread.  arg is unused in the visible body.
 * NOTE(review): Doxygen listing — continuation lines 2040, 2054, 2056, 2061
 * and 2066 were elided by the extractor (the do/while condition and some
 * state constants are therefore truncated below). */
2033 static void thread_root_func(void *arg)
2034 {
2035  /* root thread is working on a special context, so it should not rely on
2036  * functionality that needs yield. */
2037  ABTI_local *p_local = ABTI_local_get_local();
2038  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
2039  ABTI_ASSERT(ABTD_atomic_relaxed_load_int(&p_local_xstream->state) ==
2041 
2042  ABTI_ythread *p_root_ythread = p_local_xstream->p_root_ythread;
2043  p_local_xstream->p_thread = &p_root_ythread->thread;
2044  ABTI_pool *p_root_pool = p_local_xstream->p_root_pool;
2045 
2046  do {
2047  ABT_unit unit = ABTI_pool_pop(p_root_pool);
2048  if (unit != ABT_UNIT_NULL) {
2049  ABTI_xstream *p_xstream = p_local_xstream;
2050  ABTI_xstream_run_unit(&p_xstream, unit, p_root_pool);
2051  /* The root thread must be executed on the same execution stream. */
2052  ABTI_ASSERT(p_xstream == p_local_xstream);
2053  }
 /* (do/while condition lines 2054/2056 elided: loops until the main
  * scheduler's ULT state reaches termination — confirm upstream) */
2055  &p_local_xstream->p_main_sched->p_ythread->thread.state) !=
2057  /* The main scheduler thread finishes. */
2058 
2059  /* Set the ES's state as TERMINATED */
2060  ABTD_atomic_release_store_int(&p_local_xstream->state,
2062 
2063  if (p_local_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
2064  /* Let us jump back to the main thread (then finalize Argobots) */
2065  ABTD_ythread_finish_context(&p_root_ythread->ctx,
2067  }
2068 }
2069 
/* thread_main_sched_func(): body of the main-scheduler ULT of an ES.
 * Repeatedly runs the (possibly replaced) main scheduler until an exit/cancel
 * request arrives, or until a join/finish request arrives and the scheduler
 * has no remaining work.  arg is unused in the visible body.
 * NOTE(review): Doxygen listing — lines 2084, 2093 and 2099 were elided by
 * the extractor (an assertion after the run call and two request-flag tests
 * are therefore truncated below). */
2070 static void thread_main_sched_func(void *arg)
2071 {
2072  ABTI_local *p_local = ABTI_local_get_local();
2073  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
2074 
2075  while (1) {
2076  /* Execute the run function of scheduler */
2077  ABTI_sched *p_sched = p_local_xstream->p_main_sched;
2078  ABTI_ASSERT(p_local_xstream->p_thread == &p_sched->p_ythread->thread);
2079 
2080  LOG_DEBUG("[S%" PRIu64 "] start\n", p_sched->id);
2081  p_sched->run(ABTI_sched_get_handle(p_sched));
2082  /* From here the main scheduler can have been already replaced. */
2083  /* The main scheduler must be executed on the same execution stream. */
2085  LOG_DEBUG("[S%" PRIu64 "] end\n", p_sched->id);
2086 
 /* Re-read p_main_sched: run() may have installed a new main scheduler. */
2087  p_sched = p_local_xstream->p_main_sched;
2088  uint32_t request = ABTD_atomic_acquire_load_uint32(
2089  &p_sched->p_ythread->thread.request);
2090 
2091  /* If there is an exit or a cancel request, the ES terminates
2092  * regardless of remaining work units. */
2094  break;
2095 
2096  /* When join is requested, the ES terminates after finishing
2097  * execution of all work units. */
2098  if ((ABTD_atomic_relaxed_load_uint32(&p_sched->request) &
2100  ABTI_sched_get_effective_size(p_local, p_sched) == 0) {
2101  break;
2102  }
2103  }
2104  /* Finish this thread and goes back to the root thread. */
2105 }
2106 
/* thread_migrate_to_xstream(): migrate p_thread to the execution stream
 * p_xstream (static helper forward-declared near the top of this file).
 * Validates the thread type and ES state, picks a target pool from the ES's
 * main scheduler via ABTI_sched_get_migration_pool(), and delegates to
 * thread_migrate_to_pool().  Returns ABT_SUCCESS or an Argobots error code.
 * NOTE(review): Doxygen listing — the signature line (2108) and lines
 * 2113-2115, 2117-2121, 2125-2127, 2141 and 2145 were elided by the
 * extractor; the type/state checks and the migration-counter updates below
 * are visibly truncated. */
2107 #ifndef ABT_CONFIG_DISABLE_MIGRATION
2109  ABTI_thread *p_thread,
2110  ABTI_xstream *p_xstream)
2111 {
2112  /* checking for cases when migration is not allowed */
2116  ABTI_CHECK_TRUE(!(p_thread->type &
2122 
2123  /* We need to find the target scheduler */
2124  /* We check the state of the ES */
2128  /* The migration target should be the main scheduler since it is
2129  * hard to guarantee the lifetime of the stackable scheduler. */
2130  ABTI_sched *p_sched = p_xstream->p_main_sched;
2131 
2132  /* We check the state of the sched */
2133  /* Find a pool */
2134  ABTI_pool *p_pool = NULL;
2135  int abt_errno;
2136  abt_errno =
2137  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
2138  ABTI_CHECK_ERROR(abt_errno);
2139  /* We set the migration counter to prevent the scheduler from
2140  * stopping */
2142 
2143  abt_errno = thread_migrate_to_pool(pp_local, p_thread, p_pool);
2144  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
 /* (elided line 2145: presumably undoes the migration counter before
  * returning the error — confirm upstream) */
2146  return abt_errno;
2147  }
2148  return ABT_SUCCESS;
2149 }
2150 #endif
2151 
2152 static inline ABT_unit_id thread_get_new_id(void)
2153 {
2154  return (ABT_unit_id)ABTD_atomic_fetch_add_uint64(&g_thread_id, 1);
2155 }
static void ABTI_pool_inc_num_blocked(ABTI_pool *p_pool)
Definition: abti_pool.h:42
static void ABTI_thread_attr_init(ABTI_thread_attr *p_attr, void *p_stack, size_t stacksize, ABTI_thread_type thread_type, ABT_bool migratable)
int ABT_thread_set_specific(ABT_thread thread, ABT_key key, void *value) ABT_API_PUBLIC
Set the ULT-specific value associated with the key.
Definition: thread.c:1225
static ABTI_ythread * ABTI_ythread_get_ptr(ABT_thread thread)
Definition: abti_ythread.h:11
static void * ABTI_ktable_get(ABTD_atomic_ptr *pp_ktable, ABTI_key *p_key)
Definition: abti_key.h:283
struct ABT_thread_attr_opaque * ABT_thread_attr
Definition: abt.h:345
ABTD_ythread_context_atomic_ptr p_link
Definition: abtd_context.h:51
int ABT_thread_exit(void) ABT_API_PUBLIC
The calling ULT terminates its execution.
Definition: thread.c:409
ABTI_pool * p_pool
Definition: abti.h:324
static ABTI_ythread * ABTI_thread_get_ythread_or_null(ABTI_thread *p_thread)
Definition: abti_thread.h:59
int ABT_thread_get_last_pool_id(ABT_thread thread, int *id) ABT_API_PUBLIC
Get the last pool's ID of the ULT.
Definition: thread.c:577
struct ABT_unit_opaque * ABT_unit
Definition: abt.h:337
static void ABTD_ythread_context_init(ABTD_ythread_context *p_link, ABTD_ythread_context *p_newctx)
Definition: abtd_ythread.h:47
ABTD_atomic_uint32 request
Definition: abti.h:323
#define ABTI_CHECK_NULL_SCHED_PTR(p)
Definition: abti_error.h:184
#define ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC_STACK
Definition: abti.h:83
static ABTU_ret_err int ABTI_mem_alloc_ythread_mempool_desc(ABTI_local *p_local, ABTI_thread_attr *p_attr, ABTI_ythread **pp_ythread)
Definition: abti_mem.h:221
void(* f_migration_cb)(ABT_thread, void *)
Definition: abti.h:341
static ABT_pool ABTI_pool_get_handle(ABTI_pool *p_pool)
Definition: abti_pool.h:26
int ABT_thread_get_state(ABT_thread thread, ABT_thread_state *state) ABT_API_PUBLIC
Return the state of thread.
Definition: thread.c:533
static ABT_xstream ABTI_xstream_get_handle(ABTI_xstream *p_xstream)
Definition: abti_stream.h:26
uint64_t ABT_unit_id
Definition: abt.h:341
static ABT_unit ABTI_pool_pop(ABTI_pool *p_pool)
Definition: abti_pool.h:108
static void * ABTD_atomic_acquire_load_ptr(const ABTD_atomic_ptr *ptr)
Definition: abtd_atomic.h:848
ABT_unit_create_from_thread_fn u_create_from_thread
Definition: abti.h:296
ABTI_ythread * p_ythread
Definition: abti.h:267
ABT_unit_id id
Definition: abti.h:326
ABTD_atomic_int state
Definition: abti.h:241
static void ABTD_atomic_release_store_uint64(ABTD_atomic_uint64 *ptr, uint64_t val)
Definition: abtd_atomic.h:971
struct ABT_xstream_opaque * ABT_xstream
Definition: abt.h:313
#define ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(val)
Definition: abtd_atomic.h:59
struct ABT_sched_opaque * ABT_sched
Definition: abt.h:319
static void ABTD_atomic_relaxed_store_ythread_context_ptr(ABTD_ythread_context_atomic_ptr *ptr, ABTD_ythread_context *p_ctx)
Definition: abtd_context.h:36
int ABT_thread_join_many(int num_threads, ABT_thread *thread_list) ABT_API_PUBLIC
Wait for a number of ULTs to terminate.
Definition: thread.c:386
ABT_pool * pools
Definition: abti.h:265
static void thread_key_destructor_stackable_sched(void *p_value)
Definition: thread.c:1840
static ABT_sched ABTI_sched_get_handle(ABTI_sched *p_sched)
Definition: abti_sched.h:26
#define ABTU_unreachable()
Definition: abtu.h:25
static void ABTD_atomic_relaxed_store_ptr(ABTD_atomic_ptr *ptr, void *val)
Definition: abtd_atomic.h:914
int ABT_thread_cancel(ABT_thread thread) ABT_API_PUBLIC
Request the cancellation of the target thread.
Definition: thread.c:427
struct ABTI_local ABTI_local
Definition: abti.h:101
#define ABTU_unlikely(cond)
Definition: abtu.h:18
ABTU_ret_err int ABTI_ythread_create_root(ABTI_local *p_local, ABTI_xstream *p_xstream, ABTI_ythread **pp_root_ythread)
Definition: thread.c:1406
static ABTU_ret_err int thread_migrate_to_xstream(ABTI_local **pp_local, ABTI_thread *p_thread, ABTI_xstream *p_xstream)
Definition: thread.c:2108
void * p_migration_cb_arg
Definition: abti.h:342
static ABTU_ret_err int ABTI_mem_alloc_ythread_malloc_desc_stack(ABTI_thread_attr *p_attr, ABTI_ythread **pp_ythread)
Definition: abti_mem.h:201
#define ABTI_tool_event_thread_create(p_local, p_thread, p_caller, p_pool)
Definition: abti_tool.h:309
static void ABTI_mem_free_thread(ABTI_local *p_local, ABTI_thread *p_thread)
Definition: abti_mem.h:246
void * p_cb_arg
Definition: abti.h:336
static ABTI_key g_thread_mig_data_key
Definition: thread.c:33
static ABTD_atomic_uint64 g_thread_id
Definition: thread.c:1593
static void ABTD_atomic_release_store_int(ABTD_atomic_int *ptr, int val)
Definition: abtd_atomic.h:924
void(* f_thread)(void *)
Definition: abti.h:320
ABTI_thread_type type
Definition: abti.h:316
#define ABTI_CHECK_NULL_YTHREAD_PTR(p)
Definition: abti_error.h:202
static ABTI_pool * ABTI_xstream_get_main_pool(ABTI_xstream *p_xstream)
Definition: abti_stream.h:42
static ABTI_thread * ABTI_thread_get_ptr(ABT_thread thread)
Definition: abti_thread.h:9
#define ABT_UNIT_NULL
Definition: abt.h:415
struct ABT_thread_opaque * ABT_task
Definition: abt.h:353
#define ABTI_KEY_STATIC_INITIALIZER(f_destructor, id)
Definition: abti_key.h:42
ABTU_ret_err int ABTI_ythread_create_sched(ABTI_local *p_local, ABTI_pool *p_pool, ABTI_sched *p_sched)
Definition: thread.c:1452
#define ABT_ERR_INV_THREAD
Definition: abt.h:80
int ABT_thread_free(ABT_thread *thread) ABT_API_PUBLIC
Release the thread object associated with thread handle.
Definition: thread.c:284
int ABT_thread_get_attr(ABT_thread thread, ABT_thread_attr *attr) ABT_API_PUBLIC
Get attributes of the target ULT.
Definition: thread.c:1285
size_t sched_stacksize
Definition: abti.h:183
ABTI_thread * p_thread
Definition: abti.h:251
size_t ABTI_sched_get_effective_size(ABTI_local *p_local, ABTI_sched *p_sched)
Definition: sched.c:604
void ABTI_thread_reset_id(void)
Definition: thread.c:1595
#define ABTI_THREAD_TYPE_NAMED
Definition: abti.h:78
#define ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(pp_local_xstream, pp_ythread)
Definition: abti_error.h:115
static ABTI_key * ABTI_key_get_ptr(ABT_key key)
Definition: abti_key.h:11
int ABT_thread_resume(ABT_thread thread) ABT_API_PUBLIC
Resume the target ULT.
Definition: thread.c:750
static void ABTI_thread_set_request(ABTI_thread *p_thread, uint32_t req)
Definition: abti_thread.h:68
void ABTD_ythread_exit(ABTI_xstream *p_local_xstream, ABTI_ythread *p_ythread)
Definition: abtd_ythread.c:30
static void ABTD_atomic_release_store_uint32(ABTD_atomic_uint32 *ptr, uint32_t val)
Definition: abtd_atomic.h:947
static void thread_root_func(void *arg)
Definition: thread.c:2033
#define ABTI_THREAD_REQ_CANCEL
Definition: abti.h:43
int ABT_bool
Definition: abt.h:373
static void thread_key_destructor_migration(void *p_value)
Definition: thread.c:1852
Definition: abti.h:354
void ABTI_ktable_free(ABTI_local *p_local, ABTI_ktable *p_ktable)
Definition: key.c:139
ABTI_xstream_type type
Definition: abti.h:240
#define ABTI_tool_event_ythread_suspend(p_local_xstream, p_ythread, p_parent,sync_event_type, p_sync)
Definition: abti_tool.h:373
void ABTI_ythread_free_main(ABTI_local *p_local, ABTI_ythread *p_ythread)
Definition: thread.c:1484
int ABT_thread_migrate(ABT_thread thread) ABT_API_PUBLIC
Request migration of the thread to an any available ES.
Definition: thread.c:923
#define ABTI_KEY_ID_MIGRATION
Definition: abti_key.h:48
static void thread_free(ABTI_local *p_local, ABTI_thread *p_thread, ABT_bool free_unit)
Definition: thread.c:1814
ABTU_ret_err int ABTI_ythread_create_main(ABTI_local *p_local, ABTI_xstream *p_xstream, ABTI_ythread **p_ythread)
Definition: thread.c:1378
int ABT_thread_migrate_to_xstream(ABT_thread thread, ABT_xstream xstream) ABT_API_PUBLIC
Migrate a thread to a specific ES.
Definition: thread.c:791
ABTI_ythread * p_root_ythread
Definition: abti.h:247
#define ABTI_CHECK_YIELDABLE(p_thread, pp_ythread, abt_errno)
Definition: abti_error.h:146
struct ABT_pool_opaque * ABT_pool
Definition: abt.h:329
static void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
Definition: thread.c:1858
static uint64_t ABTD_atomic_fetch_add_uint64(ABTD_atomic_uint64 *ptr, uint64_t v)
Definition: abtd_atomic.h:411
void * p_stack
Definition: abti.h:330
ABT_unit_create_from_task_fn u_create_from_task
Definition: abti.h:297
#define ABTI_THREAD_REQ_JOIN
Definition: abti.h:41
int ABT_thread_create(ABT_pool pool, void(*thread_func)(void *), void *arg, ABT_thread_attr attr, ABT_thread *newthread) ABT_API_PUBLIC
Create a new thread and return its handle through newthread.
Definition: thread.c:63
int ABT_thread_set_callback(ABT_thread thread, void(*cb_func)(ABT_thread thread, void *cb_arg), void *cb_arg) ABT_API_PUBLIC
Set the callback function.
Definition: thread.c:973
#define ABT_ERR_THREAD
Definition: abt.h:101
static ABTU_ret_err int ythread_create(ABTI_local *p_local, ABTI_pool *p_pool, void(*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr, ABTI_thread_type thread_type, ABTI_sched *p_sched, ABT_bool push_pool, ABTI_ythread **pp_newthread)
Definition: thread.c:1616
static uint32_t ABTD_atomic_fetch_or_uint32(ABTD_atomic_uint32 *ptr, uint32_t v)
Definition: abtd_atomic.h:538
ABTI_sched * p_main_sched
Definition: abti.h:242
ABT_unit_id ABTI_thread_get_id(ABTI_thread *p_thread)
Definition: thread.c:1600
ABT_unit_is_in_pool_fn u_is_in_pool
Definition: abti.h:295
ABTI_pool * p_root_pool
Definition: abti.h:248
struct ABT_key_opaque * ABT_key
Definition: abt.h:355
#define ABTI_CHECK_TRUE_MSG(cond, abt_errno, msg)
Definition: abti_error.h:158
static ABTI_xstream * ABTI_xstream_get_ptr(ABT_xstream xstream)
Definition: abti_stream.h:11
#define ABT_FALSE
Definition: abt.h:285
ABTI_xstream * p_next
Definition: abti.h:237
ABTD_atomic_ptr p_migration_pool
Definition: abti.h:344
int ABT_thread_yield(void) ABT_API_PUBLIC
Yield the processor from the current running ULT back to the scheduler.
Definition: thread.c:723
#define ABTI_THREAD_TYPE_MAIN
Definition: abti.h:75
int ABT_thread_get_last_pool(ABT_thread thread, ABT_pool *pool) ABT_API_PUBLIC
Return the last pool of ULT.
Definition: thread.c:554
#define ABTI_THREAD_INIT_ID
Definition: abti.h:53
struct ABT_thread_opaque * ABT_thread
Definition: abt.h:343
ABTI_xstream * p_last_xstream
Definition: abti.h:318
size_t stacksize
Definition: abti.h:351
#define ABTI_THREAD_TYPE_ROOT
Definition: abti.h:74
void ABTI_thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
Definition: thread.c:1470
#define ABTI_KEY_ID_STACKABLE_SCHED
Definition: abti_key.h:47
#define ABTI_CHECK_TRUE(cond, abt_errno)
Definition: abti_error.h:137
int rank
Definition: abti.h:239
uint32_t ABTI_thread_type
Definition: abti.h:115
static uint32_t ABTD_atomic_acquire_load_uint32(const ABTD_atomic_uint32 *ptr)
Definition: abtd_atomic.h:797
#define ABTI_SCHED_REQ_FINISH
Definition: abti.h:38
int ABT_thread_free_many(int num, ABT_thread *thread_list) ABT_API_PUBLIC
Release a set of ULT objects.
Definition: thread.c:329
ABTD_atomic_int state
Definition: abti.h:322
static void ABTI_pool_dec_num_migrations(ABTI_pool *p_pool)
Definition: abti_pool.h:60
void ABTI_sched_free(ABTI_local *p_local, ABTI_sched *p_sched, ABT_bool force_free)
Definition: sched.c:497
ABT_bool migratable
Definition: abti.h:334
#define ABTI_IS_EXT_THREAD_ENABLED
Definition: abti.h:28
#define ABTI_tool_event_thread_join(p_local, p_thread, p_caller)
Definition: abti_tool.h:317
ABTD_atomic_uint32 request
Definition: abti.h:264
static ABTI_pool * ABTI_pool_get_ptr(ABT_pool pool)
Definition: abti_pool.h:11
int ABT_thread_get_stacksize(ABT_thread thread, size_t *stacksize) ABT_API_PUBLIC
Get the ULT's stack size.
Definition: thread.c:1139
ABT_bool automatic
Definition: abti.h:261
static ABTU_ret_err int ABTI_pool_remove(ABTI_pool *p_pool, ABT_unit unit)
Definition: abti_pool.h:80
int ABT_thread_is_migratable(ABT_thread thread, ABT_bool *flag) ABT_API_PUBLIC
Get the ULT&#39;s migratability.
Definition: thread.c:1043
ABT_unit_free_fn u_free
Definition: abti.h:298
void * p_arg
Definition: abti.h:321
ABTI_global * gp_ABTI_global
Definition: global.c:18
ABTI_thread thread
Definition: abti.h:348
#define ABT_SUCCESS
Definition: abt.h:64
void ABTI_xstream_run_unit(ABTI_xstream **pp_local_xstream, ABT_unit unit, ABTI_pool *p_pool)
Definition: stream.c:903
ABT_pool_access
Definition: abt.h:161
static void thread_main_sched_func(void *arg)
Definition: thread.c:2070
ABTI_xstream * p_xstream_head
Definition: abti.h:172
ABT_sched_run_fn run
Definition: abti.h:272
static void ABTD_ythread_context_invalidate(ABTD_ythread_context *p_newctx)
Definition: abtd_ythread.h:34
int ABT_thread_is_primary(ABT_thread thread, ABT_bool *flag) ABT_API_PUBLIC
Check if the target ULT is the primary ULT.
Definition: thread.c:1073
#define ABT_TRUE
Definition: abt.h:284
#define ABTI_THREAD_REQ_TERMINATE
Definition: abti.h:42
int ABT_thread_create_many(int num, ABT_pool *pool_list, void(**thread_func_list)(void *), void **arg_list, ABT_thread_attr attr, ABT_thread *newthread_list) ABT_API_PUBLIC
Create a set of ULTs.
Definition: thread.c:178
#define ABTI_SETUP_LOCAL_YTHREAD(pp_local_xstream, pp_ythread)
Definition: abti_error.h:83
int ABT_thread_set_migratable(ABT_thread thread, ABT_bool flag) ABT_API_PUBLIC
Set the ULT&#39;s migratability.
Definition: thread.c:1009
#define ABT_THREAD_ATTR_NULL
Definition: abt.h:417
#define ABTI_THREAD_TYPE_YIELDABLE
Definition: abti.h:77
static void ABTI_pool_inc_num_migrations(ABTI_pool *p_pool)
Definition: abti_pool.h:54
void ABTI_thread_free(ABTI_local *p_local, ABTI_thread *p_thread)
Definition: thread.c:1475
static ABTU_ret_err int ABTI_ktable_set(ABTI_local *p_local, ABTD_atomic_ptr *pp_ktable, ABTI_key *p_key, void *value)
Definition: abti_key.h:224
ABTU_ret_err int ABTI_thread_get_mig_data(ABTI_local *p_local, ABTI_thread *p_thread, ABTI_thread_mig_data **pp_mig_data)
Definition: thread.c:1508
int ABT_thread_join(ABT_thread thread) ABT_API_PUBLIC
Wait for thread to terminate.
Definition: thread.c:353
int ABT_thread_self_id(ABT_unit_id *id) ABT_API_PUBLIC
Return the calling ULT&#39;s ID.
Definition: thread.c:493
static ABTU_ret_err int ABTI_ktable_set_unsafe(ABTI_local *p_local, ABTI_ktable **pp_ktable, ABTI_key *p_key, void *value)
Definition: abti_key.h:265
#define ABTI_KTABLE_LOCKED
Definition: abti_key.h:59
static ABT_thread_attr ABTI_thread_attr_get_handle(ABTI_thread_attr *p_attr)
static ABTI_thread_attr * ABTI_thread_attr_get_ptr(ABT_thread_attr attr)
int ABT_thread_migrate_to_pool(ABT_thread thread, ABT_pool pool) ABT_API_PUBLIC
Migrate a thread to a specific pool.
Definition: thread.c:883
static void ABTD_atomic_relaxed_store_uint32(ABTD_atomic_uint32 *ptr, uint32_t val)
Definition: abtd_atomic.h:884
void(* f_cb)(ABT_thread, void *)
Definition: abti.h:335
#define ABTI_CHECK_NULL_XSTREAM_PTR(p)
Definition: abti_error.h:166
ABT_pool_access access
Definition: abti.h:282
int ABT_thread_set_associated_pool(ABT_thread thread, ABT_pool pool) ABT_API_PUBLIC
Set the associated pool for the target ULT.
Definition: thread.c:604
void ABTI_thread_print(ABTI_thread *p_thread, FILE *p_os, int indent)
Definition: thread.c:1532
ABTU_ret_err int ABTI_thread_attr_dup(const ABTI_thread_attr *p_attr, ABTI_thread_attr **pp_dup_attr) ABTU_ret_err
Definition: thread_attr.c:300
#define ABT_THREAD_NULL
Definition: abt.h:416
static void ABTI_pool_push(ABTI_pool *p_pool, ABT_unit unit)
Definition: abti_pool.h:65
ABTU_ret_err int ABTI_ythread_create_main_sched(ABTI_local *p_local, ABTI_xstream *p_xstream, ABTI_sched *p_sched)
Definition: thread.c:1432
#define ABTI_CHECK_NULL_POOL_PTR(p)
Definition: abti_error.h:175
#define ABTU_noreturn
Definition: abtu.h:31
static ABT_unit_id thread_get_new_id(void)
Definition: thread.c:2152
#define ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK
Definition: abti.h:84
int ABT_thread_set_arg(ABT_thread thread, void *arg) ABT_API_PUBLIC
Set the argument for the ULT function.
Definition: thread.c:1181
static uint32_t ABTD_atomic_relaxed_load_uint32(const ABTD_atomic_uint32 *ptr)
Definition: abtd_atomic.h:689
static ABT_thread ABTI_ythread_get_handle(ABTI_ythread *p_thread)
ABTU_ret_err int ABTI_sched_get_migration_pool(ABTI_sched *, ABTI_pool *, ABTI_pool **)
Definition: sched.c:567
#define ABT_ERR_FEATURE_NA
Definition: abt.h:116
size_t stacksize
Definition: abti.h:331
#define ABTI_THREAD_TYPE_MEM_MALLOC_DESC
Definition: abti.h:82
static int ABTD_atomic_relaxed_load_int(const ABTD_atomic_int *ptr)
Definition: abtd_atomic.h:661
void ABTI_ythread_free_root(ABTI_local *p_local, ABTI_ythread *p_ythread)
Definition: thread.c:1492
ABTD_ythread_context ctx
Definition: abti.h:349
static void ABTD_ythread_context_create(ABTD_ythread_context *p_link, size_t stacksize, void *p_stack, ABTD_ythread_context *p_newctx)
Definition: abtd_ythread.h:20
#define ABT_ERR_MIGRATION_NA
Definition: abt.h:114
static ABTI_sched * ABTI_sched_get_ptr(ABT_sched sched)
Definition: abti_sched.h:11
void * p_stack
Definition: abti.h:350
static ABTI_key g_thread_sched_key
Definition: thread.c:29
int ABT_thread_get_arg(ABT_thread thread, void **arg) ABT_API_PUBLIC
Retrieve the argument for the ULT function.
Definition: thread.c:1203
#define ABTI_ASSERT(cond)
Definition: abti_error.h:12
#define ABT_ERR_MIGRATION_TARGET
Definition: abt.h:113
#define LOG_DEBUG(fmt,...)
Definition: abti_log.h:26
static ABTI_local * ABTI_local_get_local_uninlined(void)
Definition: abti_local.h:51
static void ABTD_atomic_pause(void)
Definition: abtd_atomic.h:1091
#define ABTI_THREAD_TYPE_MIGRATABLE
Definition: abti.h:79
#define ABTI_CHECK_ERROR(abt_errno)
Definition: abti_error.h:127
uint64_t id
Definition: abti.h:289
static void ABTD_atomic_relaxed_store_int(ABTD_atomic_int *ptr, int val)
Definition: abtd_atomic.h:865
int ABT_thread_create_on_xstream(ABT_xstream xstream, void(*thread_func)(void *), void *arg, ABT_thread_attr attr, ABT_thread *newthread) ABT_API_PUBLIC
Create a new ULT associated with the target ES (xstream).
Definition: thread.c:127
#define ABTI_tool_event_thread_free(p_local, p_thread, p_caller)
Definition: abti_tool.h:324
#define ABTI_tool_event_ythread_yield(p_local_xstream, p_ythread, p_parent,sync_event_type, p_sync)
Definition: abti_tool.h:363
int ABT_thread_yield_to(ABT_thread thread) ABT_API_PUBLIC
Yield the processor from the current running thread to the specific thread.
Definition: thread.c:629
static ABTI_xstream * ABTI_local_get_xstream(ABTI_local *p_local)
Definition: abti_local.h:86
ABTD_atomic_ptr p_keytable
Definition: abti.h:325
ABTI_ythread * p_main_ythread
Definition: abti.h:186
int ABT_thread_is_unnamed(ABT_thread thread, ABT_bool *flag) ABT_API_PUBLIC
Check if the target ULT is unnamed.
Definition: thread.c:1096
int ABT_thread_get_last_xstream(ABT_thread thread, ABT_xstream *xstream) ABT_API_PUBLIC
Get the ES associated with the target thread.
Definition: thread.c:515
ABTI_sched_used used
Definition: abti.h:260
ABT_unit unit
Definition: abti.h:317
ABT_thread_state
Definition: abt.h:125
static void ABTI_ythread_yield(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_ythread, ABT_sync_event_type sync_event_type, void *p_sync)
Definition: abti_ythread.h:350
static int ABTD_atomic_acquire_load_int(const ABTD_atomic_int *ptr)
Definition: abtd_atomic.h:763
int ABT_thread_get_id(ABT_thread thread, ABT_unit_id *thread_id) ABT_API_PUBLIC
Get the ULT&#39;s id.
Definition: thread.c:1161
int ABT_thread_get_specific(ABT_thread thread, ABT_key key, void **value) ABT_API_PUBLIC
Get the ULT-specific value associated with the key.
Definition: thread.c:1257
void ABTI_thread_revive(ABTI_local *p_local, ABTI_pool *p_pool, void(*thread_func)(void *), void *arg, ABTI_thread *p_thread)
Definition: thread.c:1325
#define ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC
Definition: abti.h:81
#define ABTI_SETUP_LOCAL_XSTREAM_WITH_INIT_CHECK(pp_local_xstream)
Definition: abti_error.h:109
#define ABT_ERR_INV_XSTREAM
Definition: abt.h:68
void ABTI_ythread_set_ready(ABTI_local *p_local, ABTI_ythread *p_ythread)
Definition: ythread.c:50
#define ABTI_tool_event_thread_run(p_local_xstream, p_thread, p_prev,p_parent)
Definition: abti_tool.h:339
int ABT_thread_revive(ABT_pool pool, void(*thread_func)(void *), void *arg, ABT_thread *thread) ABT_API_PUBLIC
Revive the ULT.
Definition: thread.c:251
#define ABT_ERR_INV_THREAD_ATTR
Definition: abt.h:81
static ABTI_local * ABTI_xstream_get_local(ABTI_xstream *p_xstream)
Definition: abti_stream.h:67
void ABTI_ythread_set_blocked(ABTI_ythread *p_ythread)
Definition: ythread.c:8
static void ABTD_atomic_release_store_ythread_context_ptr(ABTD_ythread_context_atomic_ptr *ptr, ABTD_ythread_context *p_ctx)
Definition: abtd_context.h:42
int ABT_thread_equal(ABT_thread thread1, ABT_thread thread2, ABT_bool *result) ABT_API_PUBLIC
Compare two ULT handles for equality.
Definition: thread.c:1120
static ABTI_local * ABTI_local_get_local(void)
Definition: abti_local.h:41
#define ABTI_IS_ERROR_CHECK_ENABLED
Definition: abti.h:20
#define ABTI_tool_event_thread_revive(p_local, p_thread, p_caller, p_pool)
Definition: abti_tool.h:331
ABTI_thread_type thread_type
Definition: abti.h:332
static ABTU_ret_err int ABTU_calloc(size_t num, size_t size, void **p_ptr)
Definition: abtu.h:152
static void ABTU_free(void *ptr)
Definition: abtu.h:135
int num_xstreams
Definition: abti.h:171
static ABTU_ret_err int thread_migrate_to_pool(ABTI_local **p_local, ABTI_thread *p_thread, ABTI_pool *p_pool)
Definition: thread.c:1771
int ABT_thread_self(ABT_thread *thread) ABT_API_PUBLIC
Return the handle of the calling ULT.
Definition: thread.c:464
static ABTU_ret_err int ABTI_mem_alloc_ythread_default(ABTI_local *p_local, ABTI_ythread **pp_ythread)
Definition: abti_mem.h:135
#define ABTI_THREAD_TYPE_MAIN_SCHED
Definition: abti.h:76
static ABTI_ythread * ABTI_ythread_context_switch_to_sibling(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_old, ABTI_ythread *p_new)
Definition: abti_ythread.h:304
void ABTI_ythread_suspend(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_ythread, ABT_sync_event_type sync_event_type, void *p_sync)
Definition: ythread.c:30
ABTI_thread * p_parent
Definition: abti.h:319
#define ABTI_CHECK_NULL_THREAD_PTR(p)
Definition: abti_error.h:193
static ABT_thread ABTI_thread_get_handle(ABTI_thread *p_thread)
Definition: abti_thread.h:24
static ABTU_noreturn void ABTD_ythread_finish_context(ABTD_ythread_context *p_old, ABTD_ythread_context *p_new)
Definition: abtd_ythread.h:80
#define ABTU_ret_err
Definition: abtu.h:49
int ABT_thread_migrate_to_sched(ABT_thread thread, ABT_sched sched) ABT_API_PUBLIC
Migrate a thread to a specific scheduler.
Definition: thread.c:830
static ABTI_xstream * ABTI_local_get_xstream_or_null(ABTI_local *p_local)
Definition: abti_local.h:77
static void ABTI_pool_dec_num_blocked(ABTI_pool *p_pool)
Definition: abti_pool.h:48
#define ABTI_THREAD_REQ_MIGRATE
Definition: abti.h:44
#define ABTI_HANDLE_ERROR(n)
Definition: abti_error.h:121
ABTU_noreturn void ABTI_ythread_exit(ABTI_xstream *p_local_xstream, ABTI_ythread *p_ythread)
Definition: thread.c:1497
#define ABTI_CHECK_NULL_KEY_PTR(p)
Definition: abti_error.h:229