ARGOBOTS  c6511494322293e01714f56f341b8d2b22c1e3c1
thread.c
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 ABTU_ret_err static inline int
9 ythread_create(ABTI_local *p_local, ABTI_pool *p_pool,
10  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
11  ABTI_thread_type thread_type, ABTI_sched *p_sched,
12  ABT_bool push_pool, ABTI_ythread **pp_newthread);
13 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread);
14 static inline void thread_free(ABTI_local *p_local, ABTI_thread *p_thread,
15  ABT_bool free_unit);
16 static void thread_root_func(void *arg);
17 static void thread_main_sched_func(void *arg);
18 #ifndef ABT_CONFIG_DISABLE_MIGRATION
19 ABTU_ret_err static int thread_migrate_to_xstream(ABTI_local **pp_local,
20  ABTI_thread *p_thread,
21  ABTI_xstream *p_xstream);
22 ABTU_ret_err static int thread_migrate_to_pool(ABTI_local **p_local,
23  ABTI_thread *p_thread,
24  ABTI_pool *p_pool);
25 #endif
26 static inline ABT_unit_id thread_get_new_id(void);
27 
28 static void thread_key_destructor_stackable_sched(void *p_value);
29 static ABTI_key g_thread_sched_key =
30  ABTI_KEY_STATIC_INITIALIZER(thread_key_destructor_stackable_sched,
31  ABTI_KEY_ID_STACKABLE_SCHED);
32 static void thread_key_destructor_migration(void *p_value);
33 static ABTI_key g_thread_mig_data_key =
34  ABTI_KEY_STATIC_INITIALIZER(thread_key_destructor_migration,
35  ABTI_KEY_ID_MIGRATION);
36 
63 int ABT_thread_create(ABT_pool pool, void (*thread_func)(void *), void *arg,
64  ABT_thread_attr attr, ABT_thread *newthread)
65 {
66  ABTI_local *p_local = ABTI_local_get_local();
67  ABTI_ythread *p_newthread;
68 
69  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
70  ABTI_CHECK_NULL_POOL_PTR(p_pool);
71 
72  ABTI_thread_type unit_type =
73  (newthread != NULL)
74  ? (ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_NAMED)
75  : ABTI_THREAD_TYPE_YIELDABLE;
76  int abt_errno = ythread_create(p_local, p_pool, thread_func, arg,
77  ABTI_thread_attr_get_ptr(attr), unit_type,
78  NULL, ABT_TRUE, &p_newthread);
79  ABTI_CHECK_ERROR(abt_errno);
80 
81  /* Return value */
82  if (newthread)
83  *newthread = ABTI_ythread_get_handle(p_newthread);
84  return ABT_SUCCESS;
85 }
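
A minimal usage sketch of the routine above (editorial, not part of thread.c): create one named ULT on the primary execution stream's main pool, then join and free it. The function name hello is illustrative; all API calls are standard Argobots.

#include <abt.h>
#include <stdio.h>

static void hello(void *arg)
{
    (void)arg;
    printf("hello from a ULT\n");
}

int main(int argc, char *argv[])
{
    ABT_init(argc, argv);

    /* The primary ES and its main pool already exist after ABT_init(). */
    ABT_xstream xstream;
    ABT_pool pool;
    ABT_xstream_self(&xstream);
    ABT_xstream_get_main_pools(xstream, 1, &pool);

    /* A non-NULL newthread yields a named ULT that must be freed. */
    ABT_thread thread;
    ABT_thread_create(pool, hello, NULL, ABT_THREAD_ATTR_NULL, &thread);
    ABT_thread_join(thread);
    ABT_thread_free(&thread);

    ABT_finalize();
    return 0;
}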
86 
127 int ABT_thread_create_on_xstream(ABT_xstream xstream,
128  void (*thread_func)(void *), void *arg,
129  ABT_thread_attr attr, ABT_thread *newthread)
130 {
131  ABTI_local *p_local = ABTI_local_get_local();
132  ABTI_ythread *p_newthread;
133 
134  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
135  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
136 
137  /* TODO: need to consider the access type of target pool */
138  ABTI_pool *p_pool = ABTI_xstream_get_main_pool(p_xstream);
139  ABTI_thread_type unit_type =
140  (newthread != NULL)
141  ? (ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_NAMED)
142  : ABTI_THREAD_TYPE_YIELDABLE;
143  int abt_errno = ythread_create(p_local, p_pool, thread_func, arg,
144  ABTI_thread_attr_get_ptr(attr), unit_type,
145  NULL, ABT_TRUE, &p_newthread);
146  ABTI_CHECK_ERROR(abt_errno);
147 
148  /* Return value */
149  if (newthread)
150  *newthread = ABTI_ythread_get_handle(p_newthread);
151 
152  return ABT_SUCCESS;
153 }
154 
178 int ABT_thread_create_many(int num, ABT_pool *pool_list,
179  void (**thread_func_list)(void *), void **arg_list,
180  ABT_thread_attr attr, ABT_thread *newthread_list)
181 {
182  ABTI_local *p_local = ABTI_local_get_local();
183  int i;
184 
185  if (attr != ABT_THREAD_ATTR_NULL) {
186  /* This implies that the stack is given by a user. Since threads
187  * cannot use the same stack region, this is illegal. */
188  ABTI_CHECK_TRUE(!(ABTI_thread_attr_get_ptr(attr)->thread_type &
189  (ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC |
190  ABTI_THREAD_TYPE_MEM_MALLOC_DESC)),
191  ABT_ERR_INV_THREAD_ATTR);
192  }
193 
194  if (newthread_list == NULL) {
195  for (i = 0; i < num; i++) {
196  ABTI_ythread *p_newthread;
197  ABT_pool pool = pool_list[i];
198  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
199  ABTI_CHECK_NULL_POOL_PTR(p_pool);
200 
201  void (*thread_f)(void *) = thread_func_list[i];
202  void *arg = arg_list ? arg_list[i] : NULL;
203  int abt_errno = ythread_create(p_local, p_pool, thread_f, arg,
204  ABTI_thread_attr_get_ptr(attr),
205  ABTI_THREAD_TYPE_YIELDABLE, NULL,
206  ABT_TRUE, &p_newthread);
207  ABTI_CHECK_ERROR(abt_errno);
208  }
209  } else {
210  for (i = 0; i < num; i++) {
211  ABTI_ythread *p_newthread;
212  ABT_pool pool = pool_list[i];
213  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
214  ABTI_CHECK_NULL_POOL_PTR(p_pool);
215 
216  void (*thread_f)(void *) = thread_func_list[i];
217  void *arg = arg_list ? arg_list[i] : NULL;
218  int abt_errno = ythread_create(p_local, p_pool, thread_f, arg,
219  ABTI_thread_attr_get_ptr(attr),
220  ABTI_THREAD_TYPE_YIELDABLE |
221  ABTI_THREAD_TYPE_NAMED,
222  NULL, ABT_TRUE, &p_newthread);
223  newthread_list[i] = ABTI_ythread_get_handle(p_newthread);
224  /* TODO: Release threads that have been already created. */
225  ABTI_CHECK_ERROR(abt_errno);
226  }
227  }
228 
229  return ABT_SUCCESS;
230 }
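
A hedged sketch of the batch interface above (editorial, not part of thread.c): the four arrays are indexed together, and attr must be ABT_THREAD_ATTR_NULL because a user-supplied stack cannot be shared by several ULTs (see the check at the top of the function). The names work and spawn_batch are illustrative.

#include <abt.h>

#define NUM_ULTS 4

static void work(void *arg) { (void)arg; }

static void spawn_batch(ABT_pool pool)
{
    ABT_pool pools[NUM_ULTS];
    void (*funcs[NUM_ULTS])(void *);
    void *args[NUM_ULTS];
    ABT_thread threads[NUM_ULTS];
    int i;
    for (i = 0; i < NUM_ULTS; i++) {
        pools[i] = pool; /* every ULT goes to the same pool here */
        funcs[i] = work;
        args[i] = NULL;
    }
    ABT_thread_create_many(NUM_ULTS, pools, funcs, args,
                           ABT_THREAD_ATTR_NULL, threads);
    /* ABT_thread_free_many() joins each ULT before freeing it. */
    ABT_thread_free_many(NUM_ULTS, threads);
}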
231 
251 int ABT_thread_revive(ABT_pool pool, void (*thread_func)(void *), void *arg,
252  ABT_thread *thread)
253 {
254  ABTI_local *p_local = ABTI_local_get_local();
255 
256  ABTI_thread *p_thread = ABTI_thread_get_ptr(*thread);
257  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
258 
259  ABTI_CHECK_TRUE(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
260  ABT_THREAD_STATE_TERMINATED,
261  ABT_ERR_INV_THREAD);
262 
263  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
264  ABTI_CHECK_NULL_POOL_PTR(p_pool);
265 
266  ABTI_thread_revive(p_local, p_pool, thread_func, arg, p_thread);
267 
268  return ABT_SUCCESS;
269 }
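
A short sketch of the revive pattern (editorial, not part of thread.c): a ULT must have terminated, but not yet been freed, before ABT_thread_revive() can reuse its descriptor and stack. run_twice and step are illustrative names.

#include <abt.h>

static void step(void *arg) { (void)arg; }

static void run_twice(ABT_pool pool)
{
    ABT_thread thread;
    ABT_thread_create(pool, step, NULL, ABT_THREAD_ATTR_NULL, &thread);
    ABT_thread_join(thread); /* state is now TERMINATED */
    /* Reuse the same descriptor and stack instead of reallocating. */
    ABT_thread_revive(pool, step, NULL, &thread);
    ABT_thread_join(thread);
    ABT_thread_free(&thread);
}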
270 
284 int ABT_thread_free(ABT_thread *thread)
285 {
286  ABTI_local *p_local = ABTI_local_get_local();
287  ABT_thread h_thread = *thread;
288 
289  ABTI_thread *p_thread = ABTI_thread_get_ptr(h_thread);
290  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
291 
292  /* We first need to check whether p_local_xstream is NULL because external
293  * threads might call this routine. */
294  ABTI_CHECK_TRUE_MSG(!ABTI_local_get_xstream_or_null(p_local) ||
295  p_thread !=
296  ABTI_local_get_xstream(p_local)->p_thread,
297  ABT_ERR_INV_THREAD,
298  "The current thread cannot be freed.");
299 
300  ABTI_CHECK_TRUE_MSG(!(p_thread->type & (ABTI_THREAD_TYPE_MAIN |
301  ABTI_THREAD_TYPE_MAIN_SCHED)),
302  ABT_ERR_INV_THREAD,
303  "The main thread cannot be freed explicitly.");
304 
305  /* Wait until the thread terminates */
306  thread_join(&p_local, p_thread);
307  /* Free the ABTI_thread structure */
308  ABTI_thread_free(p_local, p_thread);
309 
310  /* Return value */
311  *thread = ABT_THREAD_NULL;
312 
313  return ABT_SUCCESS;
314 }
315 
329 int ABT_thread_free_many(int num, ABT_thread *thread_list)
330 {
331  ABTI_local *p_local = ABTI_local_get_local();
332  int i;
333 
334  for (i = 0; i < num; i++) {
335  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread_list[i]);
336  /* TODO: check input */
337  thread_join(&p_local, p_thread);
338  ABTI_thread_free(p_local, p_thread);
339  }
340  return ABT_SUCCESS;
341 }
342 
353 int ABT_thread_join(ABT_thread thread)
354 {
355  ABTI_local *p_local = ABTI_local_get_local();
356  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
357  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
358 
359  ABTI_CHECK_TRUE_MSG(!ABTI_local_get_xstream_or_null(p_local) ||
360  p_thread !=
361  ABTI_local_get_xstream(p_local)->p_thread,
362  ABT_ERR_INV_THREAD,
363  "The current thread cannot be joined.");
364 
365  ABTI_CHECK_TRUE_MSG(!(p_thread->type & (ABTI_THREAD_TYPE_MAIN |
366  ABTI_THREAD_TYPE_MAIN_SCHED)),
367  ABT_ERR_INV_THREAD,
368  "The main thread cannot be joined.");
369 
370  thread_join(&p_local, p_thread);
371  return ABT_SUCCESS;
372 }
373 
386 int ABT_thread_join_many(int num_threads, ABT_thread *thread_list)
387 {
388  ABTI_local *p_local = ABTI_local_get_local();
389  int i;
390  for (i = 0; i < num_threads; i++) {
391  /* TODO: check input */
392  thread_join(&p_local, ABTI_thread_get_ptr(thread_list[i]));
393  }
394  return ABT_SUCCESS;
395 }
396 
409 int ABT_thread_exit(void)
410 {
411  ABTI_xstream *p_local_xstream;
412  ABTI_ythread *p_ythread;
413  ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(&p_local_xstream, &p_ythread);
414 
415  ABTI_ythread_exit(p_local_xstream, p_ythread);
416  return ABT_SUCCESS;
417 }
418 
427 int ABT_thread_cancel(ABT_thread thread)
428 {
429 #ifdef ABT_CONFIG_DISABLE_THREAD_CANCEL
430  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
431 #else
432  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
433  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
434  ABTI_CHECK_TRUE_MSG(!(p_thread->type & (ABTI_THREAD_TYPE_MAIN |
435  ABTI_THREAD_TYPE_MAIN_SCHED)),
436  ABT_ERR_INV_THREAD,
437  "The main thread cannot be canceled.");
438 
439  /* Set the cancel request */
440  ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_CANCEL);
441  return ABT_SUCCESS;
442 #endif
443 }
444 
464 int ABT_thread_self(ABT_thread *thread)
465 {
466  *thread = ABT_THREAD_NULL;
467 
468  ABTI_xstream *p_local_xstream;
469  ABTI_SETUP_LOCAL_XSTREAM_WITH_INIT_CHECK(&p_local_xstream);
470  ABTI_thread *p_thread = p_local_xstream->p_thread;
471  if (!(p_thread->type & ABTI_THREAD_TYPE_YIELDABLE)) {
472  /* This is checked even if an error check is disabled. */
473  ABTI_HANDLE_ERROR(ABT_ERR_INV_THREAD);
474  }
475 
476  *thread = ABTI_thread_get_handle(p_thread);
477  return ABT_SUCCESS;
478 }
479 
493 int ABT_thread_self_id(ABT_unit_id *id)
494 {
495  ABTI_ythread *p_self;
496  ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(NULL, &p_self);
497 
498  *id = ABTI_thread_get_id(&p_self->thread);
499  return ABT_SUCCESS;
500 }
501 
515 int ABT_thread_get_last_xstream(ABT_thread thread, ABT_xstream *xstream)
516 {
517  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
518  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
519 
520  *xstream = ABTI_xstream_get_handle(p_thread->p_last_xstream);
521  return ABT_SUCCESS;
522 }
523 
533 int ABT_thread_get_state(ABT_thread thread, ABT_thread_state *state)
534 {
535  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
536  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
537 
538  *state = (ABT_thread_state)ABTD_atomic_acquire_load_int(&p_thread->state);
539  return ABT_SUCCESS;
540 }
541 
554 int ABT_thread_get_last_pool(ABT_thread thread, ABT_pool *pool)
555 {
556  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
557  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
558 
559  *pool = ABTI_pool_get_handle(p_thread->p_pool);
560  return ABT_SUCCESS;
561 }
562 
577 int ABT_thread_get_last_pool_id(ABT_thread thread, int *id)
578 {
579  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
580  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
581  ABTI_ASSERT(p_thread->p_pool);
582 
583  *id = (int)(p_thread->p_pool->id);
584  return ABT_SUCCESS;
585 }
586 
604 int ABT_thread_set_associated_pool(ABT_thread thread, ABT_pool pool)
605 {
606  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
607  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
608  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
609  ABTI_CHECK_NULL_POOL_PTR(p_pool);
610 
611  p_thread->p_pool = p_pool;
612  return ABT_SUCCESS;
613 }
614 
629 int ABT_thread_yield_to(ABT_thread thread)
630 {
631  ABTI_ythread *p_tar_ythread = ABTI_ythread_get_ptr(thread);
632  ABTI_CHECK_NULL_YTHREAD_PTR(p_tar_ythread);
633 
634  ABTI_xstream *p_local_xstream;
635  ABTI_ythread *p_cur_ythread;
636  ABTI_SETUP_LOCAL_YTHREAD(&p_local_xstream, &p_cur_ythread);
637 
638  LOG_DEBUG("[U%" PRIu64 ":E%d] yield_to -> U%" PRIu64 "\n",
639  ABTI_thread_get_id(&p_cur_ythread->thread),
640  p_cur_ythread->thread.p_last_xstream->rank,
641  ABTI_thread_get_id(&p_tar_ythread->thread));
642 
643  /* The target ULT must be different from the caller ULT. */
644  ABTI_CHECK_TRUE_MSG(p_cur_ythread != p_tar_ythread, ABT_ERR_INV_THREAD,
645  "The caller and target ULTs are the same.");
646 
647  ABTI_CHECK_TRUE_MSG(ABTD_atomic_relaxed_load_int(
648  &p_tar_ythread->thread.state) !=
649  ABT_THREAD_STATE_TERMINATED,
650  ABT_ERR_INV_THREAD,
651  "Cannot yield to the terminated thread");
652 
653  /* Both threads must be associated with the same pool. */
654  /* FIXME: instead of same pool, runnable by the same ES */
655  ABTI_CHECK_TRUE_MSG(p_cur_ythread->thread.p_pool ==
656  p_tar_ythread->thread.p_pool,
657  ABT_ERR_INV_THREAD,
658  "The target thread's pool is not the same as mine.");
659 
660  /* If the target thread is not READY, we don't yield. Note that a ULT can
661  * be regarded as 'ready' only if its state is READY and it has been
662  * pushed into a pool. Since we set a ULT's state to READY and then push it
663  * into a pool, we check them in the reverse order, i.e., first whether
664  * the ULT is inside a pool and then its state. */
665  if (!(p_tar_ythread->thread.p_pool->u_is_in_pool(
666  p_tar_ythread->thread.unit) == ABT_TRUE &&
667  ABTD_atomic_acquire_load_int(&p_tar_ythread->thread.state) ==
668  ABT_THREAD_STATE_READY)) {
669  return ABT_SUCCESS;
670  }
671 
672  /* Remove the target ULT from the pool */
673  if (ABTI_IS_ERROR_CHECK_ENABLED) {
674  /* This is necessary to prevent the size of this pool from becoming 0. */
675  ABTI_pool_inc_num_blocked(p_tar_ythread->thread.p_pool);
676  }
677  int abt_errno = ABTI_pool_remove(p_tar_ythread->thread.p_pool,
678  p_tar_ythread->thread.unit);
679  if (ABTI_IS_ERROR_CHECK_ENABLED) {
680  ABTI_pool_dec_num_blocked(p_tar_ythread->thread.p_pool);
681  ABTI_CHECK_ERROR(abt_errno);
682  }
683 
684  ABTD_atomic_release_store_int(&p_cur_ythread->thread.state,
685  ABT_THREAD_STATE_READY);
686 
687  /* This operation corresponds to a yield. */
688  ABTI_tool_event_ythread_yield(p_local_xstream, p_cur_ythread,
689  p_cur_ythread->thread.p_parent,
690  ABT_SYNC_EVENT_TYPE_USER, NULL);
691 
692  /* Add the current thread to the pool again. */
693  ABTI_pool_push(p_cur_ythread->thread.p_pool, p_cur_ythread->thread.unit);
694 
695  /* We set the last ES */
696  p_tar_ythread->thread.p_last_xstream = p_local_xstream;
697 
698  /* Switch the context */
699  ABTD_atomic_release_store_int(&p_tar_ythread->thread.state,
700  ABT_THREAD_STATE_RUNNING);
701  ABTI_ythread *p_prev =
702  ABTI_ythread_context_switch_to_sibling(&p_local_xstream, p_cur_ythread,
703  p_tar_ythread);
704  ABTI_tool_event_thread_run(p_local_xstream, &p_cur_ythread->thread,
705  &p_prev->thread, p_cur_ythread->thread.p_parent);
706  return ABT_SUCCESS;
707 }
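
A sketch of how yield_to is typically used (editorial, not part of thread.c): the caller hands its execution stream directly to a sibling ULT in the same pool instead of returning to the scheduler. The global g_peer handle, assumed to be published by whoever created the sibling, is an illustrative device.

#include <abt.h>

ABT_thread g_peer; /* assumed: set to a sibling ULT in the same pool */

static void producer(void *arg)
{
    (void)arg;
    /* ... make an item available to the peer ... */
    /* Returns ABT_ERR_INV_THREAD if the pools differ or the peer has
     * terminated (see the checks above). */
    ABT_thread_yield_to(g_peer);
}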
708 
723 int ABT_thread_yield(void)
724 {
725  ABTI_xstream *p_local_xstream;
726  ABTI_ythread *p_ythread;
727  ABTI_SETUP_LOCAL_YTHREAD_WITH_INIT_CHECK(&p_local_xstream, &p_ythread);
728 
729  ABTI_ythread_yield(&p_local_xstream, p_ythread, ABT_SYNC_EVENT_TYPE_USER,
730  NULL);
731  return ABT_SUCCESS;
732 }
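
Since ULTs are cooperatively scheduled, a long-running ULT should yield periodically so other units in its pool can make progress. A minimal sketch (editorial; long_task is an illustrative name):

#include <abt.h>

static void long_task(void *arg)
{
    int i, n = *(int *)arg;
    for (i = 0; i < n; i++) {
        /* ... one bounded slice of work ... */
        ABT_thread_yield(); /* hand the ES back to the scheduler */
    }
}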
733 
750 int ABT_thread_resume(ABT_thread thread)
751 {
752  ABTI_local *p_local = ABTI_local_get_local();
753 
754  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
755  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
756  ABTI_ythread *p_ythread;
757  ABTI_CHECK_YIELDABLE(p_thread, &p_ythread, ABT_ERR_INV_THREAD);
758 
759  /* The ULT must be in BLOCKED state. */
760  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
761  ABT_THREAD_STATE_BLOCKED,
762  ABT_ERR_THREAD);
763 
764  ABTI_ythread_set_ready(p_local, p_ythread);
765  return ABT_SUCCESS;
766 }
767 
791 int ABT_thread_migrate_to_xstream(ABT_thread thread, ABT_xstream xstream)
792 {
793 #ifndef ABT_CONFIG_DISABLE_MIGRATION
794  ABTI_local *p_local = ABTI_local_get_local();
795  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
796  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
797  ABTI_xstream *p_xstream = ABTI_xstream_get_ptr(xstream);
798  ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);
799 
800  int abt_errno = thread_migrate_to_xstream(&p_local, p_thread, p_xstream);
801  ABTI_CHECK_ERROR(abt_errno);
802  return ABT_SUCCESS;
803 #else
804  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
805 #endif
806 }
807 
830 int ABT_thread_migrate_to_sched(ABT_thread thread, ABT_sched sched)
831 {
832 #ifndef ABT_CONFIG_DISABLE_MIGRATION
833  ABTI_local *p_local = ABTI_local_get_local();
834  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
835  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
836  ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
837  ABTI_CHECK_NULL_SCHED_PTR(p_sched);
838 
839  /* checking for cases when migration is not allowed */
840  ABTI_CHECK_TRUE(!(p_thread->type &
841  (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_MAIN_SCHED)),
842  ABT_ERR_INV_THREAD);
843  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
844  ABT_THREAD_STATE_TERMINATED,
845  ABT_ERR_INV_THREAD);
846 
847  /* Find a pool */
848  ABTI_pool *p_pool;
849  int abt_errno;
850  abt_errno =
851  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
852  ABTI_CHECK_ERROR(abt_errno);
853 
854  abt_errno = thread_migrate_to_pool(&p_local, p_thread, p_pool);
855  ABTI_CHECK_ERROR(abt_errno);
856 
857  ABTI_pool_inc_num_migrations(p_pool);
858  return ABT_SUCCESS;
859 #else
860  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
861 #endif
862 }
863 
883 int ABT_thread_migrate_to_pool(ABT_thread thread, ABT_pool pool)
884 {
885 #ifndef ABT_CONFIG_DISABLE_MIGRATION
886  ABTI_local *p_local = ABTI_local_get_local();
887  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
888  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
889  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
890  ABTI_CHECK_NULL_POOL_PTR(p_pool);
891 
892  int abt_errno = thread_migrate_to_pool(&p_local, p_thread, p_pool);
893  ABTI_CHECK_ERROR(abt_errno);
894 
895  ABTI_pool_inc_num_migrations(p_pool);
896  return ABT_SUCCESS;
897 #else
898  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
899 #endif
900 }
901 
923 int ABT_thread_migrate(ABT_thread thread)
924 {
925 #ifndef ABT_CONFIG_DISABLE_MIGRATION
926  /* TODO: fix the bug(s) */
927  ABTI_local *p_local = ABTI_local_get_local();
928 
929  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
930  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
931  ABTI_CHECK_TRUE(gp_ABTI_global->num_xstreams != 1, ABT_ERR_MIGRATION_NA);
932 
933  /* Choose the destination xstream */
934  /* FIXME: Currently, the target xstream is linearly chosen. We need a
935  * better selection strategy. */
936  /* TODO: handle better when no pool accepts migration */
937 
938  ABTI_xstream *p_xstream = gp_ABTI_global->p_xstream_head;
939  while (p_xstream) {
940  if (p_xstream != p_thread->p_last_xstream) {
941  if (ABTD_atomic_acquire_load_int(&p_xstream->state) ==
942  ABT_XSTREAM_STATE_RUNNING) {
943  int abt_errno =
944  thread_migrate_to_xstream(&p_local, p_thread, p_xstream);
945  if (abt_errno != ABT_ERR_INV_XSTREAM &&
946  abt_errno != ABT_ERR_MIGRATION_TARGET) {
947  ABTI_CHECK_ERROR(abt_errno);
948  break;
949  }
950  }
951  }
952  p_xstream = p_xstream->p_next;
953  }
954  return ABT_SUCCESS;
955 #else
956  ABTI_HANDLE_ERROR(ABT_ERR_MIGRATION_NA);
957 #endif
958 }
959 
973 int ABT_thread_set_callback(ABT_thread thread,
974  void (*cb_func)(ABT_thread thread, void *cb_arg),
975  void *cb_arg)
976 {
977 #ifndef ABT_CONFIG_DISABLE_MIGRATION
978  ABTI_local *p_local = ABTI_local_get_local();
979  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
980  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
981 
982  ABTI_thread_mig_data *p_mig_data;
983  int abt_errno = ABTI_thread_get_mig_data(p_local, p_thread, &p_mig_data);
984  ABTI_CHECK_ERROR(abt_errno);
985 
986  p_mig_data->f_migration_cb = cb_func;
987  p_mig_data->p_migration_cb_arg = cb_arg;
988  return ABT_SUCCESS;
989 #else
990  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
991 #endif
992 }
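
A sketch of registering a migration callback (editorial, not part of thread.c): on_migrate runs when the ULT is migrated and receives the registered cb_arg. All names are illustrative.

#include <abt.h>
#include <stdio.h>

static void on_migrate(ABT_thread thread, void *cb_arg)
{
    (void)thread;
    printf("ULT migrated: %s\n", (const char *)cb_arg);
}

static void watch_migration(ABT_thread thread)
{
    ABT_thread_set_callback(thread, on_migrate, (void *)"worker");
}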
993 
1009 int ABT_thread_set_migratable(ABT_thread thread, ABT_bool flag)
1010 {
1011 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1012  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1013  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1014 
1015  if (!(p_thread->type &
1016  (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_MAIN_SCHED))) {
1017  if (flag) {
1018  p_thread->type |= ABTI_THREAD_TYPE_MIGRATABLE;
1019  } else {
1020  p_thread->type &= ~ABTI_THREAD_TYPE_MIGRATABLE;
1021  }
1022  }
1023  return ABT_SUCCESS;
1024 #else
1025  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
1026 #endif
1027 }
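
A sketch combining migratability with explicit migration (editorial, not part of thread.c): clearing the flag pins a ULT, while ABT_thread_migrate_to_xstream() pushes a migratable ULT toward another running ES. place and target are illustrative names.

#include <abt.h>

static void place(ABT_thread thread, ABT_xstream target, ABT_bool pin)
{
    if (pin == ABT_TRUE) {
        ABT_thread_set_migratable(thread, ABT_FALSE); /* keep it here */
    } else {
        ABT_thread_set_migratable(thread, ABT_TRUE);
        ABT_thread_migrate_to_xstream(thread, target);
    }
}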
1028 
1043 int ABT_thread_is_migratable(ABT_thread thread, ABT_bool *flag)
1044 {
1045 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1046  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1047  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1048 
1049  *flag =
1050  (p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE) ? ABT_TRUE : ABT_FALSE;
1051  return ABT_SUCCESS;
1052 #else
1053  ABTI_HANDLE_ERROR(ABT_ERR_FEATURE_NA);
1054 #endif
1055 }
1056 
1073 int ABT_thread_is_primary(ABT_thread thread, ABT_bool *flag)
1074 {
1075  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1076  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1077 
1078  *flag = (p_thread->type & ABTI_THREAD_TYPE_MAIN) ? ABT_TRUE : ABT_FALSE;
1079  return ABT_SUCCESS;
1080 }
1081 
1096 int ABT_thread_is_unnamed(ABT_thread thread, ABT_bool *flag)
1097 {
1098  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1099  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1100 
1101  *flag = (p_thread->type & ABTI_THREAD_TYPE_NAMED) ? ABT_FALSE : ABT_TRUE;
1102  return ABT_SUCCESS;
1103 }
1104 
1120 int ABT_thread_equal(ABT_thread thread1, ABT_thread thread2, ABT_bool *result)
1121 {
1122  ABTI_thread *p_thread1 = ABTI_thread_get_ptr(thread1);
1123  ABTI_thread *p_thread2 = ABTI_thread_get_ptr(thread2);
1124  *result = (p_thread1 == p_thread2) ? ABT_TRUE : ABT_FALSE;
1125  return ABT_SUCCESS;
1126 }
1127 
1139 int ABT_thread_get_stacksize(ABT_thread thread, size_t *stacksize)
1140 {
1141  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1142  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1143  ABTI_ythread *p_ythread;
1144  ABTI_CHECK_YIELDABLE(p_thread, &p_ythread, ABT_ERR_INV_THREAD);
1145 
1146  *stacksize = p_ythread->stacksize;
1147  return ABT_SUCCESS;
1148 }
1149 
1161 int ABT_thread_get_id(ABT_thread thread, ABT_unit_id *thread_id)
1162 {
1163  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1164  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1165 
1166  *thread_id = ABTI_thread_get_id(p_thread);
1167  return ABT_SUCCESS;
1168 }
1169 
1181 int ABT_thread_set_arg(ABT_thread thread, void *arg)
1182 {
1183  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1184  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1185 
1186  p_thread->p_arg = arg;
1187  return ABT_SUCCESS;
1188 }
1189 
1203 int ABT_thread_get_arg(ABT_thread thread, void **arg)
1204 {
1205  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1206  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1207 
1208  *arg = p_thread->p_arg;
1209  return ABT_SUCCESS;
1210 }
1211 
1225 int ABT_thread_set_specific(ABT_thread thread, ABT_key key, void *value)
1226 {
1227  ABTI_local *p_local = ABTI_local_get_local();
1228 
1229  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1230  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1231 
1232  ABTI_key *p_key = ABTI_key_get_ptr(key);
1233  ABTI_CHECK_NULL_KEY_PTR(p_key);
1234 
1235  /* Set the value. */
1236  int abt_errno =
1237  ABTI_ktable_set(p_local, &p_thread->p_keytable, p_key, value);
1238  ABTI_CHECK_ERROR(abt_errno);
1239  return ABT_SUCCESS;
1240 }
1241 
1257 int ABT_thread_get_specific(ABT_thread thread, ABT_key key, void **value)
1258 {
1259  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1260  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1261 
1262  ABTI_key *p_key = ABTI_key_get_ptr(key);
1263  ABTI_CHECK_NULL_KEY_PTR(p_key);
1264 
1265  /* Get the value. */
1266  *value = ABTI_ktable_get(&p_thread->p_keytable, p_key);
1267  return ABT_SUCCESS;
1268 }
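
A sketch of ULT-specific storage (editorial, not part of thread.c): a key created with ABT_key_create() maps each ULT to its own value, and the destructor (here free) reclaims the value when the ULT is freed. g_key and the helper names are illustrative.

#include <abt.h>
#include <stdlib.h>

static ABT_key g_key; /* create once, e.g. right after ABT_init() */

static void init_scratch_key(void)
{
    ABT_key_create(free, &g_key); /* destructor frees each ULT's value */
}

static void attach_scratch(ABT_thread thread, size_t size)
{
    void *buf = malloc(size);
    ABT_thread_set_specific(thread, g_key, buf);
}

static void *scratch_of(ABT_thread thread)
{
    void *buf = NULL;
    ABT_thread_get_specific(thread, g_key, &buf);
    return buf;
}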
1269 
1285 int ABT_thread_get_attr(ABT_thread thread, ABT_thread_attr *attr)
1286 {
1287  ABTI_thread *p_thread = ABTI_thread_get_ptr(thread);
1288  ABTI_CHECK_NULL_THREAD_PTR(p_thread);
1289 
1290  ABTI_thread_attr thread_attr, *p_attr;
1291  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1292  if (p_ythread) {
1293  thread_attr.p_stack = p_ythread->p_stack;
1294  thread_attr.stacksize = p_ythread->stacksize;
1295  } else {
1296  thread_attr.p_stack = NULL;
1297  thread_attr.stacksize = 0;
1298  }
1299  thread_attr.thread_type = p_thread->type;
1300 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1301  thread_attr.migratable =
1302  (p_thread->type & ABTI_THREAD_TYPE_MIGRATABLE) ? ABT_TRUE : ABT_FALSE;
1303  ABTI_thread_mig_data *p_mig_data =
1304  (ABTI_thread_mig_data *)ABTI_ktable_get(&p_thread->p_keytable,
1305  &g_thread_mig_data_key);
1306  if (p_mig_data) {
1307  thread_attr.f_cb = p_mig_data->f_migration_cb;
1308  thread_attr.p_cb_arg = p_mig_data->p_migration_cb_arg;
1309  } else {
1310  thread_attr.f_cb = NULL;
1311  thread_attr.p_cb_arg = NULL;
1312  }
1313 #endif
1314  int abt_errno = ABTI_thread_attr_dup(&thread_attr, &p_attr);
1315  ABTI_CHECK_ERROR(abt_errno);
1316 
1317  *attr = ABTI_thread_attr_get_handle(p_attr);
1318  return ABT_SUCCESS;
1319 }
1320 
1321 /*****************************************************************************/
1322 /* Private APIs */
1323 /*****************************************************************************/
1324 
1325 void ABTI_thread_revive(ABTI_local *p_local, ABTI_pool *p_pool,
1326  void (*thread_func)(void *), void *arg,
1327  ABTI_thread *p_thread)
1328 {
1329  ABTI_ASSERT(ABTD_atomic_relaxed_load_int(&p_thread->state) ==
1330  ABT_THREAD_STATE_TERMINATED);
1331  p_thread->f_thread = thread_func;
1332  p_thread->p_arg = arg;
1333 
1334  ABTD_atomic_relaxed_store_int(&p_thread->state, ABT_THREAD_STATE_READY);
1335  ABTD_atomic_relaxed_store_uint32(&p_thread->request, 0);
1336  p_thread->p_last_xstream = NULL;
1337  p_thread->p_parent = NULL;
1338 
1339  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1340  if (p_thread->p_pool != p_pool) {
1341  /* Free the unit for the old pool */
1342  p_thread->p_pool->u_free(&p_thread->unit);
1343 
1344  /* Set the new pool */
1345  p_thread->p_pool = p_pool;
1346 
1347  /* Create a wrapper unit */
1348  if (p_ythread) {
1349  ABT_thread h_thread = ABTI_ythread_get_handle(p_ythread);
1350  p_thread->unit = p_pool->u_create_from_thread(h_thread);
1351  } else {
1352  ABT_task task = ABTI_thread_get_handle(p_thread);
1353  p_thread->unit = p_pool->u_create_from_task(task);
1354  }
1355  }
1356 
1357  if (p_ythread) {
1358  /* Create a ULT context */
1359  size_t stacksize = p_ythread->stacksize;
1360  ABTD_ythread_context_create(NULL, stacksize, p_ythread->p_stack,
1361  &p_ythread->ctx);
1362  }
1363 
1364  /* Invoke a thread revive event. */
1365  ABTI_tool_event_thread_revive(p_local, p_thread,
1366  ABTI_local_get_xstream_or_null(p_local)
1367  ? ABTI_local_get_xstream(p_local)
1368  ->p_thread
1369  : NULL,
1370  p_pool);
1371 
1372  LOG_DEBUG("[U%" PRIu64 "] revived\n", ABTI_thread_get_id(p_thread));
1373 
1374  /* Add this thread to the pool */
1375  ABTI_pool_push(p_pool, p_thread->unit);
1376 }
1377 
1378 ABTU_ret_err int ABTI_ythread_create_main(ABTI_local *p_local,
1379  ABTI_xstream *p_xstream,
1380  ABTI_ythread **p_ythread)
1381 {
1382  ABTI_thread_attr attr;
1383  ABTI_pool *p_pool;
1384 
1385  /* Get the first pool of ES */
1386  p_pool = ABTI_pool_get_ptr(p_xstream->p_main_sched->pools[0]);
1387 
1388  /* Allocate a ULT object */
1389 
1390  /* TODO: Need to set the actual stack address and size for the main ULT */
1391  ABTI_thread_attr_init(&attr, NULL, 0, ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC,
1392  ABT_FALSE);
1393 
1394  /* Although this main ULT is running now, we add this main ULT to the pool
1395  * so that the scheduler can schedule the main ULT when the main ULT is
1396  * context switched to the scheduler for the first time. */
1397  ABT_bool push_pool = ABT_TRUE;
1398  int abt_errno =
1399  ythread_create(p_local, p_pool, NULL, NULL, &attr,
1400  ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_MAIN, NULL,
1401  push_pool, p_ythread);
1402  ABTI_CHECK_ERROR(abt_errno);
1403  return ABT_SUCCESS;
1404 }
1405 
1406 ABTU_ret_err int ABTI_ythread_create_root(ABTI_local *p_local,
1407  ABTI_xstream *p_xstream,
1408  ABTI_ythread **pp_root_ythread)
1409 {
1410  ABTI_thread_attr attr;
1411  /* Create a ULT context */
1412  if (p_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
1413  /* Create a thread with its stack */
1414  ABTI_thread_attr_init(&attr, NULL, gp_ABTI_global->sched_stacksize,
1415  ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK,
1416  ABT_FALSE);
1417  } else {
1418  /* For secondary ESs, the stack of an OS thread is used. */
1419  ABTI_thread_attr_init(&attr, NULL, 0, ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC,
1420  ABT_FALSE);
1421  }
1422  ABTI_ythread *p_root_ythread;
1423  int abt_errno =
1424  ythread_create(p_local, NULL, thread_root_func, NULL, &attr,
1425  ABTI_THREAD_TYPE_YIELDABLE | ABTI_THREAD_TYPE_ROOT, NULL,
1426  ABT_FALSE, &p_root_ythread);
1427  ABTI_CHECK_ERROR(abt_errno);
1428  *pp_root_ythread = p_root_ythread;
1429  return ABT_SUCCESS;
1430 }
1431 
1432 ABTU_ret_err int ABTI_ythread_create_main_sched(ABTI_local *p_local,
1433  ABTI_xstream *p_xstream,
1434  ABTI_sched *p_sched)
1435 {
1436  ABTI_thread_attr attr;
1437 
1438  /* Allocate a ULT object and its stack */
1439  ABTI_thread_attr_init(&attr, NULL, gp_ABTI_global->sched_stacksize,
1440  ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK, ABT_FALSE);
1441  int abt_errno =
1442  ythread_create(p_local, p_xstream->p_root_pool, thread_main_sched_func,
1443  NULL, &attr,
1444  ABTI_THREAD_TYPE_YIELDABLE |
1445  ABTI_THREAD_TYPE_MAIN_SCHED | ABTI_THREAD_TYPE_NAMED,
1446  p_sched, ABT_TRUE, &p_sched->p_ythread);
1447  ABTI_CHECK_ERROR(abt_errno);
1448  return ABT_SUCCESS;
1449 }
1450 
1451 /* This routine creates a ULT for the scheduler. */
1452 ABTU_ret_err int ABTI_ythread_create_sched(ABTI_local *p_local,
1453  ABTI_pool *p_pool,
1454  ABTI_sched *p_sched)
1455 {
1456  ABTI_thread_attr attr;
1457 
1458  /* Allocate a ULT object and its stack */
1459  ABTI_thread_attr_init(&attr, NULL, gp_ABTI_global->sched_stacksize,
1460  ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK, ABT_FALSE);
1461  int abt_errno =
1462  ythread_create(p_local, p_pool, (void (*)(void *))p_sched->run,
1463  (void *)ABTI_sched_get_handle(p_sched), &attr,
1464  ABTI_THREAD_TYPE_YIELDABLE, p_sched, ABT_TRUE,
1465  &p_sched->p_ythread);
1466  ABTI_CHECK_ERROR(abt_errno);
1467  return ABT_SUCCESS;
1468 }
1469 
1470 void ABTI_thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
1471 {
1472  thread_join(pp_local, p_thread);
1473 }
1474 
1475 void ABTI_thread_free(ABTI_local *p_local, ABTI_thread *p_thread)
1476 {
1477  LOG_DEBUG("[U%" PRIu64 ":E%d] freed\n", ABTI_thread_get_id(p_thread),
1478  ABTI_local_get_xstream_or_null(p_local)
1479  ? ABTI_local_get_xstream(p_local)->rank
1480  : -1);
1481  thread_free(p_local, p_thread, ABT_TRUE);
1482 }
1483 
1484 void ABTI_ythread_free_main(ABTI_local *p_local, ABTI_ythread *p_ythread)
1485 {
1486  ABTI_thread *p_thread = &p_ythread->thread;
1487  LOG_DEBUG("[U%" PRIu64 ":E%d] main ULT freed\n",
1488  ABTI_thread_get_id(p_thread), p_thread->p_last_xstream->rank);
1489  thread_free(p_local, p_thread, ABT_FALSE);
1490 }
1491 
1492 void ABTI_ythread_free_root(ABTI_local *p_local, ABTI_ythread *p_ythread)
1493 {
1494  thread_free(p_local, &p_ythread->thread, ABT_FALSE);
1495 }
1496 
1497 ABTU_noreturn void ABTI_ythread_exit(ABTI_xstream *p_local_xstream,
1498  ABTI_ythread *p_ythread)
1499 {
1500  /* Set the exit request */
1501  ABTI_thread_set_request(&p_ythread->thread, ABTI_THREAD_REQ_TERMINATE);
1502 
1503  /* Terminate this ULT */
1504  ABTD_ythread_exit(p_local_xstream, p_ythread);
1505  ABTU_unreachable();
1506 }
1507 
1508 ABTU_ret_err int ABTI_thread_get_mig_data(ABTI_local *p_local,
1509  ABTI_thread *p_thread,
1510  ABTI_thread_mig_data **pp_mig_data)
1511 {
1512  ABTI_thread_mig_data *p_mig_data =
1513  (ABTI_thread_mig_data *)ABTI_ktable_get(&p_thread->p_keytable,
1514  &g_thread_mig_data_key);
1515  if (!p_mig_data) {
1516  int abt_errno;
1517  abt_errno =
1518  ABTU_calloc(1, sizeof(ABTI_thread_mig_data), (void **)&p_mig_data);
1519  ABTI_CHECK_ERROR(abt_errno);
1520  abt_errno = ABTI_ktable_set(p_local, &p_thread->p_keytable,
1521  &g_thread_mig_data_key, (void *)p_mig_data);
1522  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
1523  /* Failed to add p_mig_data to p_thread's keytable. */
1524  ABTU_free(p_mig_data);
1525  return abt_errno;
1526  }
1527  }
1528  *pp_mig_data = p_mig_data;
1529  return ABT_SUCCESS;
1530 }
1531 
1532 void ABTI_thread_print(ABTI_thread *p_thread, FILE *p_os, int indent)
1533 {
1534  if (p_thread == NULL) {
1535  fprintf(p_os, "%*s== NULL thread ==\n", indent, "");
1536  } else {
1537  ABTI_xstream *p_xstream = p_thread->p_last_xstream;
1538  int xstream_rank = p_xstream ? p_xstream->rank : 0;
1539  const char *type, *yieldable, *state;
1540 
1541  if (p_thread->type & ABTI_THREAD_TYPE_MAIN) {
1542  type = "MAIN";
1543  } else if (p_thread->type & ABTI_THREAD_TYPE_MAIN_SCHED) {
1544  type = "MAIN_SCHED";
1545  } else {
1546  type = "USER";
1547  }
1548  if (p_thread->type & ABTI_THREAD_TYPE_YIELDABLE) {
1549  yieldable = "yes";
1550  } else {
1551  yieldable = "no";
1552  }
1553  switch (ABTD_atomic_acquire_load_int(&p_thread->state)) {
1554  case ABT_THREAD_STATE_READY:
1555  state = "READY";
1556  break;
1557  case ABT_THREAD_STATE_RUNNING:
1558  state = "RUNNING";
1559  break;
1560  case ABT_THREAD_STATE_BLOCKED:
1561  state = "BLOCKED";
1562  break;
1563  case ABT_THREAD_STATE_TERMINATED:
1564  state = "TERMINATED";
1565  break;
1566  default:
1567  state = "UNKNOWN";
1568  break;
1569  }
1570 
1571  fprintf(p_os,
1572  "%*s== Thread (%p) ==\n"
1573  "%*sid : %" PRIu64 "\n"
1574  "%*stype : %s\n"
1575  "%*syieldable : %s\n"
1576  "%*sstate : %s\n"
1577  "%*slast_ES : %p (%d)\n"
1578  "%*sp_arg : %p\n"
1579  "%*spool : %p\n"
1580  "%*srequest : 0x%x\n"
1581  "%*skeytable : %p\n",
1582  indent, "", (void *)p_thread, indent, "",
1583  ABTI_thread_get_id(p_thread), indent, "", type, indent, "",
1584  yieldable, indent, "", state, indent, "", (void *)p_xstream,
1585  xstream_rank, indent, "", p_thread->p_arg, indent, "",
1586  (void *)p_thread->p_pool, indent, "",
1587  ABTD_atomic_acquire_load_uint32(&p_thread->request), indent, "",
1588  ABTD_atomic_acquire_load_ptr(&p_thread->p_keytable));
1589  }
1590  fflush(p_os);
1591 }
1592 
1593 static ABTD_atomic_uint64 g_thread_id =
1594  ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(0);
1595 void ABTI_thread_reset_id(void)
1596 {
1597  ABTD_atomic_release_store_uint64(&g_thread_id, 0);
1598 }
1599 
1600 ABT_unit_id ABTI_thread_get_id(ABTI_thread *p_thread)
1601 {
1602  if (p_thread == NULL)
1603  return ABTI_THREAD_INIT_ID;
1604 
1605  if (p_thread->id == ABTI_THREAD_INIT_ID) {
1606  p_thread->id = thread_get_new_id();
1607  }
1608  return p_thread->id;
1609 }
1610 
1611 /*****************************************************************************/
1612 /* Internal static functions */
1613 /*****************************************************************************/
1614 
1615 ABTU_ret_err static inline int
1616 ythread_create(ABTI_local *p_local, ABTI_pool *p_pool,
1617  void (*thread_func)(void *), void *arg, ABTI_thread_attr *p_attr,
1618  ABTI_thread_type thread_type, ABTI_sched *p_sched,
1619  ABT_bool push_pool, ABTI_ythread **pp_newthread)
1620 {
1621  int abt_errno;
1622  ABTI_ythread *p_newthread;
1623  ABT_thread h_newthread;
1624  ABTI_ktable *p_keytable = NULL;
1625 
1626  /* Allocate a ULT object and its stack, then create a thread context. */
1627  if (!p_attr) {
1628  abt_errno = ABTI_mem_alloc_ythread_default(p_local, &p_newthread);
1629  ABTI_CHECK_ERROR(abt_errno);
1630 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1631  thread_type |= ABTI_THREAD_TYPE_MIGRATABLE;
1632 #endif
1633  } else {
1634  ABTI_thread_type attr_type = p_attr->thread_type;
1635  if (attr_type & ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC_STACK) {
1636 #ifdef ABT_CONFIG_USE_MEM_POOL
1637  abt_errno =
1638  ABTI_mem_alloc_ythread_mempool_desc_stack(p_local, p_attr,
1639  &p_newthread);
1640  ABTI_CHECK_ERROR(abt_errno);
1641 #else
1642  abt_errno =
1643  ABTI_mem_alloc_ythread_malloc_desc_stack(p_attr, &p_newthread);
1644 #endif
1645  ABTI_CHECK_ERROR(abt_errno);
1646  } else if (attr_type & ABTI_THREAD_TYPE_MEM_MALLOC_DESC_STACK) {
1647  abt_errno =
1648  ABTI_mem_alloc_ythread_malloc_desc_stack(p_attr, &p_newthread);
1649  ABTI_CHECK_ERROR(abt_errno);
1650  } else {
1651  ABTI_ASSERT(attr_type & (ABTI_THREAD_TYPE_MEM_MEMPOOL_DESC |
1652  ABTI_THREAD_TYPE_MEM_MALLOC_DESC));
1653  /* Let's try to use mempool first since it performs better. */
1654  abt_errno = ABTI_mem_alloc_ythread_mempool_desc(p_local, p_attr,
1655  &p_newthread);
1656  ABTI_CHECK_ERROR(abt_errno);
1657  }
1658 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1659  thread_type |= p_attr->migratable ? ABTI_THREAD_TYPE_MIGRATABLE : 0;
1660  if (ABTU_unlikely(p_attr->f_cb)) {
1661  ABTI_thread_mig_data *p_mig_data;
1662  abt_errno = ABTU_calloc(1, sizeof(ABTI_thread_mig_data),
1663  (void **)&p_mig_data);
1664  if (ABTI_IS_ERROR_CHECK_ENABLED &&
1665  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1666  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1667  return abt_errno;
1668  }
1669  p_mig_data->f_migration_cb = p_attr->f_cb;
1670  p_mig_data->p_migration_cb_arg = p_attr->p_cb_arg;
1671  abt_errno = ABTI_ktable_set_unsafe(p_local, &p_keytable,
1672  &g_thread_mig_data_key,
1673  (void *)p_mig_data);
1674  if (ABTI_IS_ERROR_CHECK_ENABLED &&
1675  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1676  if (p_keytable)
1677  ABTI_ktable_free(p_local, p_keytable);
1678  ABTU_free(p_mig_data);
1679  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1680  return abt_errno;
1681  }
1682  }
1683 #endif
1684  }
1685 
1686  if (thread_type & (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_ROOT)) {
1687  if (p_newthread->p_stack == NULL) {
1688  /* We don't need to initialize the context if a thread will run on
1689  * OS-level threads. Invalidate the context here. */
1690  ABTD_ythread_context_invalidate(&p_newthread->ctx);
1691  } else {
1692  /* Create the context. This thread is special, so dynamic promotion
1693  * is not supported. */
1694  size_t stack_size = p_newthread->stacksize;
1695  void *p_stack = p_newthread->p_stack;
1696  ABTD_ythread_context_create(NULL, stack_size, p_stack,
1697  &p_newthread->ctx);
1698  }
1699  } else {
1700 #if ABT_CONFIG_THREAD_TYPE != ABT_THREAD_TYPE_DYNAMIC_PROMOTION
1701  size_t stack_size = p_newthread->stacksize;
1702  void *p_stack = p_newthread->p_stack;
1703  ABTD_ythread_context_create(NULL, stack_size, p_stack,
1704  &p_newthread->ctx);
1705 #else
1706  /* The context is not fully created now. */
1707  ABTD_ythread_context_init(NULL, &p_newthread->ctx);
1708 #endif
1709  }
1710  p_newthread->thread.f_thread = thread_func;
1711  p_newthread->thread.p_arg = arg;
1712 
1713  ABTD_atomic_release_store_int(&p_newthread->thread.state,
1714  ABT_THREAD_STATE_READY);
1715  ABTD_atomic_release_store_uint32(&p_newthread->thread.request, 0);
1716  p_newthread->thread.p_last_xstream = NULL;
1717  p_newthread->thread.p_parent = NULL;
1718  p_newthread->thread.p_pool = p_pool;
1719  p_newthread->thread.type |= thread_type;
1720  p_newthread->thread.id = ABTI_THREAD_INIT_ID;
1721  if (p_sched && !(thread_type &
1722  (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_MAIN_SCHED))) {
1723  /* Set a destructor for p_sched. */
1724  abt_errno = ABTI_ktable_set_unsafe(p_local, &p_keytable,
1725  &g_thread_sched_key, p_sched);
1726  if (ABTI_IS_ERROR_CHECK_ENABLED &&
1727  ABTU_unlikely(abt_errno != ABT_SUCCESS)) {
1728  if (p_keytable)
1729  ABTI_ktable_free(p_local, p_keytable);
1730  ABTI_mem_free_thread(p_local, &p_newthread->thread);
1731  return abt_errno;
1732  }
1733  }
1734  ABTD_atomic_relaxed_store_ptr(&p_newthread->thread.p_keytable, p_keytable);
1735 
1736 #ifdef ABT_CONFIG_USE_DEBUG_LOG
1737  ABT_unit_id thread_id = ABTI_thread_get_id(&p_newthread->thread);
1738  if (thread_type & ABTI_THREAD_TYPE_MAIN) {
1739  LOG_DEBUG("[U%" PRIu64 "] main ULT created\n", thread_id);
1740  } else if (thread_type & ABTI_THREAD_TYPE_MAIN_SCHED) {
1741  LOG_DEBUG("[U%" PRIu64 "] main sched ULT created\n", thread_id);
1742  } else {
1743  LOG_DEBUG("[U%" PRIu64 "] created\n", thread_id);
1744  }
1745 #endif
1746 
1747  /* Invoke a thread creation event. */
1748  ABTI_tool_event_thread_create(p_local, &p_newthread->thread,
1749  ABTI_local_get_xstream_or_null(p_local)
1750  ? ABTI_local_get_xstream(p_local)
1751  ->p_thread
1752  : NULL,
1753  push_pool ? p_pool : NULL);
1754 
1755  /* Create a wrapper unit */
1756  h_newthread = ABTI_ythread_get_handle(p_newthread);
1757  if (push_pool) {
1758  p_newthread->thread.unit = p_pool->u_create_from_thread(h_newthread);
1759  /* Add this thread to the pool */
1760  ABTI_pool_push(p_pool, p_newthread->thread.unit);
1761  } else {
1762  p_newthread->thread.unit = ABT_UNIT_NULL;
1763  }
1764 
1765  /* Return value */
1766  *pp_newthread = p_newthread;
1767  return ABT_SUCCESS;
1768 }
1769 
1770 #ifndef ABT_CONFIG_DISABLE_MIGRATION
1771 ABTU_ret_err static int thread_migrate_to_pool(ABTI_local **pp_local,
1772  ABTI_thread *p_thread,
1773  ABTI_pool *p_pool)
1774 {
1775  /* checking for cases when migration is not allowed */
1776  ABTI_CHECK_TRUE(!(p_thread->type &
1777  (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_MAIN_SCHED)),
1778  ABT_ERR_INV_THREAD);
1779  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
1780  ABT_THREAD_STATE_TERMINATED,
1781  ABT_ERR_INV_THREAD);
1782 
1783  /* checking for migration to the same pool */
1784  ABTI_CHECK_TRUE(p_thread->p_pool != p_pool, ABT_ERR_MIGRATION_TARGET);
1785 
1786  /* adding request to the thread. p_migration_pool must be updated before
1787  * setting the request since the target thread would read p_migration_pool
1788  * after ABTI_THREAD_REQ_MIGRATE. The update must be "atomic" (but does not
1789  * require acq-rel) since two threads can update the pointer value
1790  * simultaneously. */
1791  ABTI_thread_mig_data *p_mig_data;
1792  int abt_errno = ABTI_thread_get_mig_data(*pp_local, p_thread, &p_mig_data);
1793  ABTI_CHECK_ERROR(abt_errno);
1794  ABTD_atomic_relaxed_store_ptr(&p_mig_data->p_migration_pool,
1795  (void *)p_pool);
1796 
1797  ABTI_thread_set_request(p_thread, ABTI_THREAD_REQ_MIGRATE);
1798 
1799  /* yielding if it is the same thread */
1800  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
1801  if ((!ABTI_IS_EXT_THREAD_ENABLED || p_local_xstream) &&
1802  p_thread == p_local_xstream->p_thread) {
1803  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1804  if (p_ythread) {
1805  ABTI_ythread_yield(&p_local_xstream, p_ythread,
1806  ABT_SYNC_EVENT_TYPE_OTHER, NULL);
1807  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1808  }
1809  }
1810  return ABT_SUCCESS;
1811 }
1812 #endif
1813 
1814 static inline void thread_free(ABTI_local *p_local, ABTI_thread *p_thread,
1815  ABT_bool free_unit)
1816 {
1817  /* Invoke a thread freeing event. */
1818  ABTI_tool_event_thread_free(p_local, p_thread,
1819  ABTI_local_get_xstream_or_null(p_local)
1820  ? ABTI_local_get_xstream(p_local)->p_thread
1821  : NULL);
1822 
1823  /* Free the unit */
1824  if (free_unit) {
1825  p_thread->p_pool->u_free(&p_thread->unit);
1826  }
1827 
1828  /* Free the key-value table */
1829  ABTI_ktable *p_ktable = ABTD_atomic_acquire_load_ptr(&p_thread->p_keytable);
1830  /* No parallel access to TLS is allowed. */
1831  ABTI_ASSERT(p_ktable != ABTI_KTABLE_LOCKED);
1832  if (p_ktable) {
1833  ABTI_ktable_free(p_local, p_ktable);
1834  }
1835 
1836  /* Free ABTI_thread (stack will also be freed) */
1837  ABTI_mem_free_thread(p_local, p_thread);
1838 }
1839 
1840 static void thread_key_destructor_stackable_sched(void *p_value)
1841 {
1842  /* This destructor should be called in ABTI_ythread_free(), so it should not
1843  * free the thread again. */
1844  ABTI_sched *p_sched = (ABTI_sched *)p_value;
1845  p_sched->used = ABTI_SCHED_NOT_USED;
1846  if (p_sched->automatic == ABT_TRUE) {
1847  p_sched->p_ythread = NULL;
1848  ABTI_sched_free(ABTI_local_get_local_uninlined(), p_sched, ABT_FALSE);
1849  }
1850 }
1851 
1852 static void thread_key_destructor_migration(void *p_value)
1853 {
1854  ABTI_thread_mig_data *p_mig_data = (ABTI_thread_mig_data *)p_value;
1855  ABTU_free(p_mig_data);
1856 }
1857 
1858 static void thread_join_busywait(ABTI_thread *p_thread)
1859 {
1860  while (ABTD_atomic_acquire_load_int(&p_thread->state) !=
1861  ABT_THREAD_STATE_TERMINATED) {
1862  ABTD_atomic_pause();
1863  }
1864  ABTI_tool_event_thread_join(NULL, p_thread, NULL);
1865 }
1866 
1867 static void thread_join_yield_ythread(ABTI_xstream **pp_local_xstream,
1868  ABTI_ythread *p_self,
1869  ABTI_ythread *p_ythread)
1870 {
1871  while (ABTD_atomic_acquire_load_int(&p_ythread->thread.state) !=
1872  ABT_THREAD_STATE_TERMINATED) {
1873  ABTI_ythread_yield(pp_local_xstream, p_self,
1874  ABT_SYNC_EVENT_TYPE_THREAD_JOIN, (void *)p_ythread);
1875  }
1876  ABTI_tool_event_thread_join(ABTI_xstream_get_local(*pp_local_xstream),
1877  &p_ythread->thread, &p_self->thread);
1878 }
1879 
1880 static void thread_join_yield_task(ABTI_xstream **pp_local_xstream,
1881  ABTI_ythread *p_self, ABTI_thread *p_task)
1882 {
1883  while (ABTD_atomic_acquire_load_int(&p_task->state) !=
1884  ABT_THREAD_STATE_TERMINATED) {
1885  ABTI_ythread_yield(pp_local_xstream, p_self,
1886  ABT_SYNC_EVENT_TYPE_TASK_JOIN, (void *)p_task);
1887  }
1888  ABTI_tool_event_thread_join(ABTI_xstream_get_local(*pp_local_xstream),
1889  p_task, &p_self->thread);
1890 }
1891 
1892 static inline void thread_join(ABTI_local **pp_local, ABTI_thread *p_thread)
1893 {
1894  if (ABTD_atomic_acquire_load_int(&p_thread->state) ==
1895  ABT_THREAD_STATE_TERMINATED) {
1896  ABTI_tool_event_thread_join(*pp_local, p_thread,
1897  ABTI_local_get_xstream_or_null(*pp_local)
1898  ? ABTI_local_get_xstream(*pp_local)
1899  ->p_thread
1900  : NULL);
1901  return;
1902  }
1903  /* The main ULT cannot be joined. */
1904  ABTI_ASSERT(!(p_thread->type & ABTI_THREAD_TYPE_MAIN));
1905 
1906  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
1907  if (ABTI_IS_EXT_THREAD_ENABLED && !p_local_xstream) {
1908  thread_join_busywait(p_thread);
1909  return;
1910  }
1911 
1912  ABTI_thread *p_self_thread = p_local_xstream->p_thread;
1913 
1914  ABTI_ythread *p_self = ABTI_thread_get_ythread_or_null(p_self_thread);
1915  if (!p_self) {
1916  thread_join_busywait(p_thread);
1917  return;
1918  }
1919 
1920  /* The target ULT should be different. */
1921  ABTI_ASSERT(p_thread != p_self_thread);
1922 
1923  ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
1924  if (!p_ythread) {
1925  thread_join_yield_task(&p_local_xstream, p_self, p_thread);
1926  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1927  return;
1928  }
1929 
1930  ABT_pool_access access = p_self->thread.p_pool->access;
1931 
1932  if ((p_self->thread.p_pool == p_ythread->thread.p_pool) &&
1933  (access == ABT_POOL_ACCESS_PRIV || access == ABT_POOL_ACCESS_MPSC ||
1934  access == ABT_POOL_ACCESS_SPSC) &&
1935  (ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
1936  ABT_THREAD_STATE_BLOCKED)) {
1937 
1938  ABTI_xstream *p_xstream = p_self->thread.p_last_xstream;
1939 
1940  /* If another ES is calling ABTI_ythread_set_ready(), p_ythread may not
1941  * have been added to the pool yet because ABTI_ythread_set_ready()
1942  * changes the state first followed by pushing p_ythread to the pool.
1943  * Therefore, we have to check whether p_ythread is in the pool, and if
1944  * not, we need to wait until it is added. */
1945  while (p_ythread->thread.p_pool->u_is_in_pool(p_ythread->thread.unit) !=
1946  ABT_TRUE) {
1947  }
1948 
1949  /* This is corresponding to suspension. */
1950  ABTI_tool_event_ythread_suspend(p_local_xstream, p_self,
1951  p_self->thread.p_parent,
1952  ABT_SYNC_EVENT_TYPE_THREAD_JOIN,
1953  (void *)p_ythread);
1954 
1955  /* Increase the number of blocked units. Be sure to execute
1956  * ABTI_pool_inc_num_blocked() before ABTI_pool_remove() in order not to
1957  * underestimate the number of units in a pool. */
1958  ABTI_pool_inc_num_blocked(p_self->thread.p_pool);
1959  /* Remove the target ULT from the pool */
1960  int abt_errno =
1961  ABTI_pool_remove(p_ythread->thread.p_pool, p_ythread->thread.unit);
1962  /* This failure is fatal. */
1963  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
1964 
1965  /* Set the link in the context for the target ULT. Since p_link will be
1966  * referenced by p_self, this update does not require release store. */
1967  ABTD_atomic_relaxed_store_ythread_context_ptr(&p_ythread->ctx.p_link,
1968  &p_self->ctx);
1969  /* Set the last ES */
1970  p_ythread->thread.p_last_xstream = p_xstream;
1971  ABTD_atomic_release_store_int(&p_ythread->thread.state,
1972  ABT_THREAD_STATE_RUNNING);
1973 
1974  /* Make the current ULT BLOCKED */
1975  ABTD_atomic_release_store_int(&p_self->thread.state,
1976  ABT_THREAD_STATE_BLOCKED);
1977 
1978  LOG_DEBUG("[U%" PRIu64 ":E%d] blocked to join U%" PRIu64 "\n",
1979  ABTI_thread_get_id(&p_self->thread),
1980  p_self->thread.p_last_xstream->rank,
1981  ABTI_thread_get_id(&p_ythread->thread));
1982  LOG_DEBUG("[U%" PRIu64 ":E%d] start running\n",
1983  ABTI_thread_get_id(&p_ythread->thread),
1984  p_ythread->thread.p_last_xstream->rank);
1985 
1986  /* Switch the context */
1987  ABTI_ythread *p_prev =
1988  ABTI_ythread_context_switch_to_sibling(&p_local_xstream, p_self,
1989  p_ythread);
1990  *pp_local = ABTI_xstream_get_local(p_local_xstream);
1991  ABTI_tool_event_thread_run(p_local_xstream, &p_self->thread,
1992  &p_prev->thread, p_self->thread.p_parent);
1993 
1994  } else if ((p_self->thread.p_pool != p_ythread->thread.p_pool) &&
1995  (access == ABT_POOL_ACCESS_PRIV ||
1996  access == ABT_POOL_ACCESS_SPSC)) {
1997  /* FIXME: once we change the suspend/resume mechanism (i.e., asking the
1998  * scheduler to wake up the blocked ULT), we will be able to handle all
1999  * access modes. */
2000  thread_join_yield_ythread(&p_local_xstream, p_self, p_ythread);
2001  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2002  return;
2003 
2004  } else {
2005  /* Tell p_ythread that there has been a join request. */
2006  /* If request already has ABTI_THREAD_REQ_JOIN, p_ythread is
2007  * terminating. We can't block p_self in this case. */
2008  uint32_t req = ABTD_atomic_fetch_or_uint32(&p_ythread->thread.request,
2009  ABTI_THREAD_REQ_JOIN);
2010  if (req & ABTI_THREAD_REQ_JOIN) {
2011  thread_join_yield_ythread(&p_local_xstream, p_self, p_ythread);
2012  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2013  return;
2014  }
2015 
2016  ABTI_ythread_set_blocked(p_self);
2017  LOG_DEBUG("[U%" PRIu64 ":E%d] blocked to join U%" PRIu64 "\n",
2018  ABTI_thread_get_id(&p_self->thread),
2019  p_self->thread.p_last_xstream->rank,
2020  ABTI_thread_get_id(&p_ythread->thread));
2021 
2022  /* Set the link in the context of the target ULT. This p_link might be
2023  * read by p_ythread running on another ES in parallel, so release-store
2024  * is needed here. */
2025  ABTD_atomic_release_store_ythread_context_ptr(&p_ythread->ctx.p_link,
2026  &p_self->ctx);
2027 
2028  /* Suspend the current ULT */
2029  ABTI_ythread_suspend(&p_local_xstream, p_self,
2030  ABT_SYNC_EVENT_TYPE_THREAD_JOIN,
2031  (void *)p_ythread);
2032  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2033  }
2034 
2035  /* Resume */
2036  /* If p_self's state is BLOCKED, the target ULT has terminated on the same
2037  * ES as p_self's ES and the control has come from the target ULT.
2038  * Otherwise, the target ULT was migrated to a different ES and p_self
2039  * was resumed by p_self's scheduler. In the latter case, we don't
2040  * need to change p_self's state. */
2041  if (ABTD_atomic_relaxed_load_int(&p_self->thread.state) ==
2042  ABT_THREAD_STATE_BLOCKED) {
2043  ABTD_atomic_release_store_int(&p_self->thread.state,
2044  ABT_THREAD_STATE_READY);
2045  ABTI_pool_dec_num_blocked(p_self->thread.p_pool);
2046  LOG_DEBUG("[U%" PRIu64 ":E%d] resume after join\n",
2047  ABTI_thread_get_id(&p_self->thread),
2048  p_self->thread.p_last_xstream->rank);
2049  ABTI_tool_event_thread_join(*pp_local, p_thread, &p_self->thread);
2050  } else {
2051  /* Use a yield-based method. */
2052  thread_join_yield_ythread(&p_local_xstream, p_self, p_ythread);
2053  *pp_local = ABTI_xstream_get_local(p_local_xstream);
2054  return;
2055  }
2056 }
2057 
2058 static void thread_root_func(void *arg)
2059 {
2060  /* root thread is working on a special context, so it should not rely on
2061  * functionality that needs yield. */
2062  ABTI_local *p_local = ABTI_local_get_local();
2063  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
2064  ABTI_ASSERT(ABTD_atomic_relaxed_load_int(&p_local_xstream->state) ==
2065  ABT_XSTREAM_STATE_RUNNING);
2066 
2067  ABTI_ythread *p_root_ythread = p_local_xstream->p_root_ythread;
2068  p_local_xstream->p_thread = &p_root_ythread->thread;
2069  ABTI_pool *p_root_pool = p_local_xstream->p_root_pool;
2070 
2071  do {
2072  ABT_unit unit = ABTI_pool_pop(p_root_pool);
2073  if (unit != ABT_UNIT_NULL) {
2074  ABTI_xstream *p_xstream = p_local_xstream;
2075  ABTI_xstream_run_unit(&p_xstream, unit, p_root_pool);
2076  /* The root thread must be executed on the same execution stream. */
2077  ABTI_ASSERT(p_xstream == p_local_xstream);
2078  }
2079  } while (ABTD_atomic_acquire_load_int(
2080  &p_local_xstream->p_main_sched->p_ythread->thread.state) !=
2081  ABT_THREAD_STATE_TERMINATED);
2082  /* The main scheduler thread finishes. */
2083 
2084  /* Set the ES's state as TERMINATED */
2085  ABTD_atomic_release_store_int(&p_local_xstream->state,
2086  ABT_XSTREAM_STATE_TERMINATED);
2087 
2088  if (p_local_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY) {
2089  /* Let us jump back to the main thread (then finalize Argobots) */
2090  ABTD_ythread_finish_context(&p_root_ythread->ctx,
2091  &gp_ABTI_global->p_main_ythread->ctx);
2092  }
2093 }
2094 
2095 static void thread_main_sched_func(void *arg)
2096 {
2097  ABTI_local *p_local = ABTI_local_get_local();
2098  ABTI_xstream *p_local_xstream = ABTI_local_get_xstream(p_local);
2099 
2100  while (1) {
2101  /* Execute the run function of scheduler */
2102  ABTI_sched *p_sched = p_local_xstream->p_main_sched;
2103  ABTI_ASSERT(p_local_xstream->p_thread == &p_sched->p_ythread->thread);
2104 
2105  LOG_DEBUG("[S%" PRIu64 "] start\n", p_sched->id);
2106  p_sched->run(ABTI_sched_get_handle(p_sched));
2107  /* From this point, the main scheduler may already have been replaced. */
2108  /* The main scheduler must be executed on the same execution stream. */
2109  ABTI_ASSERT(p_local == ABTI_local_get_local_uninlined());
2110  LOG_DEBUG("[S%" PRIu64 "] end\n", p_sched->id);
2111 
2112  p_sched = p_local_xstream->p_main_sched;
2113  uint32_t request = ABTD_atomic_acquire_load_uint32(
2114  &p_sched->p_ythread->thread.request);
2115 
2116  /* If there is an exit or a cancel request, the ES terminates
2117  * regardless of remaining work units. */
2118  if (request & (ABTI_THREAD_REQ_TERMINATE | ABTI_THREAD_REQ_CANCEL))
2119  break;
2120 
2121  /* When join is requested, the ES terminates after finishing
2122  * execution of all work units. */
2123  if ((ABTD_atomic_relaxed_load_uint32(&p_sched->request) &
2124  ABTI_SCHED_REQ_FINISH) &&
2125  ABTI_sched_get_effective_size(p_local, p_sched) == 0) {
2126  break;
2127  }
2128  }
2129  /* Finish this thread and go back to the root thread. */
2130 }
2131 
2132 #ifndef ABT_CONFIG_DISABLE_MIGRATION
2133 ABTU_ret_err static int thread_migrate_to_xstream(ABTI_local **pp_local,
2134  ABTI_thread *p_thread,
2135  ABTI_xstream *p_xstream)
2136 {
2137  /* checking for cases when migration is not allowed */
2138  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_xstream->state) !=
2139  ABT_XSTREAM_STATE_TERMINATED,
2140  ABT_ERR_INV_XSTREAM);
2141  ABTI_CHECK_TRUE(!(p_thread->type &
2142  (ABTI_THREAD_TYPE_MAIN | ABTI_THREAD_TYPE_MAIN_SCHED)),
2143  ABT_ERR_INV_THREAD);
2144  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_thread->state) !=
2145  ABT_THREAD_STATE_TERMINATED,
2146  ABT_ERR_INV_THREAD);
2147 
2148  /* We need to find the target scheduler */
2149  /* We check the state of the ES */
2150  ABTI_CHECK_TRUE(ABTD_atomic_acquire_load_int(&p_xstream->state) !=
2151  ABT_XSTREAM_STATE_TERMINATED,
2152  ABT_ERR_MIGRATION_TARGET);
2153  /* The migration target should be the main scheduler since it is
2154  * hard to guarantee the lifetime of the stackable scheduler. */
2155  ABTI_sched *p_sched = p_xstream->p_main_sched;
2156 
2157  /* We check the state of the sched */
2158  /* Find a pool */
2159  ABTI_pool *p_pool = NULL;
2160  int abt_errno;
2161  abt_errno =
2162  ABTI_sched_get_migration_pool(p_sched, p_thread->p_pool, &p_pool);
2163  ABTI_CHECK_ERROR(abt_errno);
2164  /* We set the migration counter to prevent the scheduler from
2165  * stopping */
2166  ABTI_pool_inc_num_migrations(p_pool);
2167 
2168  abt_errno = thread_migrate_to_pool(pp_local, p_thread, p_pool);
2169  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
2170  ABTI_pool_dec_num_migrations(p_pool);
2171  return abt_errno;
2172  }
2173  return ABT_SUCCESS;
2174 }
2175 #endif
2176 
2177 static inline ABT_unit_id thread_get_new_id(void)
2178 {
2179  return (ABT_unit_id)ABTD_atomic_fetch_add_uint64(&g_thread_id, 1);
2180 }