ARGOBOTS  dce6e727ffc4ca5b3ffc04cb9517c6689be51ec5
abti_ythread.h
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#ifndef ABTI_YTHREAD_H_INCLUDED
#define ABTI_YTHREAD_H_INCLUDED

/* Inlined functions for yieldable threads */

static inline ABTI_ythread *ABTI_ythread_get_ptr(ABT_thread thread)
{
#ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
    ABTI_ythread *p_ythread;
    if (thread == ABT_THREAD_NULL) {
        p_ythread = NULL;
    } else {
        p_ythread = (ABTI_ythread *)thread;
    }
    return p_ythread;
#else
    return (ABTI_ythread *)thread;
#endif
}

static inline ABT_thread ABTI_ythread_get_handle(ABTI_ythread *p_ythread)
{
#ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
    ABT_thread h_thread;
    if (p_ythread == NULL) {
        h_thread = ABT_THREAD_NULL;
    } else {
        h_thread = (ABT_thread)p_ythread;
    }
    return h_thread;
#else
    return (ABT_thread)p_ythread;
#endif
}
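
/* Example (illustrative sketch): the two converters above form a NULL-safe
 * pair; under error checking, ABT_THREAD_NULL maps to NULL and back.  The
 * helper name example_ythread_roundtrip is hypothetical and only demonstrates
 * the intended use of the converters. */
static inline ABT_thread example_ythread_roundtrip(ABT_thread thread)
{
    ABTI_ythread *p_ythread = ABTI_ythread_get_ptr(thread);
    if (p_ythread == NULL)
        return ABT_THREAD_NULL;
    /* The reverse conversion restores the original handle value. */
    return ABTI_ythread_get_handle(p_ythread);
}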

static inline void ABTI_ythread_resume_and_push(ABTI_local *p_local,
                                                ABTI_ythread *p_ythread)
{
    /* The ULT must be in BLOCKED state. */
    ABTI_ASSERT(ABTD_atomic_acquire_load_int(&p_ythread->thread.state) ==
                ABT_THREAD_STATE_BLOCKED);

    ABTI_event_ythread_resume(p_local, p_ythread,
                              ABTI_local_get_xstream_or_null(p_local)
                                  ? ABTI_local_get_xstream(p_local)->p_thread
                                  : NULL);
    /* p_ythread->thread.p_pool is loaded before ABTI_pool_add_thread() to keep
     * num_blocked consistent.  Otherwise, other threads might pop p_ythread
     * that has been pushed by ABTI_pool_add_thread() and change
     * p_ythread->thread.p_pool via ABT_unit_set_associated_pool(). */
    ABTI_pool *p_pool = p_ythread->thread.p_pool;

    /* Add the ULT to its associated pool. */
    ABTI_pool_add_thread(&p_ythread->thread, ABT_POOL_CONTEXT_OP_THREAD_RESUME);

    /* Decrease the number of blocked threads. */
    ABTI_pool_dec_num_blocked(p_pool);
}
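
/* Example (illustrative sketch): a wake-up path of a hypothetical
 * synchronization object.  It assumes p_waiter was recorded while in
 * ABT_THREAD_STATE_BLOCKED; ABTI_ythread_resume_and_push() then makes it
 * schedulable again by pushing it back to its associated pool.  The helper
 * name example_wake_one_waiter is hypothetical. */
static inline void example_wake_one_waiter(ABTI_local *p_local,
                                           ABTI_ythread *p_waiter)
{
    if (p_waiter != NULL) {
        /* num_blocked of the waiter's pool is decremented inside. */
        ABTI_ythread_resume_and_push(p_local, p_waiter);
    }
}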

static inline ABTI_ythread *
ABTI_ythread_context_get_ythread(ABTD_ythread_context *p_ctx)
{
    return (ABTI_ythread *)(((char *)p_ctx) - offsetof(ABTI_ythread, ctx));
}
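
/* Example (illustrative sketch): the converter above is the usual
 * "container_of" idiom, so recovering the enclosing ABTI_ythread from its
 * embedded context is the identity.  example_container_of_holds is a
 * hypothetical helper that merely states this invariant. */
static inline ABT_bool example_container_of_holds(ABTI_ythread *p_ythread)
{
    ABTD_ythread_context *p_ctx = &p_ythread->ctx;
    return (ABTI_ythread_context_get_ythread(p_ctx) == p_ythread) ? ABT_TRUE
                                                                  : ABT_FALSE;
}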

ABTU_noreturn static inline void
ABTI_ythread_context_jump(ABTI_xstream *p_local_xstream, ABTI_ythread *p_new)
{
    if (ABTD_ythread_context_is_started(&p_new->ctx)) {
        ABTD_ythread_context_jump(&p_new->ctx);
    } else {
        if (!ABTD_ythread_context_has_stack(&p_new->ctx)) {
            int ret =
                ABTI_mem_alloc_ythread_mempool_stack(p_local_xstream, p_new);
            /* FIXME: this error should be propagated to the caller. */
            ABTI_ASSERT(ret == ABT_SUCCESS);
        }
        ABTD_ythread_context_start_and_jump(&p_new->ctx);
    }
    ABTU_unreachable();
}

static inline void ABTI_ythread_context_switch(ABTI_xstream *p_local_xstream,
                                               ABTI_ythread *p_old,
                                               ABTI_ythread *p_new)
{
    if (ABTD_ythread_context_is_started(&p_new->ctx)) {
        ABTD_ythread_context_switch(&p_old->ctx, &p_new->ctx);
    } else {
        if (!ABTD_ythread_context_has_stack(&p_new->ctx)) {
            int ret =
                ABTI_mem_alloc_ythread_mempool_stack(p_local_xstream, p_new);
            /* FIXME: this error should be propagated to the caller. */
            ABTI_ASSERT(ret == ABT_SUCCESS);
        }
        ABTD_ythread_context_start_and_switch(&p_old->ctx, &p_new->ctx);
    }
}
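
/* Example (illustrative sketch): both jump and switch above allocate a
 * mempool stack lazily, i.e., only when an unstarted ULT reaches this point
 * without a stack bound to its context.  Sketched with the public API under
 * that assumption; whether a stack is bound at creation depends on the
 * configuration and attributes, and error handling is omitted for brevity.
 * example_create_many is a hypothetical helper. */
static inline void example_create_many(ABT_pool pool, void (*f)(void *), int n)
{
    int i;
    for (i = 0; i < n; i++) {
        /* When no stack is bound at creation, the stack comes from the
         * per-ES mempool at the ULT's first dispatch. */
        ABT_thread_create(pool, f, NULL, ABT_THREAD_ATTR_NULL, NULL);
    }
}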

ABTU_noreturn static inline void
ABTI_ythread_context_jump_with_call(ABTI_xstream *p_local_xstream,
                                    ABTI_ythread *p_new, void (*f_cb)(void *),
                                    void *cb_arg)
{
    if (ABTD_ythread_context_is_started(&p_new->ctx)) {
        ABTD_ythread_context_jump_with_call(&p_new->ctx, f_cb, cb_arg);
    } else {
        if (!ABTD_ythread_context_has_stack(&p_new->ctx)) {
            int ret =
                ABTI_mem_alloc_ythread_mempool_stack(p_local_xstream, p_new);
            /* FIXME: this error should be propagated to the caller. */
            ABTI_ASSERT(ret == ABT_SUCCESS);
        }
        ABTD_ythread_context_start_and_jump_with_call(&p_new->ctx, f_cb,
                                                      cb_arg);
    }
    ABTU_unreachable();
}

static inline void
ABTI_ythread_context_switch_with_call(ABTI_xstream *p_local_xstream,
                                      ABTI_ythread *p_old, ABTI_ythread *p_new,
                                      void (*f_cb)(void *), void *cb_arg)
{
    if (ABTD_ythread_context_is_started(&p_new->ctx)) {
        ABTD_ythread_context_switch_with_call(&p_old->ctx, &p_new->ctx, f_cb,
                                              cb_arg);
    } else {
        if (!ABTD_ythread_context_has_stack(&p_new->ctx)) {
            int ret =
                ABTI_mem_alloc_ythread_mempool_stack(p_local_xstream, p_new);
            /* FIXME: this error should be propagated to the caller. */
            ABTI_ASSERT(ret == ABT_SUCCESS);
        }
        ABTD_ythread_context_start_and_switch_with_call(&p_old->ctx,
                                                        &p_new->ctx, f_cb,
                                                        cb_arg);
    }
}

static inline void
ABTI_ythread_switch_to_child_internal(ABTI_xstream **pp_local_xstream,
                                      ABTI_ythread *p_old, ABTI_ythread *p_new)
{
    p_new->thread.p_parent = &p_old->thread;
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    ABTI_event_thread_run(p_local_xstream, &p_new->thread, &p_old->thread,
                          p_new->thread.p_parent);
    p_local_xstream->p_thread = &p_new->thread;
    p_new->thread.p_last_xstream = p_local_xstream;
    /* Context switch starts. */
    ABTI_ythread_context_switch(p_local_xstream, p_old, p_new);
    /* Context switch finishes.  p_old may have been resumed on a different
     * execution stream, so refresh the caller's view of the local ES. */
    *pp_local_xstream = p_old->thread.p_last_xstream;
}

ABTU_noreturn static inline void
ABTI_ythread_jump_to_sibling_internal(ABTI_xstream *p_local_xstream,
                                      ABTI_ythread *p_old, ABTI_ythread *p_new,
                                      void (*f_cb)(void *), void *cb_arg)
{
    p_new->thread.p_parent = p_old->thread.p_parent;
    ABTI_event_thread_run(p_local_xstream, &p_new->thread, &p_old->thread,
                          p_new->thread.p_parent);
    p_local_xstream->p_thread = &p_new->thread;
    p_new->thread.p_last_xstream = p_local_xstream;
    ABTI_ythread_context_jump_with_call(p_local_xstream, p_new, f_cb, cb_arg);
    ABTU_unreachable();
}

static inline void ABTI_ythread_switch_to_sibling_internal(
    ABTI_xstream **pp_local_xstream, ABTI_ythread *p_old, ABTI_ythread *p_new,
    void (*f_cb)(void *), void *cb_arg)
{
    p_new->thread.p_parent = p_old->thread.p_parent;
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    ABTI_event_thread_run(p_local_xstream, &p_new->thread, &p_old->thread,
                          p_new->thread.p_parent);
    p_local_xstream->p_thread = &p_new->thread;
    p_new->thread.p_last_xstream = p_local_xstream;
    /* Context switch starts. */
    ABTI_ythread_context_switch_with_call(p_local_xstream, p_old, p_new, f_cb,
                                          cb_arg);
    /* Context switch finishes. */
    *pp_local_xstream = p_old->thread.p_last_xstream;
}

ABTU_noreturn static inline void
ABTI_ythread_jump_to_parent_internal(ABTI_xstream *p_local_xstream,
                                     ABTI_ythread *p_old, void (*f_cb)(void *),
                                     void *cb_arg)
{
    ABTI_ythread *p_new = ABTI_thread_get_ythread(p_old->thread.p_parent);
    p_local_xstream->p_thread = &p_new->thread;
    ABTI_ASSERT(p_new->thread.p_last_xstream == p_local_xstream);
    ABTI_ythread_context_jump_with_call(p_local_xstream, p_new, f_cb, cb_arg);
    ABTU_unreachable();
}

static inline void
ABTI_ythread_switch_to_parent_internal(ABTI_xstream **pp_local_xstream,
                                       ABTI_ythread *p_old,
                                       void (*f_cb)(void *), void *cb_arg)
{
    ABTI_ythread *p_new = ABTI_thread_get_ythread(p_old->thread.p_parent);
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    p_local_xstream->p_thread = &p_new->thread;
    ABTI_ASSERT(p_new->thread.p_last_xstream == p_local_xstream);
    /* Context switch starts. */
    ABTI_ythread_context_switch_with_call(p_local_xstream, p_old, p_new, f_cb,
                                          cb_arg);
    /* Context switch finishes. */
    *pp_local_xstream = p_old->thread.p_last_xstream;
}

static inline ABT_bool ABTI_ythread_context_peek(ABTI_ythread *p_ythread,
                                                 void (*f_peek)(void *),
                                                 void *arg)
{
    return ABTD_ythread_context_peek(&p_ythread->ctx, f_peek, arg);
}

static inline void ABTI_ythread_run_child(ABTI_xstream **pp_local_xstream,
                                          ABTI_ythread *p_self,
                                          ABTI_ythread *p_child)
{
    ABTD_atomic_release_store_int(&p_child->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_ythread_switch_to_child_internal(pp_local_xstream, p_self, p_child);
}

typedef enum {
    ABTI_YTHREAD_YIELD_KIND_USER,
    ABTI_YTHREAD_YIELD_KIND_YIELD_LOOP,
} ABTI_ythread_yield_kind;

typedef enum {
    ABTI_YTHREAD_YIELD_TO_KIND_USER,
    ABTI_YTHREAD_YIELD_TO_KIND_CREATE_TO,
    ABTI_YTHREAD_YIELD_TO_KIND_REVIVE_TO,
} ABTI_ythread_yield_to_kind;

void ABTI_ythread_callback_yield_user_yield(void *arg);
void ABTI_ythread_callback_yield_loop(void *arg);
void ABTI_ythread_callback_yield_user_yield_to(void *arg);
void ABTI_ythread_callback_yield_create_to(void *arg);
void ABTI_ythread_callback_yield_revive_to(void *arg);

static inline void ABTI_ythread_yield(ABTI_xstream **pp_local_xstream,
                                      ABTI_ythread *p_self,
                                      ABTI_ythread_yield_kind kind,
                                      ABT_sync_event_type sync_event_type,
                                      void *p_sync)
{
    ABTI_event_ythread_yield(*pp_local_xstream, p_self, p_self->thread.p_parent,
                             sync_event_type, p_sync);
    if (kind == ABTI_YTHREAD_YIELD_KIND_USER) {
        ABTI_ythread_switch_to_parent_internal(
            pp_local_xstream, p_self, ABTI_ythread_callback_yield_user_yield,
            (void *)p_self);
    } else {
        ABTI_UB_ASSERT(kind == ABTI_YTHREAD_YIELD_KIND_YIELD_LOOP);
        ABTI_ythread_switch_to_parent_internal(pp_local_xstream, p_self,
                                               ABTI_ythread_callback_yield_loop,
                                               (void *)p_self);
    }
}
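
/* Example (illustrative sketch): roughly how a user-level yield can be built
 * on ABTI_ythread_yield(): the calling ULT hands control back to its parent
 * (the scheduler that dispatched it) and is re-enqueued by the yield
 * callback.  example_self_yield is a hypothetical helper; the real public
 * entry points perform additional argument and state checking. */
static inline void example_self_yield(ABTI_xstream **pp_local_xstream,
                                      ABTI_ythread *p_self)
{
    ABTI_ythread_yield(pp_local_xstream, p_self, ABTI_YTHREAD_YIELD_KIND_USER,
                       ABT_SYNC_EVENT_TYPE_USER, NULL);
}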

static inline void
ABTI_ythread_yield_to(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_self,
                      ABTI_ythread *p_target, ABTI_ythread_yield_to_kind kind,
                      ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_yield(*pp_local_xstream, p_self, p_self->thread.p_parent,
                             sync_event_type, p_sync);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    if (kind == ABTI_YTHREAD_YIELD_TO_KIND_USER) {
        ABTI_ythread_switch_to_sibling_internal(
            pp_local_xstream, p_self, p_target,
            ABTI_ythread_callback_yield_user_yield_to, (void *)p_self);
    } else if (kind == ABTI_YTHREAD_YIELD_TO_KIND_CREATE_TO) {
        ABTI_ythread_switch_to_sibling_internal(
            pp_local_xstream, p_self, p_target,
            ABTI_ythread_callback_yield_create_to, (void *)p_self);
    } else {
        ABTI_UB_ASSERT(kind == ABTI_YTHREAD_YIELD_TO_KIND_REVIVE_TO);
        ABTI_ythread_switch_to_sibling_internal(
            pp_local_xstream, p_self, p_target,
            ABTI_ythread_callback_yield_revive_to, (void *)p_self);
    }
}

/* Old interface used for ABT_thread_yield_to() */
void ABTI_ythread_callback_thread_yield_to(void *arg);

static inline void
ABTI_ythread_thread_yield_to(ABTI_xstream **pp_local_xstream,
                             ABTI_ythread *p_self, ABTI_ythread *p_target,
                             ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_yield(*pp_local_xstream, p_self, p_self->thread.p_parent,
                             sync_event_type, p_sync);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);

    ABTI_ythread_switch_to_sibling_internal(
        pp_local_xstream, p_self, p_target,
        ABTI_ythread_callback_thread_yield_to, (void *)p_self);
}

typedef struct {
    ABTI_ythread *p_prev;
    ABTI_ythread *p_next;
} ABTI_ythread_callback_resume_yield_to_arg;

void ABTI_ythread_callback_resume_yield_to(void *arg);

typedef enum {
    ABTI_YTHREAD_RESUME_YIELD_TO_KIND_USER,
} ABTI_ythread_resume_yield_to_kind;

static inline void
ABTI_ythread_resume_yield_to(ABTI_xstream **pp_local_xstream,
                             ABTI_ythread *p_self, ABTI_ythread *p_target,
                             ABTI_ythread_resume_yield_to_kind kind,
                             ABT_sync_event_type sync_event_type, void *p_sync)
{
    /* The ULT must be in BLOCKED state. */
    ABTI_UB_ASSERT(ABTD_atomic_acquire_load_int(&p_target->thread.state) ==
                   ABT_THREAD_STATE_BLOCKED);

    ABTI_event_ythread_resume(ABTI_xstream_get_local(*pp_local_xstream),
                              p_target, &p_self->thread);
    ABTI_event_ythread_yield(*pp_local_xstream, p_self, p_self->thread.p_parent,
                             sync_event_type, p_sync);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_UB_ASSERT(kind == ABTI_YTHREAD_RESUME_YIELD_TO_KIND_USER);
    ABTI_ythread_callback_resume_yield_to_arg arg = { p_self, p_target };
    ABTI_ythread_switch_to_sibling_internal(
        pp_local_xstream, p_self, p_target,
        ABTI_ythread_callback_resume_yield_to, (void *)&arg);
}

void ABTI_ythread_callback_suspend(void *arg);

static inline void ABTI_ythread_suspend(ABTI_xstream **pp_local_xstream,
                                        ABTI_ythread *p_self,
                                        ABT_sync_event_type sync_event_type,
                                        void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_switch_to_parent_internal(pp_local_xstream, p_self,
                                           ABTI_ythread_callback_suspend,
                                           (void *)p_self);
}

static inline void ABTI_ythread_suspend_to(ABTI_xstream **pp_local_xstream,
                                           ABTI_ythread *p_self,
                                           ABTI_ythread *p_target,
                                           ABT_sync_event_type sync_event_type,
                                           void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_switch_to_sibling_internal(pp_local_xstream, p_self, p_target,
                                            ABTI_ythread_callback_suspend,
                                            (void *)p_self);
}

typedef struct {
    ABTI_ythread *p_prev;
    ABTI_ythread *p_next;
} ABTI_ythread_callback_resume_suspend_to_arg;

void ABTI_ythread_callback_resume_suspend_to(void *arg);

static inline void ABTI_ythread_resume_suspend_to(
    ABTI_xstream **pp_local_xstream, ABTI_ythread *p_self,
    ABTI_ythread *p_target, ABT_sync_event_type sync_event_type, void *p_sync)
{
    /* The ULT must be in BLOCKED state. */
    ABTI_UB_ASSERT(ABTD_atomic_acquire_load_int(&p_target->thread.state) ==
                   ABT_THREAD_STATE_BLOCKED);

    ABTI_event_ythread_resume(ABTI_xstream_get_local(*pp_local_xstream),
                              p_target, &p_self->thread);
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_ythread_callback_resume_suspend_to_arg arg = { p_self, p_target };
    ABTI_ythread_switch_to_sibling_internal(
        pp_local_xstream, p_self, p_target,
        ABTI_ythread_callback_resume_suspend_to, (void *)&arg);
}

void ABTI_ythread_callback_exit(void *arg);

static inline ABTI_ythread *
ABTI_ythread_atomic_get_joiner(ABTI_ythread *p_ythread)
{
    ABTD_ythread_context *p_ctx = &p_ythread->ctx;
    ABTD_ythread_context *p_link =
        ABTD_atomic_acquire_load_ythread_context_ptr(&p_ctx->p_link);
    if (!p_link) {
        uint32_t req = ABTD_atomic_fetch_or_uint32(&p_ythread->thread.request,
                                                   ABTI_THREAD_REQ_JOIN);
        if (!(req & ABTI_THREAD_REQ_JOIN)) {
            /* This case means there is no join request. */
            return NULL;
        } else {
            /* This case means a join request has been issued and the joiner is
             * setting p_link.  Wait for it. */
            do {
                p_link = ABTD_atomic_acquire_load_ythread_context_ptr(
                    &p_ctx->p_link);
            } while (!p_link);
            return ABTI_ythread_context_get_ythread(p_link);
        }
    } else {
        /* There is a join request. */
        return ABTI_ythread_context_get_ythread(p_link);
    }
}
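
/* Example (illustrative sketch): the joiner-side handshake that the lookup
 * above expects.  One sequence consistent with it is to publish the joiner's
 * context through p_link and then raise ABTI_THREAD_REQ_JOIN, so the
 * terminating ULT either reads p_link directly or sees the request bit and
 * spins briefly until p_link becomes visible.  example_publish_joiner is a
 * hypothetical, heavily simplified helper that assumes a release-store
 * counterpart of the acquire load used above; the actual join path also
 * decides whether the joiner may block at all. */
static inline void example_publish_joiner(ABTI_ythread *p_target,
                                          ABTI_ythread *p_joiner)
{
    /* Publish the joiner's context first ... */
    ABTD_atomic_release_store_ythread_context_ptr(&p_target->ctx.p_link,
                                                  &p_joiner->ctx);
    /* ... then raise the join request so the terminating ULT can find it. */
    ABTD_atomic_fetch_or_uint32(&p_target->thread.request,
                                ABTI_THREAD_REQ_JOIN);
}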

static inline void ABTI_ythread_resume_joiner(ABTI_xstream *p_local_xstream,
                                              ABTI_ythread *p_ythread)
{
    ABTI_ythread *p_joiner = ABTI_ythread_atomic_get_joiner(p_ythread);
    if (p_joiner) {
#ifndef ABT_CONFIG_ACTIVE_WAIT_POLICY
        if (p_joiner->thread.type == ABTI_THREAD_TYPE_EXT) {
            /* p_joiner is a non-yieldable thread (i.e., an external thread).
             * Wake up the waiter via the futex.  Note that p_arg is used to
             * store the futex (see thread_join_futexwait()). */
            ABTD_futex_single *p_futex =
                (ABTD_futex_single *)p_joiner->thread.p_arg;
            ABTD_futex_resume(p_futex);
            return;
        }
#endif
        /* p_joiner is a yieldable thread. */
        ABTI_ythread_resume_and_push(ABTI_xstream_get_local(p_local_xstream),
                                     p_joiner);
    }
}

ABTU_noreturn static inline void
ABTI_ythread_exit(ABTI_xstream *p_local_xstream, ABTI_ythread *p_self)
{
    ABTI_event_thread_finish(p_local_xstream, &p_self->thread,
                             p_self->thread.p_parent);
    ABTI_ythread *p_joiner = ABTI_ythread_atomic_get_joiner(p_self);
    if (p_joiner) {
#ifndef ABT_CONFIG_ACTIVE_WAIT_POLICY
        if (p_joiner->thread.type == ABTI_THREAD_TYPE_EXT) {
            /* p_joiner is a non-yieldable thread (i.e., an external thread).
             * Wake up the waiter via the futex.  Note that p_arg is used to
             * store the futex (see thread_join_futexwait()). */
            ABTD_futex_single *p_futex =
                (ABTD_futex_single *)p_joiner->thread.p_arg;
            ABTD_futex_resume(p_futex);
        } else
#endif
            if (p_self->thread.p_last_xstream ==
                    p_joiner->thread.p_last_xstream &&
                !(p_self->thread.type & ABTI_THREAD_TYPE_MAIN_SCHED)) {
            /* Only when the current ULT is on the same ES as p_joiner's can we
             * jump to the joiner ULT.  Note that a parent ULT cannot be a
             * joiner. */
            ABTI_pool_dec_num_blocked(p_joiner->thread.p_pool);
            ABTI_event_ythread_resume(ABTI_xstream_get_local(p_local_xstream),
                                      p_joiner, &p_self->thread);
            ABTD_atomic_release_store_int(&p_joiner->thread.state,
                                          ABT_THREAD_STATE_RUNNING);
            ABTI_ythread_jump_to_sibling_internal(p_local_xstream, p_self,
                                                  p_joiner,
                                                  ABTI_ythread_callback_exit,
                                                  (void *)p_self);
            ABTU_unreachable();
        } else {
            /* If the current ULT's associated ES is different from p_joiner's,
             * we cannot directly jump to p_joiner.  Instead, we wake up
             * p_joiner here so that p_joiner's scheduler can resume it.  Note
             * that the main scheduler needs to jump back to the root
             * scheduler, so the main scheduler must take this path. */
            ABTI_ythread_resume_and_push(ABTI_xstream_get_local(
                                             p_local_xstream),
                                         p_joiner);
        }
    }
    /* The waiter has been resumed.  Let's switch to the parent. */
    ABTI_ythread_jump_to_parent_internal(p_local_xstream, p_self,
                                         ABTI_ythread_callback_exit,
                                         (void *)p_self);
    ABTU_unreachable();
}

ABTU_noreturn static inline void
ABTI_ythread_exit_to(ABTI_xstream *p_local_xstream, ABTI_ythread *p_self,
                     ABTI_ythread *p_target)
{
    /* If another ULT is blocked to join the canceled ULT, we have to wake up
     * the joiner ULT.  However, unlike the case where the ULT has finished its
     * execution and calls ythread_terminate/exit, the caller of this function
     * wants to jump to p_target, so we should not context switch to the joiner
     * ULT here. */
    ABTI_ythread_resume_joiner(p_local_xstream, p_self);
    ABTI_event_thread_finish(p_local_xstream, &p_self->thread,
                             p_self->thread.p_parent);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_ythread_jump_to_sibling_internal(p_local_xstream, p_self, p_target,
                                          ABTI_ythread_callback_exit,
                                          (void *)p_self);
    ABTU_unreachable();
}

ABTU_noreturn static inline void ABTI_ythread_exit_to_primary(
    ABTI_global *p_global, ABTI_xstream *p_local_xstream, ABTI_ythread *p_self)
{
    /* No need to call a callback function. */
    ABTI_ythread *p_primary = p_global->p_primary_ythread;
    p_local_xstream->p_thread = &p_primary->thread;
    p_primary->thread.p_last_xstream = p_local_xstream;
    ABTD_atomic_release_store_int(&p_primary->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_ythread_context_jump_with_call(p_local_xstream, p_primary,
                                        ABTI_ythread_callback_exit, p_self);
    ABTU_unreachable();
}

typedef struct {
    ABTI_ythread *p_prev;
    ABTI_ythread *p_next;
} ABTI_ythread_callback_resume_exit_to_arg;

void ABTI_ythread_callback_resume_exit_to(void *arg);

ABTU_noreturn static inline void
ABTI_ythread_resume_exit_to(ABTI_xstream *p_local_xstream, ABTI_ythread *p_self,
                            ABTI_ythread *p_target)
{
    /* The ULT must be in BLOCKED state. */
    ABTI_UB_ASSERT(ABTD_atomic_acquire_load_int(&p_target->thread.state) ==
                   ABT_THREAD_STATE_BLOCKED);

    ABTI_event_ythread_resume(ABTI_xstream_get_local(p_local_xstream), p_target,
                              &p_self->thread);
    /* Wake up a joiner ULT attached to p_self. */
    ABTI_ythread_resume_joiner(p_local_xstream, p_self);
    ABTI_event_thread_finish(p_local_xstream, &p_self->thread,
                             p_self->thread.p_parent);
    ABTD_atomic_release_store_int(&p_target->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_ythread_callback_resume_exit_to_arg arg = { p_self, p_target };
    ABTI_ythread_jump_to_sibling_internal(p_local_xstream, p_self, p_target,
                                          ABTI_ythread_callback_resume_exit_to,
                                          (void *)&arg);
    ABTU_unreachable();
}

typedef struct {
    ABTI_ythread *p_prev;
    ABTD_spinlock *p_lock;
} ABTI_ythread_callback_suspend_unlock_arg;

void ABTI_ythread_callback_suspend_unlock(void *arg);

static inline void
ABTI_ythread_suspend_unlock(ABTI_xstream **pp_local_xstream,
                            ABTI_ythread *p_self, ABTD_spinlock *p_lock,
                            ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_callback_suspend_unlock_arg arg = { p_self, p_lock };
    ABTI_ythread_switch_to_parent_internal(pp_local_xstream, p_self,
                                           ABTI_ythread_callback_suspend_unlock,
                                           (void *)&arg);
}
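
/* Example (illustrative sketch): the intended use of
 * ABTI_ythread_suspend_unlock().  A waiting primitive records the caller
 * under its spinlock and then suspends; the lock is released by the callback
 * only after the caller's context has been switched away, so a waker can
 * never observe the waiter in the slot while it is still running on its own
 * stack.  example_wait_on and pp_waiter_slot are hypothetical. */
static inline void example_wait_on(ABTI_xstream **pp_local_xstream,
                                   ABTI_ythread *p_self, ABTD_spinlock *p_lock,
                                   ABTI_ythread **pp_waiter_slot)
{
    ABTD_spinlock_acquire(p_lock);
    /* Record the waiter while holding the lock. */
    *pp_waiter_slot = p_self;
    /* The lock is released inside the suspend callback, i.e., after the
     * context switch to the parent. */
    ABTI_ythread_suspend_unlock(pp_local_xstream, p_self, p_lock,
                                ABT_SYNC_EVENT_TYPE_OTHER, NULL);
}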

typedef struct {
    ABTI_ythread *p_prev;
    ABTI_ythread *p_target;
} ABTI_ythread_callback_suspend_join_arg;

void ABTI_ythread_callback_suspend_join(void *arg);

static inline void
ABTI_ythread_suspend_join(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_self,
                          ABTI_ythread *p_target,
                          ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_callback_suspend_join_arg arg = { p_self, p_target };
    ABTI_ythread_switch_to_parent_internal(pp_local_xstream, p_self,
                                           ABTI_ythread_callback_suspend_join,
                                           (void *)&arg);
}

typedef struct {
    ABTI_ythread *p_prev;
    ABTI_sched *p_main_sched;
} ABTI_ythread_callback_suspend_replace_sched_arg;

void ABTI_ythread_callback_suspend_replace_sched(void *arg);

static inline void ABTI_ythread_suspend_replace_sched(
    ABTI_xstream **pp_local_xstream, ABTI_ythread *p_self,
    ABTI_sched *p_main_sched, ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_callback_suspend_replace_sched_arg arg = { p_self,
                                                            p_main_sched };
    ABTI_ythread_switch_to_parent_internal(
        pp_local_xstream, p_self, ABTI_ythread_callback_suspend_replace_sched,
        (void *)&arg);
}

void ABTI_ythread_callback_orphan(void *arg);

static inline void
ABTI_ythread_yield_orphan(ABTI_xstream **pp_local_xstream, ABTI_ythread *p_self,
                          ABT_sync_event_type sync_event_type, void *p_sync)
{
    ABTI_event_ythread_suspend(*pp_local_xstream, p_self,
                               p_self->thread.p_parent, sync_event_type,
                               p_sync);
    ABTI_ythread_switch_to_parent_internal(pp_local_xstream, p_self,
                                           ABTI_ythread_callback_orphan,
                                           (void *)p_self);
}

static inline void ABTI_ythread_schedule(ABTI_global *p_global,
                                         ABTI_xstream **pp_local_xstream,
                                         ABTI_thread *p_thread)
{
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    const int request_op = ABTI_thread_handle_request(p_thread, ABT_TRUE);
    if (ABTU_likely(request_op == ABTI_THREAD_HANDLE_REQUEST_NONE)) {
        /* Execute p_thread. */
        ABTI_ythread *p_ythread = ABTI_thread_get_ythread_or_null(p_thread);
        if (p_ythread) {
            /* p_thread is yieldable.  Let's switch the context.  Since the
             * argument is pp_local_xstream, p_local_xstream->p_thread must be
             * yieldable. */
            ABTI_ythread *p_self =
                ABTI_thread_get_ythread(p_local_xstream->p_thread);
            ABTI_ythread_run_child(pp_local_xstream, p_self, p_ythread);
            /* The previous ULT (p_ythread) may not be the same as the one to
             * which the context has been switched. */
        } else {
            /* p_thread is not yieldable. */
            /* Change the task state. */
            ABTD_atomic_release_store_int(&p_thread->state,
                                          ABT_THREAD_STATE_RUNNING);

            /* Set the associated ES. */
            p_thread->p_last_xstream = p_local_xstream;

            /* Set the current thread and its parent. */
            ABTI_thread *p_sched_thread = p_local_xstream->p_thread;
            p_local_xstream->p_thread = p_thread;
            p_thread->p_parent = p_sched_thread;

            /* Execute the task function. */
            ABTI_event_thread_run(p_local_xstream, p_thread, p_sched_thread,
                                  p_sched_thread);
            p_thread->f_thread(p_thread->p_arg);
            ABTI_event_thread_finish(p_local_xstream, p_thread, p_sched_thread);

            /* Set the current running scheduler's thread. */
            p_local_xstream->p_thread = p_sched_thread;

            /* Terminate the tasklet. */
            ABTI_thread_terminate(p_global, p_local_xstream, p_thread);
        }
    } else if (request_op == ABTI_THREAD_HANDLE_REQUEST_CANCELLED) {
        /* If p_thread is cancelled, there's nothing to do. */
    } else if (request_op == ABTI_THREAD_HANDLE_REQUEST_MIGRATED) {
        /* If p_thread is migrated, let's push p_thread back to its pool. */
        ABTI_pool_add_thread(p_thread, ABT_POOL_CONTEXT_OP_THREAD_MIGRATE);
    }
}
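
/* Example (illustrative sketch): the shape of a scheduler loop around
 * ABTI_ythread_schedule().  Pool access is reduced to a hypothetical
 * example_pop_thread() prototype; real schedulers pop work units through the
 * pool interface and also check for events and termination requests. */
ABTI_thread *example_pop_thread(ABTI_pool *p_pool); /* hypothetical */

static inline void example_sched_loop(ABTI_global *p_global,
                                      ABTI_xstream **pp_local_xstream,
                                      ABTI_pool *p_pool)
{
    while (1) {
        ABTI_thread *p_thread = example_pop_thread(p_pool);
        if (!p_thread)
            break;
        /* Runs p_thread (ULT or tasklet) and returns when it yields, finishes,
         * is cancelled, or is migrated. */
        ABTI_ythread_schedule(p_global, pp_local_xstream, p_thread);
    }
}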

#endif /* ABTI_YTHREAD_H_INCLUDED */