ARGOBOTS  dce6e727ffc4ca5b3ffc04cb9517c6689be51ec5
ythread.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#include "abti.h"

#ifdef ABT_CONFIG_ENABLE_STACK_UNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
struct unwind_stack_t {
    FILE *fp;
};
static void ythread_unwind_stack(void *arg);
#endif

/*****************************************************************************/
/* Private APIs                                                              */
/*****************************************************************************/

static inline void ythread_callback_yield_impl(void *arg,
                                               ABT_pool_context context)
{
    ABTI_ythread *p_prev = (ABTI_ythread *)arg;
    if (ABTI_thread_handle_request(&p_prev->thread, ABT_TRUE) &
        ABTI_THREAD_HANDLE_REQUEST_CANCELLED) {
        /* p_prev is terminated. */
    } else {
        /* Push p_prev back to the pool. */
        ABTI_pool_add_thread(&p_prev->thread, context);
    }
}
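
/* Note: the yield/suspend callbacks in this file are executed by the context
 * switch machinery after control has left p_prev's stack, so making p_prev
 * visible to other execution streams here (e.g., by pushing it to a pool)
 * cannot race with code that is still running on p_prev. */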

void ABTI_ythread_callback_yield_user_yield(void *arg)
{
    ythread_callback_yield_impl(arg, ABT_POOL_CONTEXT_OP_THREAD_YIELD);
}

void ABTI_ythread_callback_yield_loop(void *arg)
{
    ythread_callback_yield_impl(arg, ABT_POOL_CONTEXT_OP_THREAD_YIELD_LOOP);
}

void ABTI_ythread_callback_yield_user_yield_to(void *arg)
{
    ythread_callback_yield_impl(arg, ABT_POOL_CONTEXT_OP_THREAD_YIELD_TO);
}

void ABTI_ythread_callback_yield_create_to(void *arg)
{
    ythread_callback_yield_impl(arg, ABT_POOL_CONTEXT_OP_THREAD_CREATE_TO);
}

void ABTI_ythread_callback_yield_revive_to(void *arg)
{
    ythread_callback_yield_impl(arg, ABT_POOL_CONTEXT_OP_THREAD_REVIVE_TO);
}
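
/* Each callback above tags the push with a distinct ABT_pool_context hint.
 * Below is a minimal sketch (not part of Argobots) of how a user-defined
 * pool could consume these hints in its push function; "my_pool_push" and
 * "my_enqueue" are hypothetical names, and the push-function signature is
 * assumed to follow the user-defined pool API in abt.h. */
#if 0
static void my_pool_push(ABT_pool pool, ABT_unit unit, ABT_pool_context context)
{
    if (context & ABT_POOL_CONTEXT_OP_THREAD_YIELD_LOOP) {
        /* Pushed from a yield in a synchronization loop: a pool may want to
         * deprioritize such threads so busy-waiters do not starve others. */
        my_enqueue(pool, unit, /* low priority */ 1);
    } else {
        /* Ordinary yield, yield-to, create-to, or revive-to: FIFO order is a
         * reasonable default. */
        my_enqueue(pool, unit, /* normal priority */ 0);
    }
}
#endif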

/* Before yield_to, p_prev->thread.p_pool's num_blocked must be incremented to
 * avoid making a pool empty. */
void ABTI_ythread_callback_thread_yield_to(void *arg)
{
    ABTI_ythread *p_prev = (ABTI_ythread *)arg;
    /* p_prev->thread.p_pool is loaded before ABTI_pool_add_thread() to keep
     * num_blocked consistent. Otherwise, other threads might pop p_prev
     * that has been pushed by ABTI_pool_add_thread() and change
     * p_prev->thread.p_pool by ABT_unit_set_associated_pool(). */
    ABTI_pool *p_pool = p_prev->thread.p_pool;
    if (ABTI_thread_handle_request(&p_prev->thread, ABT_TRUE) &
        ABTI_THREAD_HANDLE_REQUEST_CANCELLED) {
        /* p_prev is terminated. */
    } else {
        /* Push p_prev back to the pool. */
        ABTI_pool_add_thread(&p_prev->thread,
                             ABT_POOL_CONTEXT_OP_THREAD_YIELD_TO);
    }
    /* Decrease the number of blocked threads of the original pool (i.e.,
     * before migration), which has been increased by p_prev to avoid making a
     * pool size 0. */
    ABTI_pool_dec_num_blocked(p_pool);
}

void ABTI_ythread_callback_resume_yield_to(void *arg)
{
    ABTI_ythread_callback_resume_yield_to_arg *p_arg =
        (ABTI_ythread_callback_resume_yield_to_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTI_ythread *p_next = p_arg->p_next;
    if (ABTI_thread_handle_request(&p_prev->thread, ABT_TRUE) &
        ABTI_THREAD_HANDLE_REQUEST_CANCELLED) {
        /* p_prev is terminated. */
    } else {
        /* Push this thread back to the pool. */
        ABTI_pool_add_thread(&p_prev->thread,
                             ABT_POOL_CONTEXT_OP_THREAD_RESUME_YIELD_TO);
    }
    /* Decrease the number of blocked threads of p_next's pool. */
    ABTI_pool_dec_num_blocked(p_next->thread.p_pool);
}

void ABTI_ythread_callback_suspend(void *arg)
{
    ABTI_ythread *p_prev = (ABTI_ythread *)arg;
    /* Increase the number of blocked threads of the original pool (i.e.,
     * before migration). */
    ABTI_pool_inc_num_blocked(p_prev->thread.p_pool);
    /* Request handling. p_prev->thread.p_pool might be changed. */
    ABTI_thread_handle_request(&p_prev->thread, ABT_FALSE);
    /* Set this thread's state to BLOCKED. */
    ABTD_atomic_release_store_int(&p_prev->thread.state,
                                  ABT_THREAD_STATE_BLOCKED);
}
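
/* ABTI_ythread_callback_suspend backs blocking primitives: unlike the yield
 * callbacks, the ULT is not pushed back to a pool and stays BLOCKED until
 * another thread revives it. A minimal sketch at the public API level
 * (error checks omitted; "waiter" is a hypothetical ABT_thread handle saved
 * by the application):
 */
#if 0
/* Waiter side: blocks the calling ULT until it is resumed. */
static void waiter_side(void)
{
    ABT_self_suspend();
}

/* Waker side: makes the blocked ULT runnable again. */
static void waker_side(ABT_thread waiter)
{
    ABT_thread_resume(waiter);
}
#endif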

void ABTI_ythread_callback_resume_suspend_to(void *arg)
{
    ABTI_ythread_callback_resume_suspend_to_arg *p_arg =
        (ABTI_ythread_callback_resume_suspend_to_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTI_ythread *p_next = p_arg->p_next;
    ABTI_pool *p_prev_pool = p_prev->thread.p_pool;
    ABTI_pool *p_next_pool = p_next->thread.p_pool;
    if (p_prev_pool != p_next_pool) {
        /* Increase the number of blocked threads of p_prev's pool. */
        ABTI_pool_inc_num_blocked(p_prev_pool);
        /* Decrease the number of blocked threads of p_next's pool. */
        ABTI_pool_dec_num_blocked(p_next_pool);
    }
    /* Request handling. p_prev->thread.p_pool might be changed. */
    ABTI_thread_handle_request(&p_prev->thread, ABT_FALSE);
    /* Set this thread's state to BLOCKED. */
    ABTD_atomic_release_store_int(&p_prev->thread.state,
                                  ABT_THREAD_STATE_BLOCKED);
}

void ABTI_ythread_callback_exit(void *arg)
{
    /* Terminate this thread. */
    ABTI_ythread *p_prev = (ABTI_ythread *)arg;
    ABTI_thread_terminate(ABTI_global_get_global(),
                          p_prev->thread.p_last_xstream, &p_prev->thread);
}

void ABTI_ythread_callback_resume_exit_to(void *arg)
{
    ABTI_ythread_callback_resume_exit_to_arg *p_arg =
        (ABTI_ythread_callback_resume_exit_to_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTI_ythread *p_next = p_arg->p_next;
    /* Terminate this thread. */
    ABTI_thread_terminate(ABTI_global_get_global(),
                          p_prev->thread.p_last_xstream, &p_prev->thread);
    /* Decrease the number of blocked threads. */
    ABTI_pool_dec_num_blocked(p_next->thread.p_pool);
}

void ABTI_ythread_callback_suspend_unlock(void *arg)
{
    ABTI_ythread_callback_suspend_unlock_arg *p_arg =
        (ABTI_ythread_callback_suspend_unlock_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTD_spinlock *p_lock = p_arg->p_lock;
    /* Increase the number of blocked threads. */
    ABTI_pool_inc_num_blocked(p_prev->thread.p_pool);
    /* Request handling. p_prev->thread.p_pool might be changed. */
    ABTI_thread_handle_request(&p_prev->thread, ABT_FALSE);
    /* Set this thread's state to BLOCKED. */
    ABTD_atomic_release_store_int(&p_prev->thread.state,
                                  ABT_THREAD_STATE_BLOCKED);
    /* Release the lock. */
    ABTD_spinlock_release(p_lock);
}
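
/* Releasing p_lock only after p_prev's state becomes BLOCKED (and after
 * control has left p_prev's stack) gives condition-variable-style semantics:
 * a waker that acquires p_lock is guaranteed to observe the waiter as fully
 * suspended, so no wakeup can be lost between "unlock" and "block". */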

void ABTI_ythread_callback_suspend_join(void *arg)
{
    ABTI_ythread_callback_suspend_join_arg *p_arg =
        (ABTI_ythread_callback_suspend_join_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTI_ythread *p_target = p_arg->p_target;
    /* Increase the number of blocked threads. */
    ABTI_pool_inc_num_blocked(p_prev->thread.p_pool);
    /* Request handling. p_prev->thread.p_pool might be changed. */
    ABTI_thread_handle_request(&p_prev->thread, ABT_FALSE);
    /* Set this thread's state to BLOCKED. */
    ABTD_atomic_release_store_int(&p_prev->thread.state,
                                  ABT_THREAD_STATE_BLOCKED);
    /* Set the link in the context of the target ULT. This p_link might be
     * read by p_target running on another ES in parallel, so release-store
     * is needed here. */
    ABTD_atomic_release_store_ythread_context_ptr(&p_target->ctx.p_link,
                                                  &p_prev->ctx);
}
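
/* This callback implements the blocking side of a join: the joiner publishes
 * its own context through p_target->ctx.p_link and suspends; the target's
 * termination path reads p_link (with an acquire load pairing with the
 * release store above) and revives the joiner. */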

void ABTI_ythread_callback_suspend_replace_sched(void *arg)
{
    ABTI_ythread_callback_suspend_replace_sched_arg *p_arg =
        (ABTI_ythread_callback_suspend_replace_sched_arg *)arg;
    /* p_arg might point to the stack of the original ULT, so do not
     * access it after that ULT becomes resumable. */
    ABTI_ythread *p_prev = p_arg->p_prev;
    ABTI_sched *p_main_sched = p_arg->p_main_sched;
    /* Increase the number of blocked threads. */
    ABTI_pool_inc_num_blocked(p_prev->thread.p_pool);
    /* Request handling. p_prev->thread.p_pool might be changed. */
    ABTI_thread_handle_request(&p_prev->thread, ABT_FALSE);
    /* Set this thread's state to BLOCKED. */
    ABTD_atomic_release_store_int(&p_prev->thread.state,
                                  ABT_THREAD_STATE_BLOCKED);
    /* Ask the current main scheduler to replace its scheduler. */
    ABTI_sched_set_request(p_main_sched, ABTI_SCHED_REQ_REPLACE);
}

void ABTI_ythread_callback_orphan(void *arg)
{
    /* It's a special operation, so request handling is unnecessary. */
    ABTI_ythread *p_prev = (ABTI_ythread *)arg;
    ABTI_thread_unset_associated_pool(ABTI_global_get_global(),
                                      &p_prev->thread);
}

ABTU_no_sanitize_address void ABTI_ythread_print_stack(ABTI_global *p_global,
                                                       ABTI_ythread *p_ythread,
                                                       FILE *p_os)
{
    ABTD_ythread_print_context(p_ythread, p_os, 0);
    fprintf(p_os,
            "stacktop  : %p\n"
            "stacksize : %" PRIu64 "\n",
            ABTD_ythread_context_get_stacktop(&p_ythread->ctx),
            (uint64_t)ABTD_ythread_context_get_stacksize(&p_ythread->ctx));

#ifdef ABT_CONFIG_ENABLE_STACK_UNWIND
    {
        /* Peeking at a running context is forbidden, so check (albeit
         * imperfectly, since the state can change concurrently) that the
         * thread is not running. */
        ABT_thread_state state = (ABT_thread_state)ABTD_atomic_acquire_load_int(
            &p_ythread->thread.state);
        if (state == ABT_THREAD_STATE_READY ||
            state == ABT_THREAD_STATE_BLOCKED) {
            struct unwind_stack_t arg;
            arg.fp = p_os;
            ABT_bool succeeded =
                ABTI_ythread_context_peek(p_ythread, ythread_unwind_stack,
                                          &arg);
            if (!succeeded) {
                fprintf(p_os, "not executed yet.\n");
            }
        } else {
            fprintf(p_os, "failed to unwind a stack.\n");
        }
    }
#endif

    void *p_stacktop = ABTD_ythread_context_get_stacktop(&p_ythread->ctx);
    size_t i, j,
        stacksize = ABTD_ythread_context_get_stacksize(&p_ythread->ctx);
    if (stacksize == 0 || p_stacktop == NULL) {
        /* Some threads do not have p_stack (e.g., the main thread). */
        fprintf(p_os, "no stack\n");
        fflush(0); /* fflush(NULL) flushes all open output streams. */
        return;
    }
    if (p_global->print_raw_stack) {
        void *p_stack = (void *)(((char *)p_stacktop) - stacksize);
        char buffer[32];
        const size_t value_width = 8;
        const int num_bytes = sizeof(buffer);
        static const char zero[sizeof(buffer)];
        ABT_bool full_zeroes = ABT_FALSE, multi_lines = ABT_FALSE;

        for (i = 0; i < stacksize; i += num_bytes) {
            if (stacksize >= i + num_bytes) {
                memcpy(buffer, &((uint8_t *)p_stack)[i], num_bytes);
            } else {
                memset(buffer, 0, num_bytes);
                memcpy(buffer, &((uint8_t *)p_stack)[i], stacksize - i);
            }

            /* pack full lines of zeroes */
            if (!memcmp(zero, buffer, sizeof(buffer))) {
                if (!full_zeroes) {
                    full_zeroes = ABT_TRUE;
                } else {
                    multi_lines = ABT_TRUE;
                    continue;
                }
            } else {
                full_zeroes = ABT_FALSE;
                if (multi_lines) {
                    fprintf(p_os, "*\n");
                    multi_lines = ABT_FALSE;
                }
            }

            /* Print the stack address */
#if SIZEOF_VOID_P == 8
            fprintf(p_os, "%016" PRIxPTR ":",
                    (uintptr_t)(&((uint8_t *)p_stack)[i]));
#elif SIZEOF_VOID_P == 4
            fprintf(p_os, "%08" PRIxPTR ":",
                    (uintptr_t)(&((uint8_t *)p_stack)[i]));
#else
#error "unknown pointer size"
#endif
            /* Print the raw stack data */
            for (j = 0; j < num_bytes / value_width; j++) {
                if (value_width == 8) {
                    uint64_t val = ((uint64_t *)buffer)[j];
                    fprintf(p_os, " %016" PRIx64, val);
                } else if (value_width == 4) {
                    uint32_t val = ((uint32_t *)buffer)[j];
                    fprintf(p_os, " %08" PRIx32, val);
                } else if (value_width == 2) {
                    uint16_t val = ((uint16_t *)buffer)[j];
                    fprintf(p_os, " %04" PRIx16, val);
                } else {
                    uint8_t val = ((uint8_t *)buffer)[j];
                    fprintf(p_os, " %02" PRIx8, val);
                }
                if (j == (num_bytes / value_width) - 1)
                    fprintf(p_os, "\n");
            }
        }
    }
    fflush(p_os);
}
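
/* A minimal sketch of reaching the printer above from application code,
 * assuming the usual mapping of the public ABT_info_print_thread_stack()
 * routine onto this internal function (error checks omitted):
 */
#if 0
#include <abt.h>
#include <stdio.h>

static void dump_ult(ABT_thread thread)
{
    /* Prints the ULT's context, stack top/size, and, if Argobots was built
     * with stack-unwind support, a libunwind backtrace of a READY or
     * BLOCKED thread. */
    ABT_info_print_thread_stack(stdout, thread);
}
#endif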

/*****************************************************************************/
/* Internal static functions                                                 */
/*****************************************************************************/

#ifdef ABT_CONFIG_ENABLE_STACK_UNWIND
ABTU_no_sanitize_address static int ythread_unwind_stack_impl(FILE *fp)
{
    unw_cursor_t cursor;
    unw_context_t uc;
    unw_word_t ip, sp;
    int ret, level = -1;

    ret = unw_getcontext(&uc);
    if (ret != 0)
        return ABT_ERR_OTHER;

    ret = unw_init_local(&cursor, &uc);
    if (ret != 0)
        return ABT_ERR_OTHER;

    /* Walk at most 50 frames to bound the output on corrupted stacks. */
    while (unw_step(&cursor) > 0 && level < 50) {
        level++;

        ret = unw_get_reg(&cursor, UNW_REG_IP, &ip);
        if (ret != 0)
            return ABT_ERR_OTHER;

        ret = unw_get_reg(&cursor, UNW_REG_SP, &sp);
        if (ret != 0)
            return ABT_ERR_OTHER;

        char proc_name[256];
        unw_word_t offset;
        ret = unw_get_proc_name(&cursor, proc_name, sizeof(proc_name),
                                &offset);
        if (ret != 0)
            return ABT_ERR_OTHER;

        /* Print one frame of the function stack. */
        fprintf(fp, "#%d %p in %s () <+%d> (%s = %p)\n", level,
                (void *)((uintptr_t)ip), proc_name, (int)offset,
                unw_regname(UNW_REG_SP), (void *)((uintptr_t)sp));
    }
    return ABT_SUCCESS;
}

static void ythread_unwind_stack(void *arg)
{
    struct unwind_stack_t *p_arg = (struct unwind_stack_t *)arg;
    if (ythread_unwind_stack_impl(p_arg->fp) != ABT_SUCCESS) {
        fprintf(p_arg->fp, "libunwind error\n");
    }
}

#endif /* ABT_CONFIG_ENABLE_STACK_UNWIND */
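
/* The unwinding code above is compiled only when Argobots is configured with
 * stack-unwind support (ABT_CONFIG_ENABLE_STACK_UNWIND; typically enabled at
 * configure time with a libunwind-based option such as --enable-stack-unwind,
 * which requires libunwind to be installed). */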