ARGOBOTS
abtd_thread.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 static inline void ABTD_thread_terminate_thread(ABTI_local *p_local,
9  ABTI_thread *p_thread);
10 static inline void ABTD_thread_terminate_sched(ABTI_local *p_local,
11  ABTI_thread *p_thread);
12 
13 void ABTD_thread_func_wrapper_thread(void *p_arg)
14 {
15  ABTD_thread_context *p_ctx = (ABTD_thread_context *)p_arg;
16  void (*thread_func)(void *) = p_ctx->f_thread;
17 
18  thread_func(p_ctx->p_arg);
19 
20  /* NOTE: ctx is located in the beginning of ABTI_thread */
21  ABTI_thread *p_thread = (ABTI_thread *)p_ctx;
22 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
23  ABTI_ASSERT(p_thread->is_sched == NULL);
24 #endif
25 
26  ABTI_local *p_local = ABTI_local_get_local();
27  ABTD_thread_terminate_thread(p_local, p_thread);
28 }
29 
30 void ABTD_thread_func_wrapper_sched(void *p_arg)
31 {
32  ABTD_thread_context *p_ctx = (ABTD_thread_context *)p_arg;
33  void (*thread_func)(void *) = p_ctx->f_thread;
34 
35  thread_func(p_ctx->p_arg);
36 
37  /* NOTE: ctx is located in the beginning of ABTI_thread */
38  ABTI_thread *p_thread = (ABTI_thread *)p_ctx;
39 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
40  ABTI_ASSERT(p_thread->is_sched != NULL);
41 #endif
42 
43  ABTI_local *p_local = ABTI_local_get_local();
44  ABTD_thread_terminate_sched(p_local, p_thread);
45 }
46 
47 void ABTD_thread_exit(ABTI_local *p_local, ABTI_thread *p_thread)
48 {
49 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
50  if (p_thread->is_sched) {
51  ABTD_thread_terminate_sched(p_local, p_thread);
52  } else {
53 #endif
54  ABTD_thread_terminate_thread(p_local, p_thread);
55 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
56  }
57 #endif
58 }
59 
60 static inline void ABTDI_thread_terminate(ABTI_local *p_local,
61  ABTI_thread *p_thread,
62  ABT_bool is_sched)
63 {
64  ABTD_thread_context *p_ctx = &p_thread->ctx;
65  ABTD_thread_context *p_link =
66  ABTD_atomic_acquire_load_thread_context_ptr(&p_ctx->p_link);
67  if (p_link) {
68  /* If p_link is set, it means that other ULT has called the join. */
69  ABTI_thread *p_joiner = (ABTI_thread *)p_link;
70  if (p_thread->p_last_xstream == p_joiner->p_last_xstream) {
71  /* Only when the current ULT is on the same ES as p_joiner's,
72  * we can jump to the joiner ULT. */
73  ABTD_atomic_release_store_int(&p_thread->state,
75  LOG_EVENT("[U%" PRIu64 ":E%d] terminated\n",
76  ABTI_thread_get_id(p_thread),
77  p_thread->p_last_xstream->rank);
78 
79  /* Note that a scheduler-type ULT cannot be a joiner. If a scheduler
80  * type ULT would be a joiner (=suspend), no scheduler is available
81  * when a running ULT needs suspension. Hence, it always jumps to a
82  * non-scheduler-type ULT. */
83 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
84  if (is_sched) {
85  ABTI_thread_finish_context_sched_to_thread(p_local,
86  p_thread->is_sched,
87  p_joiner);
88  } else {
89 #endif
90  ABTI_thread_finish_context_thread_to_thread(p_local, p_thread,
91  p_joiner);
92 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
93  }
94 #endif
95  return;
96  } else {
97  /* If the current ULT's associated ES is different from p_joiner's,
98  * we can't directly jump to p_joiner. Instead, we wake up
99  * p_joiner here so that p_joiner's scheduler can resume it. */
100  ABTI_thread_set_ready(p_local, p_joiner);
101 
102  /* We don't need to use the atomic OR operation here because the ULT
103  * will be terminated regardless of other requests. */
104  ABTD_atomic_release_store_uint32(&p_thread->request,
105  ABTI_THREAD_REQ_TERMINATE);
106  }
107  } else {
108  uint32_t req =
109  ABTD_atomic_fetch_or_uint32(&p_thread->request,
110  ABTI_THREAD_REQ_JOIN |
111  ABTI_THREAD_REQ_TERMINATE);
112  if (req & ABTI_THREAD_REQ_JOIN) {
113  /* This case means there has been a join request and the joiner has
114  * blocked. We have to wake up the joiner ULT. */
115  do {
116  p_link =
117  ABTD_atomic_acquire_load_thread_context_ptr(&p_ctx->p_link);
118  } while (!p_link);
119  ABTI_thread_set_ready(p_local, (ABTI_thread *)p_link);
120  }
121  }
122 
123  /* No other ULT is waiting or blocked for this ULT. Since a context does not
124  * switch to another context when it finishes, we need to explicitly switch
125  * to the scheduler. */
126  ABTI_sched *p_sched;
127 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
128  if (p_thread->is_sched) {
129  /* If p_thread is a scheduler ULT, we have to context switch to
130  * the parent scheduler. */
131  p_sched = ABTI_xstream_get_parent_sched(p_thread->p_last_xstream);
132  } else {
133 #endif
134  p_sched = ABTI_xstream_get_top_sched(p_thread->p_last_xstream);
135 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
136  }
137 #endif
138 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
139  if (is_sched) {
140  ABTI_thread_finish_context_sched_to_sched(p_thread->is_sched, p_sched);
141  } else {
142 #endif
143  ABTI_thread_finish_context_thread_to_sched(p_thread, p_sched);
144 #ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
145  }
146 #endif
147 }
148 
/* Terminate a non-scheduler ULT; thin wrapper selecting the plain-ULT
 * path of the common termination routine.  Does not return. */
static inline void ABTD_thread_terminate_thread(ABTI_local *p_local,
                                                ABTI_thread *p_thread)
{
    ABTDI_thread_terminate(p_local, p_thread, ABT_FALSE);
}
154 
/* Terminate a scheduler-type ULT; thin wrapper selecting the scheduler
 * path of the common termination routine.  Does not return. */
static inline void ABTD_thread_terminate_sched(ABTI_local *p_local,
                                               ABTI_thread *p_thread)
{
    ABTDI_thread_terminate(p_local, p_thread, ABT_TRUE);
}
160 
161 #if ABT_CONFIG_THREAD_TYPE == ABT_THREAD_TYPE_DYNAMIC_PROMOTION
162 void ABTD_thread_terminate_thread_no_arg()
163 {
164  ABTI_local *p_local = ABTI_local_get_local();
165  /* This function is called by `return` in ABTD_thread_context_make_and_call,
166  * so it cannot take the argument. We get the thread descriptor from TLS. */
167  ABTI_thread *p_thread = p_local->p_thread;
168  ABTD_thread_terminate_thread(p_local, p_thread);
169 }
170 #endif
171 
/* Cancel a ULT on behalf of its scheduler.  Unlike the normal termination
 * path, we must never context-switch to a joiner here (the caller is a
 * scheduler, not the canceled ULT), so any waiting joiner is only woken up.
 *
 * p_local:  ES-local data of the calling scheduler.
 * p_thread: the ULT being canceled. */
void ABTD_thread_cancel(ABTI_local *p_local, ABTI_thread *p_thread)
{
    /* When we cancel a ULT, if other ULT is blocked to join the canceled ULT,
     * we have to wake up the joiner ULT. However, unlike the case when the
     * ULT has finished its execution and calls ABTD_thread_terminate/exit,
     * this function is called by the scheduler. Therefore, we should not
     * context switch to the joiner ULT and need to always wake it up. */
    ABTD_thread_context *p_ctx = &p_thread->ctx;

    if (ABTD_atomic_acquire_load_thread_context_ptr(&p_ctx->p_link)) {
        /* If p_link is set, it means that other ULT has called the join. */
        /* The acquire load above synchronizes with the joiner's publication
         * of p_link, so the relaxed re-read below suffices. */
        ABTI_thread *p_joiner =
            (ABTI_thread *)ABTD_atomic_relaxed_load_thread_context_ptr(
                &p_ctx->p_link);
        ABTI_thread_set_ready(p_local, p_joiner);
    } else {
        /* Race with a concurrent join: atomically claim termination and
         * learn whether a join request arrived first. */
        uint32_t req =
            ABTD_atomic_fetch_or_uint32(&p_thread->request,
                                        ABTI_THREAD_REQ_JOIN |
                                            ABTI_THREAD_REQ_TERMINATE);
        if (req & ABTI_THREAD_REQ_JOIN) {
            /* This case means there has been a join request and the joiner has
             * blocked. We have to wake up the joiner ULT. */
            /* Spin until the joiner publishes p_link; expected to be brief
             * since the joiner sets it right after raising REQ_JOIN. */
            while (ABTD_atomic_acquire_load_thread_context_ptr(
                       &p_ctx->p_link) == NULL)
                ;
            ABTI_thread *p_joiner =
                (ABTI_thread *)ABTD_atomic_relaxed_load_thread_context_ptr(
                    &p_ctx->p_link);
            ABTI_thread_set_ready(p_local, p_joiner);
        }
    }
}
205 
206 void ABTD_thread_print_context(ABTI_thread *p_thread, FILE *p_os, int indent)
207 {
208  char *prefix = ABTU_get_indent_str(indent);
209  ABTD_thread_context *p_ctx = &p_thread->ctx;
210  fprintf(p_os, "%sp_ctx : %p\n", prefix, p_ctx->p_ctx);
211  fprintf(p_os, "%sp_arg : %p\n", prefix, p_ctx->p_arg);
212  fprintf(p_os, "%sp_link : %p\n", prefix,
213  (void *)ABTD_atomic_acquire_load_thread_context_ptr(
214  &p_ctx->p_link));
215  fflush(p_os);
216  ABTU_free(prefix);
217 }
char * ABTU_get_indent_str(int indent)
Definition: util.c:12
int ABT_bool
Definition: abt.h:309
#define ABT_FALSE
Definition: abt.h:224
#define LOG_EVENT(fmt,...)
Definition: abti_log.h:60
#define ABT_TRUE
Definition: abt.h:223
static void ABTDI_thread_terminate(ABTI_local *p_local, ABTI_thread *p_thread, ABT_bool is_sched)
Definition: abtd_thread.c:60
static void ABTU_free(void *ptr)
Definition: abtu.h:32