ARGOBOTS  4dc37e16e1b227a480715ab67dae1dcfb4d2d4e0
unit.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 static void unit_init_hash_table(ABTI_global *p_global);
9 static void unit_finalize_hash_table(ABTI_global *p_global);
10 ABTU_ret_err static inline int
11 unit_map_thread(ABTI_global *p_global, ABT_unit unit, ABTI_thread *p_thread);
12 static inline void unit_unmap_thread(ABTI_global *p_global, ABT_unit unit);
13 static inline ABTI_thread *
14 unit_get_thread_from_user_defined_unit(ABTI_global *p_global, ABT_unit unit);
15 
49 {
50  ABTI_UB_ASSERT(ABTI_initialized());
51 
52  ABTI_pool *p_pool = ABTI_pool_get_ptr(pool);
53  ABTI_CHECK_NULL_POOL_PTR(p_pool);
54  ABTI_CHECK_TRUE(unit != ABT_UNIT_NULL, ABT_ERR_INV_UNIT);
55  return ABT_SUCCESS;
56 }
57 
81 {
82  ABTI_UB_ASSERT(ABTI_initialized());
83  ABTI_UB_ASSERT(thread);
84 
85  ABTI_global *p_global = ABTI_global_get_global();
86  ABTI_CHECK_TRUE(unit != ABT_UNIT_NULL, ABT_ERR_INV_UNIT);
87  ABTI_thread *p_thread = ABTI_unit_get_thread(p_global, unit);
88  *thread = ABTI_thread_get_handle(p_thread);
89  return ABT_SUCCESS;
90 }
91 
92 /*****************************************************************************/
93 /* Private APIs */
94 /*****************************************************************************/
95 
/* Initialize the global unit-to-thread hash table.  Thin wrapper that forwards
 * to the file-local unit_init_hash_table(). */
void ABTI_unit_init_hash_table(ABTI_global *p_global)
{
    unit_init_hash_table(p_global);
}
100 
/* Finalize the global unit-to-thread hash table and free its elements.  Thin
 * wrapper that forwards to the file-local unit_finalize_hash_table(). */
void ABTI_unit_finalize_hash_table(ABTI_global *p_global)
{
    unit_finalize_hash_table(p_global);
}
105 
/* Register a (unit -> thread) mapping in the hash table.  Thin wrapper that
 * forwards to the file-local unit_map_thread().  Returns ABT_SUCCESS or a
 * memory-allocation error code. */
ABTU_ret_err int ABTI_unit_map_thread(ABTI_global *p_global, ABT_unit unit,
                                      ABTI_thread *p_thread)
{
    return unit_map_thread(p_global, unit, p_thread);
}
111 
/* Remove the mapping registered for `unit`.  Thin wrapper that forwards to the
 * file-local unit_unmap_thread(). */
void ABTI_unit_unmap_thread(ABTI_global *p_global, ABT_unit unit)
{
    unit_unmap_thread(p_global, unit);
}
116 
/* Look up the thread mapped to a user-defined (non-builtin) unit.  Thin
 * wrapper that forwards to the file-local implementation. */
ABTI_thread *ABTI_unit_get_thread_from_user_defined_unit(ABTI_global *p_global,
                                                         ABT_unit unit)
{
    return unit_get_thread_from_user_defined_unit(p_global, unit);
}
122 
123 /*****************************************************************************/
124 /* Internal static functions */
125 /*****************************************************************************/
126 
127 static inline size_t unit_get_hash_index(ABT_unit unit)
128 {
129  size_t val = (uintptr_t)unit;
130  /* Let's ignore the first 3 bits and use the next 29 bits. */
131  size_t base_val = val >> 3;
132 #if ABTI_UNIT_HASH_TABLE_SIZE_EXP <= 14
133  base_val += val >> (ABTI_UNIT_HASH_TABLE_SIZE_EXP + 3);
134 #endif
135 #if ABTI_UNIT_HASH_TABLE_SIZE_EXP <= 9
136  base_val += val >> (ABTI_UNIT_HASH_TABLE_SIZE_EXP * 2 + 3);
137 #endif
138  return base_val & (ABTI_UNIT_HASH_TABLE_SIZE - 1);
139 }
140 
/* Holder that lets an ABT_unit handle be loaded/stored atomically as a
 * pointer-sized value. */
typedef struct atomic_unit {
    ABTD_atomic_ptr val;
} atomic_unit;
144 
/* Relaxed atomic load of the ABT_unit handle held in *p_ptr. */
static inline ABT_unit atomic_relaxed_load_unit(const atomic_unit *p_ptr)
{
    return (ABT_unit)ABTD_atomic_relaxed_load_ptr(&p_ptr->val);
}
149 
/* Relaxed atomic store of an ABT_unit handle into *p_ptr. */
static inline void atomic_relaxed_store_unit(atomic_unit *p_ptr, ABT_unit val)
{
    ABTD_atomic_relaxed_store_ptr(&p_ptr->val, (void *)val);
}
154 
155 typedef struct unit_to_thread {
156  /* This is updated in a relaxed manner. Relaxed access is fine since the
157  * semantics guarantees that all operations that "hit" are performed after
158  * map() from the memory order viewpoint; we just need to guarantee that the
159  * other parallel entities that call unmap() and get() (consequently, they
160  * do not "hit") do not see a corrupted value that is neither a new ABT_unit
161  * handle nor ABT_UNIT_NULL. */
163  ABTI_thread *p_thread;
166 
/* Acquire-load of a list-head pointer; pairs with the release-store in
 * atomic_release_store_unit_to_thread() so a reader sees a fully initialized
 * element. */
static inline unit_to_thread *
atomic_acquire_load_unit_to_thread(const ABTI_atomic_unit_to_thread *p_ptr)
{
    return (unit_to_thread *)ABTD_atomic_acquire_load_ptr(&p_ptr->val);
}
172 
/* Relaxed load of a list-head pointer (used under the bucket lock, where no
 * extra ordering is needed). */
static inline unit_to_thread *
atomic_relaxed_load_unit_to_thread(const ABTI_atomic_unit_to_thread *p_ptr)
{
    return (unit_to_thread *)ABTD_atomic_relaxed_load_ptr(&p_ptr->val);
}
178 
/* Release-store of a list-head pointer; publishes a newly initialized element
 * to readers that use atomic_acquire_load_unit_to_thread(). */
static inline void
atomic_release_store_unit_to_thread(ABTI_atomic_unit_to_thread *p_ptr,
                                    unit_to_thread *val)
{
    ABTD_atomic_release_store_ptr(&p_ptr->val, (void *)val);
}
185 
/* Relaxed store of a list-head pointer (used during single-threaded
 * initialization, where no ordering is needed). */
static inline void
atomic_relaxed_store_unit_to_thread(ABTI_atomic_unit_to_thread *p_ptr,
                                    unit_to_thread *val)
{
    ABTD_atomic_relaxed_store_ptr(&p_ptr->val, (void *)val);
}
192 
193 static void unit_init_hash_table(ABTI_global *p_global)
194 {
195  int i;
196  for (i = 0; i < (int)ABTI_UNIT_HASH_TABLE_SIZE; i++) {
197  atomic_relaxed_store_unit_to_thread(&p_global->unit_to_thread_entires[i]
198  .list,
199  NULL);
200  ABTD_spinlock_clear(&p_global->unit_to_thread_entires[i].lock);
201  }
202 }
203 
204 static void unit_finalize_hash_table(ABTI_global *p_global)
205 {
206  int i;
207  for (i = 0; i < (int)ABTI_UNIT_HASH_TABLE_SIZE; i++) {
208  ABTI_ASSERT(!ABTD_spinlock_is_locked(
209  &p_global->unit_to_thread_entires[i].lock));
211  &p_global->unit_to_thread_entires[i].list);
212  while (p_cur) {
213  ABTI_ASSERT(atomic_relaxed_load_unit(&p_cur->unit) ==
214  ABT_UNIT_NULL);
215  unit_to_thread *p_next = p_cur->p_next;
216  ABTU_free(p_cur);
217  p_cur = p_next;
218  }
219  }
220 }
221 
222 ABTU_ret_err static inline int
223 unit_map_thread(ABTI_global *p_global, ABT_unit unit, ABTI_thread *p_thread)
224 {
225  ABTI_ASSERT(!ABTI_unit_is_builtin(unit));
226  size_t hash_index = unit_get_hash_index(unit);
227  ABTI_unit_to_thread_entry *p_entry =
228  &p_global->unit_to_thread_entires[hash_index];
229 
230  ABTD_spinlock_acquire(&p_entry->lock);
231  unit_to_thread *p_cur = atomic_relaxed_load_unit_to_thread(&p_entry->list);
232  while (p_cur) {
233  if (atomic_relaxed_load_unit(&p_cur->unit) == ABT_UNIT_NULL) {
234  /* Empty element has been found. Let's use this. */
236  /* p_cur is associated with this unit. */
237  p_cur->p_thread = p_thread;
238  ABTD_spinlock_release(&p_entry->lock);
239  return ABT_SUCCESS;
240  }
241  p_cur = p_cur->p_next;
242  }
243  /* It seems that all the elements are in use. Let's allocate a new one. */
244  unit_to_thread *p_new;
245  p_cur = atomic_relaxed_load_unit_to_thread(&p_entry->list);
246  /* Let's dynamically allocate memory. */
247  int ret = ABTU_malloc(sizeof(unit_to_thread), (void **)&p_new);
248  if (ret != ABT_SUCCESS) {
249  ABTD_spinlock_release(&p_entry->lock);
250  return ret;
251  }
252  /* Initialize the new unit. */
253  atomic_relaxed_store_unit(&p_new->unit, unit);
254  p_new->p_thread = p_thread;
255  p_new->p_next = p_cur;
256  atomic_release_store_unit_to_thread(&p_entry->list, p_new);
257  ABTD_spinlock_release(&p_entry->lock);
258  return ABT_SUCCESS;
259 }
260 
261 static inline void unit_unmap_thread(ABTI_global *p_global, ABT_unit unit)
262 {
263  ABTI_ASSERT(!ABTI_unit_is_builtin(unit));
264  size_t hash_index = unit_get_hash_index(unit);
265  ABTI_unit_to_thread_entry *p_entry =
266  &p_global->unit_to_thread_entires[hash_index];
267 
268  ABTD_spinlock_acquire(&p_entry->lock);
269  unit_to_thread *p_cur = atomic_relaxed_load_unit_to_thread(&p_entry->list);
270  /* Update the corresponding unit to "NULL". */
271  while (1) {
272  if (atomic_relaxed_load_unit(&p_cur->unit) == unit) {
274  break;
275  }
276  p_cur = p_cur->p_next;
277  ABTI_ASSERT(p_cur); /* unmap() must succeed. */
278  }
279  ABTD_spinlock_release(&p_entry->lock);
280 }
281 
282 static inline ABTI_thread *
284 {
285  ABTI_ASSERT(!ABTI_unit_is_builtin(unit));
286  /* Find an element. */
287  size_t hash_index = unit_get_hash_index(unit);
288  ABTI_unit_to_thread_entry *p_entry =
289  &p_global->unit_to_thread_entires[hash_index];
290  /* The first element must be accessed in a release-acquire manner. The new
291  * element is release-stored to the head, so acquire-load can always get a
292  * valid linked-list chain. */
293  unit_to_thread *p_cur = atomic_acquire_load_unit_to_thread(&p_entry->list);
294  while (1) {
295  ABTI_ASSERT(p_cur); /* get() must succeed. */
296  if (atomic_relaxed_load_unit(&p_cur->unit) == unit) {
297  return p_cur->p_thread;
298  }
299  p_cur = p_cur->p_next;
300  }
301 }
ABT_thread
struct ABT_thread_opaque * ABT_thread
Work unit handle type.
Definition: abt.h:932
unit_map_thread
static ABTU_ret_err int unit_map_thread(ABTI_global *p_global, ABT_unit unit, ABTI_thread *p_thread)
Definition: unit.c:223
ABT_unit_set_associated_pool
int ABT_unit_set_associated_pool(ABT_unit unit, ABT_pool pool)
No operation.
Definition: unit.c:48
atomic_acquire_load_unit_to_thread
static unit_to_thread * atomic_acquire_load_unit_to_thread(const ABTI_atomic_unit_to_thread *p_ptr)
Definition: unit.c:168
unit_get_hash_index
static size_t unit_get_hash_index(ABT_unit unit)
Definition: unit.c:127
ABT_pool
struct ABT_pool_opaque * ABT_pool
Pool handle type.
Definition: abt.h:878
abti.h
atomic_relaxed_store_unit_to_thread
static void atomic_relaxed_store_unit_to_thread(ABTI_atomic_unit_to_thread *p_ptr, unit_to_thread *val)
Definition: unit.c:187
unit_to_thread::p_thread
ABTI_thread * p_thread
Definition: unit.c:163
unit_to_thread::unit
atomic_unit unit
Definition: unit.c:162
ABTU_malloc
static ABTU_ret_err int ABTU_malloc(size_t size, void **p_ptr)
Definition: abtu.h:235
ABT_ERR_INV_UNIT
#define ABT_ERR_INV_UNIT
Error code: invalid work unit for scheduling.
Definition: abt.h:181
atomic_relaxed_load_unit
static ABT_unit atomic_relaxed_load_unit(const atomic_unit *p_ptr)
Definition: unit.c:145
atomic_relaxed_store_unit
static void atomic_relaxed_store_unit(atomic_unit *p_ptr, ABT_unit val)
Definition: unit.c:150
ABT_unit
struct ABT_unit_opaque * ABT_unit
Work unit handle type for scheduling.
Definition: abt.h:911
unit_unmap_thread
static void unit_unmap_thread(ABTI_global *p_global, ABT_unit unit)
Definition: unit.c:261
unit_to_thread
struct unit_to_thread unit_to_thread
ABT_SUCCESS
#define ABT_SUCCESS
Error code: the routine returns successfully.
Definition: abt.h:92
ABTU_ret_err
#define ABTU_ret_err
Definition: abtu.h:155
atomic_relaxed_load_unit_to_thread
static unit_to_thread * atomic_relaxed_load_unit_to_thread(const ABTI_atomic_unit_to_thread *p_ptr)
Definition: unit.c:174
unit_get_thread_from_user_defined_unit
static ABTI_thread * unit_get_thread_from_user_defined_unit(ABTI_global *p_global, ABT_unit unit)
Definition: unit.c:283
atomic_unit
struct atomic_unit atomic_unit
ABTU_free
static void ABTU_free(void *ptr)
Definition: abtu.h:228
unit_to_thread
Definition: unit.c:155
atomic_release_store_unit_to_thread
static void atomic_release_store_unit_to_thread(ABTI_atomic_unit_to_thread *p_ptr, unit_to_thread *val)
Definition: unit.c:180
atomic_unit::val
ABTD_atomic_ptr val
Definition: unit.c:142
unit_init_hash_table
static void unit_init_hash_table(ABTI_global *p_global)
Definition: unit.c:193
ABT_UNIT_NULL
#define ABT_UNIT_NULL
Definition: abt.h:1104
unit_finalize_hash_table
static void unit_finalize_hash_table(ABTI_global *p_global)
Definition: unit.c:204
unit_to_thread::p_next
struct unit_to_thread * p_next
Definition: unit.c:164
ABT_unit_get_thread
int ABT_unit_get_thread(ABT_unit unit, ABT_thread *thread)
Get a thread handle of the target work unit.
Definition: unit.c:80
atomic_unit
Definition: unit.c:141