ARGOBOTS  4dc37e16e1b227a480715ab67dae1dcfb4d2d4e0
abti_key.h
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #ifndef ABTI_KEY_H_INCLUDED
7 #define ABTI_KEY_H_INCLUDED
8 
9 /* Inlined functions for thread-specific data key */
10 
11 static inline ABTI_key *ABTI_key_get_ptr(ABT_key key)
12 {
13 #ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
14  ABTI_key *p_key;
15  if (key == ABT_KEY_NULL) {
16  p_key = NULL;
17  } else {
18  p_key = (ABTI_key *)key;
19  }
20  return p_key;
21 #else
22  return (ABTI_key *)key;
23 #endif
24 }
25 
26 static inline ABT_key ABTI_key_get_handle(ABTI_key *p_key)
27 {
28 #ifndef ABT_CONFIG_DISABLE_ERROR_CHECK
29  ABT_key h_key;
30  if (p_key == NULL) {
31  h_key = ABT_KEY_NULL;
32  } else {
33  h_key = (ABT_key)p_key;
34  }
35  return h_key;
36 #else
37  return (ABT_key)p_key;
38 #endif
39 }
40 
/* Static initializer for ABTI_key: fills the destructor and id fields in
 * declaration order. */
#define ABTI_KEY_STATIC_INITIALIZER(f_destructor, id)                          \
    {                                                                          \
        f_destructor, id                                                       \
    }

/* Key IDs reserved for internal use.  IDs below ABTI_KEY_ID_END_ are taken;
 * they double as hash inputs for ABTI_ktable_get_idx(). */
#define ABTI_KEY_ID_STACKABLE_SCHED 0
#define ABTI_KEY_ID_MIGRATION 1
#define ABTI_KEY_ID_END_ 2
50 
/* Header prepended to every chunk of memory backing a key table.  Chunks are
 * chained through p_next (the chain head lives in p_ktable->p_used_mem) so
 * they can all be released when the table is destroyed. */
typedef struct ABTI_ktable_mem_header {
    struct ABTI_ktable_mem_header *p_next; /* Next chunk in the used list. */
    ABT_bool is_from_mempool; /* ABT_TRUE: from the descriptor memory pool;
                               * ABT_FALSE: from malloc(). */
} ABTI_ktable_mem_header;
55 
/* Usable bytes in one memory-pool descriptor after the chunk header. */
#define ABTI_KTABLE_DESC_SIZE \
    (ABTI_MEM_POOL_DESC_SIZE - sizeof(ABTI_ktable_mem_header))

/* Sentinel stored in an ABTD_atomic_ptr table slot while another thread is
 * creating the table (see ABTI_ktable_set()). */
#define ABTI_KTABLE_LOCKED ((ABTI_ktable *)0x1)
60 static inline int ABTI_ktable_is_valid(ABTI_ktable *p_ktable)
61 {
62  /* Only 0x0 and 0x1 (=ABTI_KTABLE_LOCKED) are special */
63  return (((uintptr_t)(void *)p_ktable) & (~((uintptr_t)(void *)0x1))) !=
64  ((uintptr_t)(void *)0x0);
65 }
66 
67 ABTU_ret_err static inline int ABTI_ktable_create(ABTI_global *p_global,
68  ABTI_local *p_local,
69  ABTI_ktable **pp_ktable)
70 {
71  ABTI_ktable *p_ktable;
72  uint32_t key_table_size = p_global->key_table_size;
73  /* size must be a power of 2. */
74  ABTI_ASSERT((key_table_size & (key_table_size - 1)) == 0);
75  /* max alignment must be a power of 2. */
76  ABTI_STATIC_ASSERT((ABTU_MAX_ALIGNMENT & (ABTU_MAX_ALIGNMENT - 1)) == 0);
77  size_t ktable_size =
78  ABTU_roundup_size(offsetof(ABTI_ktable, p_elems) +
79  sizeof(ABTD_atomic_ptr) * key_table_size,
81  /* Since only one ES can access the memory pool on creation, this uses an
82  * unsafe memory pool without taking a lock. */
83  if (ABTU_likely(ktable_size <= ABTI_KTABLE_DESC_SIZE)) {
84  /* Use memory pool. */
85  void *p_mem;
86  int abt_errno = ABTI_mem_alloc_desc(p_local, &p_mem);
87  ABTI_CHECK_ERROR(abt_errno);
88  ABTI_ktable_mem_header *p_header = (ABTI_ktable_mem_header *)p_mem;
89  p_ktable =
90  (ABTI_ktable *)(((char *)p_mem) + sizeof(ABTI_ktable_mem_header));
91  p_header->p_next = NULL;
92  p_header->is_from_mempool = ABT_TRUE;
93  p_ktable->p_used_mem = p_mem;
94  p_ktable->p_extra_mem = (void *)(((char *)p_ktable) + ktable_size);
95  p_ktable->extra_mem_size = ABTI_KTABLE_DESC_SIZE - ktable_size;
96  } else {
97  /* Use malloc() */
98  void *p_mem;
99  int abt_errno =
100  ABTU_malloc(ktable_size + sizeof(ABTI_ktable_mem_header), &p_mem);
101  ABTI_CHECK_ERROR(abt_errno);
102  ABTI_ktable_mem_header *p_header = (ABTI_ktable_mem_header *)p_mem;
103  p_ktable =
104  (ABTI_ktable *)(((char *)p_mem) + sizeof(ABTI_ktable_mem_header));
105  p_header->p_next = NULL;
106  p_header->is_from_mempool = ABT_FALSE;
107  p_ktable->p_used_mem = p_mem;
108  p_ktable->p_extra_mem = NULL;
109  p_ktable->extra_mem_size = 0;
110  }
111  p_ktable->size = key_table_size;
112  ABTD_spinlock_clear(&p_ktable->lock);
113  memset(p_ktable->p_elems, 0, sizeof(ABTD_atomic_ptr) * key_table_size);
114  *pp_ktable = p_ktable;
115  return ABT_SUCCESS;
116 }
117 
/* Allocate "size" bytes for a key-table element.  Strategy, in order:
 * (1) carve from the leftover tail of the most recently acquired chunk,
 * (2) take a fresh descriptor-sized chunk from the memory pool,
 * (3) fall back to malloc() for requests larger than a descriptor.
 * Newly acquired chunks are pushed onto p_ktable->p_used_mem so the table
 * destructor can free them.  Returns ABT_SUCCESS or an allocation error. */
ABTU_ret_err static inline int ABTI_ktable_alloc_elem(ABTI_local *p_local,
                                                      ABTI_ktable *p_ktable,
                                                      size_t size,
                                                      void **pp_mem)
{
    /* Callers must round size up to ABTU_MAX_ALIGNMENT so carved pointers
     * stay aligned. */
    ABTI_ASSERT((size & (ABTU_MAX_ALIGNMENT - 1)) == 0);
    size_t extra_mem_size = p_ktable->extra_mem_size;
    if (size <= extra_mem_size) {
        /* Use the extra memory. */
        void *p_mem = p_ktable->p_extra_mem;
        /* Advance the carve pointer past this allocation. */
        p_ktable->p_extra_mem = (void *)(((char *)p_mem) + size);
        p_ktable->extra_mem_size = extra_mem_size - size;
        *pp_mem = p_mem;
        return ABT_SUCCESS;
    } else if (ABTU_likely(size <= ABTI_KTABLE_DESC_SIZE)) {
        /* Use memory pool. */
        void *p_mem;
        int abt_errno = ABTI_mem_alloc_desc(p_local, &p_mem);
        ABTI_CHECK_ERROR(abt_errno);
        /* Link the chunk into the used-memory list via its header. */
        ABTI_ktable_mem_header *p_header = (ABTI_ktable_mem_header *)p_mem;
        p_header->p_next = (ABTI_ktable_mem_header *)p_ktable->p_used_mem;
        p_header->is_from_mempool = ABT_TRUE;
        p_ktable->p_used_mem = (void *)p_header;
        /* The usable region starts just past the header. */
        p_mem = (void *)(((char *)p_mem) + sizeof(ABTI_ktable_mem_header));
        /* The unused tail of this chunk becomes the new extra memory.  Any
         * remaining tail of the previous chunk is abandoned (it is still
         * freed via the used-memory list). */
        p_ktable->p_extra_mem = (void *)(((char *)p_mem) + size);
        p_ktable->extra_mem_size = ABTI_KTABLE_DESC_SIZE - size;
        *pp_mem = p_mem;
        return ABT_SUCCESS;
    } else {
        /* Use malloc() */
        void *p_mem;
        int abt_errno =
            ABTU_malloc(size + sizeof(ABTI_ktable_mem_header), &p_mem);
        ABTI_CHECK_ERROR(abt_errno);
        ABTI_ktable_mem_header *p_header = (ABTI_ktable_mem_header *)p_mem;
        p_header->p_next = (ABTI_ktable_mem_header *)p_ktable->p_used_mem;
        p_header->is_from_mempool = ABT_FALSE;
        p_ktable->p_used_mem = (void *)p_header;
        p_mem = (void *)(((char *)p_mem) + sizeof(ABTI_ktable_mem_header));
        /* An exactly-sized malloc'ed chunk leaves no reusable tail, so
         * p_extra_mem is not updated here. */
        *pp_mem = p_mem;
        return ABT_SUCCESS;
    }
}
161 
162 static inline uint32_t ABTI_ktable_get_idx(ABTI_key *p_key, int size)
163 {
164  return p_key->id & (size - 1);
165 }
166 
/* Associate value with p_key in p_ktable, overwriting any existing value.
 * The fast path is lock-free: the bucket chain is scanned with acquire
 * loads.  Only if the key is absent is the table lock taken (when is_safe
 * is ABT_TRUE) to append a new element.  Elements are never unlinked, so a
 * chain tail observed before locking can only have grown, which lets the
 * locked re-scan resume from pp_elem instead of the bucket head. */
ABTU_ret_err static inline int
ABTI_ktable_set_impl(ABTI_local *p_local, ABTI_ktable *p_ktable,
                     ABTI_key *p_key, void *value, ABT_bool is_safe)
{
    uint32_t idx;
    ABTD_atomic_ptr *pp_elem;
    ABTI_ktelem *p_elem;

    /* Look for the same key */
    idx = ABTI_ktable_get_idx(p_key, p_ktable->size);
    pp_elem = &p_ktable->p_elems[idx];
    p_elem = (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(pp_elem);
    uint32_t key_id = p_key->id;
    while (p_elem) {
        if (p_elem->key_id == key_id) {
            /* Found: update in place, no lock needed. */
            p_elem->value = value;
            return ABT_SUCCESS;
        }
        pp_elem = &p_elem->p_next;
        p_elem = (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(pp_elem);
    }

    /* The table does not have the same key */
    if (is_safe)
        ABTD_spinlock_acquire(&p_ktable->lock);
    /* The linked list might have been extended: another writer may have
     * appended this key before the lock was acquired, so re-scan from the
     * previously observed tail. */
    p_elem = (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(pp_elem);
    while (p_elem) {
        if (p_elem->key_id == key_id) {
            if (is_safe)
                ABTD_spinlock_release(&p_ktable->lock);
            p_elem->value = value;
            return ABT_SUCCESS;
        }
        pp_elem = &p_elem->p_next;
        p_elem = (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(pp_elem);
    }
    /* Now the pp_elem points to the tail of the list. Add a new element. */
    ABTI_STATIC_ASSERT((ABTU_MAX_ALIGNMENT & (ABTU_MAX_ALIGNMENT - 1)) == 0);
    /* Round the element size up to the maximum alignment, as required by
     * ABTI_ktable_alloc_elem(). */
    size_t ktelem_size = (sizeof(ABTI_ktelem) + ABTU_MAX_ALIGNMENT - 1) &
                         (~(ABTU_MAX_ALIGNMENT - 1));
    int abt_errno = ABTI_ktable_alloc_elem(p_local, p_ktable, ktelem_size,
                                           (void **)&p_elem);
    if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
        /* Release the lock before propagating the allocation error. */
        if (is_safe)
            ABTD_spinlock_release(&p_ktable->lock);
        return abt_errno;
    }
    p_elem->f_destructor = p_key->f_destructor;
    p_elem->key_id = p_key->id;
    p_elem->value = value;
    ABTD_atomic_relaxed_store_ptr(&p_elem->p_next, NULL);
    /* Release store publishes the fully initialized element to lock-free
     * readers (which pair with acquire loads). */
    ABTD_atomic_release_store_ptr(pp_elem, p_elem);
    if (is_safe)
        ABTD_spinlock_release(&p_ktable->lock);
    return ABT_SUCCESS;
}
224 
/* Set p_key -> value in the table referenced by *pp_ktable, creating the
 * table on demand.  Creation is coordinated through pp_ktable itself:
 * NULL means "no table yet", ABTI_KTABLE_LOCKED means "another thread is
 * creating it", and any other value is a usable table pointer.
 * Returns ABT_SUCCESS or an error from table creation / element insertion. */
ABTU_ret_err static inline int ABTI_ktable_set(ABTI_global *p_global,
                                               ABTI_local *p_local,
                                               ABTD_atomic_ptr *pp_ktable,
                                               ABTI_key *p_key, void *value)
{
    int abt_errno;
    ABTI_ktable *p_ktable = ABTD_atomic_acquire_load_ptr(pp_ktable);
    if (ABTU_unlikely(!ABTI_ktable_is_valid(p_ktable))) {
        /* Spinlock pp_ktable */
        while (1) {
            if (ABTD_atomic_bool_cas_weak_ptr(pp_ktable, NULL,
                                              ABTI_KTABLE_LOCKED)) {
                /* The lock was acquired, so let's allocate this table. */
                abt_errno = ABTI_ktable_create(p_global, p_local, &p_ktable);
                if (abt_errno != ABT_SUCCESS) {
                    /* Creation failed: restore NULL so a later call can
                     * retry, then propagate the error. */
                    ABTD_atomic_release_store_ptr(pp_ktable, NULL);
                    ABTI_HANDLE_ERROR(abt_errno);
                }

                /* Write down the value. The lock is released here. */
                ABTD_atomic_release_store_ptr(pp_ktable, p_ktable);
                break;
            } else {
                /* Failed to take the lock (the CAS may also fail spuriously
                 * since it is the weak variant).  Check the current value to
                 * decide whether to try taking the lock again. */
                p_ktable = ABTD_atomic_acquire_load_ptr(pp_ktable);
                if (p_ktable == NULL) {
                    /* Try once more. */
                    continue;
                }
                /* It has been locked by another. */
                while (p_ktable == ABTI_KTABLE_LOCKED) {
                    ABTD_atomic_pause();
                    p_ktable = ABTD_atomic_acquire_load_ptr(pp_ktable);
                }
                /* p_ktable has been allocated by another. */
                break;
            }
        }
    }
    abt_errno = ABTI_ktable_set_impl(p_local, p_ktable, p_key, value, ABT_TRUE);
    ABTI_CHECK_ERROR(abt_errno);
    return ABT_SUCCESS;
}
269 
270 ABTU_ret_err static inline int
271 ABTI_ktable_set_unsafe(ABTI_global *p_global, ABTI_local *p_local,
272  ABTI_ktable **pp_ktable, ABTI_key *p_key, void *value)
273 {
274  int abt_errno;
275  ABTI_ktable *p_ktable = *pp_ktable;
276  if (!p_ktable) {
277  abt_errno = ABTI_ktable_create(p_global, p_local, &p_ktable);
278  ABTI_CHECK_ERROR(abt_errno);
279  *pp_ktable = p_ktable;
280  }
281  abt_errno =
282  ABTI_ktable_set_impl(p_local, p_ktable, p_key, value, ABT_FALSE);
283  ABTI_CHECK_ERROR(abt_errno);
284  return ABT_SUCCESS;
285 }
286 
287 static inline void *ABTI_ktable_get(ABTD_atomic_ptr *pp_ktable, ABTI_key *p_key)
288 {
289  ABTI_ktable *p_ktable = ABTD_atomic_acquire_load_ptr(pp_ktable);
290  if (ABTI_ktable_is_valid(p_ktable)) {
291  uint32_t idx;
292  ABTI_ktelem *p_elem;
293 
294  idx = ABTI_ktable_get_idx(p_key, p_ktable->size);
295  p_elem = (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(
296  &p_ktable->p_elems[idx]);
297  uint32_t key_id = p_key->id;
298  while (p_elem) {
299  if (p_elem->key_id == key_id) {
300  return p_elem->value;
301  }
302  p_elem =
303  (ABTI_ktelem *)ABTD_atomic_acquire_load_ptr(&p_elem->p_next);
304  }
305  }
306  return NULL;
307 }
308 
309 #endif /* ABTI_KEY_H_INCLUDED */
ABT_key
struct ABT_key_opaque * ABT_key
Work-unit-specific data key handle type.
Definition: abt.h:980
ABT_bool
int ABT_bool
Boolean type.
Definition: abt.h:1043
ABT_KEY_NULL
#define ABT_KEY_NULL
Definition: abt.h:1108
ABTU_roundup_size
static size_t ABTU_roundup_size(size_t val, size_t multiple)
Definition: abtu.h:95
ABTU_likely
#define ABTU_likely(cond)
Definition: abtu.h:119
ABTU_malloc
static ABTU_ret_err int ABTU_malloc(size_t size, void **p_ptr)
Definition: abtu.h:235
ABT_SUCCESS
#define ABT_SUCCESS
Error code: the routine returns successfully.
Definition: abt.h:92
ABTU_ret_err
#define ABTU_ret_err
Definition: abtu.h:155
ABTU_unlikely
#define ABTU_unlikely(cond)
Definition: abtu.h:120
ABT_TRUE
#define ABT_TRUE
True constant for ABT_bool.
Definition: abt.h:784
ABTU_MAX_ALIGNMENT
#define ABTU_MAX_ALIGNMENT
Definition: abtu.h:151
ABT_FALSE
#define ABT_FALSE
False constant for ABT_bool.
Definition: abt.h:786