malloc.c
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 
8 #ifdef ABT_CONFIG_USE_MEM_POOL
9 /* Currently, the total memory allocated for stacks and task block pages is
10  * not shrunk, to avoid thrashing overhead, except when ESs are terminated or
11  * ABT_finalize is called. When an ES terminates its execution, the stacks and
12  * empty pages that it holds are deallocated. Non-empty pages are added to the
13  * global data. When ABTI_finalize is called, all memory objects that we have
14  * allocated are returned to the higher-level memory allocator. */
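/* Overview of the memory objects managed here:
 * - A stack page (ABTI_sp_header) is one large allocation that is carved into
 *   fixed-size ULT stacks; each stack carries an ABTI_stack_header.
 * - A task block page (ABTI_page_header) is a page carved into fixed-size
 *   task blocks (ABTI_blk_header).
 * - Each ES caches free stacks and task block pages in its ABTI_local;
 *   leftovers are returned to ABTI_global, either through lock-free CAS lists
 *   (p_mem_stack, p_mem_sph) or under mem_task_lock (p_mem_task). */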
15 
16 #include <sys/types.h>
17 #include <sys/mman.h>
18 
19 #define PROTS (PROT_READ | PROT_WRITE)
20 
21 #if defined(HAVE_MAP_ANONYMOUS)
22 #define FLAGS_RP (MAP_PRIVATE | MAP_ANONYMOUS)
23 #elif defined(HAVE_MAP_ANON)
24 #define FLAGS_RP (MAP_PRIVATE | MAP_ANON)
25 #else
26 /* In this case, we don't allow using mmap. We always use malloc. */
27 #define FLAGS_RP (MAP_PRIVATE)
28 #endif
29 
30 #if defined(HAVE_MAP_HUGETLB)
31 #define FLAGS_HP (FLAGS_RP | MAP_HUGETLB)
32 #define FD_HP 0
33 #define MMAP_DBG_MSG "mmap a hugepage"
34 #else
35 /* NOTE: On Mac OS, we tried VM_FLAGS_SUPERPAGE_SIZE_ANY, which is defined in
36  * <mach/vm_statistics.h>, but mmap() failed with it and its execution was too
37  * slow. For that reason, we do not support it for now. */
38 #define FLAGS_HP FLAGS_RP
39 #define FD_HP 0
40 #define MMAP_DBG_MSG "mmap regular pages"
41 #endif
42 
43 static inline void ABTI_mem_free_stack_list(ABTI_stack_header *p_stack);
44 static inline void ABTI_mem_free_page_list(ABTI_page_header *p_ph);
45 static inline void ABTI_mem_add_page(ABTI_local *p_local,
46  ABTI_page_header *p_ph);
47 static inline void ABTI_mem_add_pages_to_global(ABTI_page_header *p_head,
48  ABTI_page_header *p_tail);
49 static inline void ABTI_mem_free_sph_list(ABTI_sp_header *p_sph);
50 static ABTD_atomic_uint64 g_sp_id = ABTD_ATOMIC_UINT64_STATIC_INITIALIZER(0);
51 
52 void ABTI_mem_init(ABTI_global *p_global)
53 {
54  p_global->p_mem_stack = NULL;
55  ABTI_spinlock_clear(&p_global->mem_task_lock);
56  p_global->p_mem_task = NULL;
57  p_global->p_mem_sph = NULL;
58 
59  ABTD_atomic_relaxed_store_uint64(&g_sp_id, 0);
60 }
61 
62 void ABTI_mem_init_local(ABTI_local *p_local)
63 {
64  /* TODO: preallocate some stacks? */
65  p_local->num_stacks = 0;
66  p_local->p_mem_stack = NULL;
67 
68  /* TODO: preallocate some task blocks? */
69  p_local->p_mem_task_head = NULL;
70  p_local->p_mem_task_tail = NULL;
71 }
72 
73 void ABTI_mem_finalize(ABTI_global *p_global)
74 {
75  /* Free all remaining stacks */
76  ABTI_mem_free_stack_list(p_global->p_mem_stack);
77  p_global->p_mem_stack = NULL;
78 
79  /* Free all task blocks */
80  ABTI_mem_free_page_list(p_global->p_mem_task);
81  p_global->p_mem_task = NULL;
82 
83  /* Free all stack pages */
84  ABTI_mem_free_sph_list(p_global->p_mem_sph);
85  p_global->p_mem_sph = NULL;
86 }
87 
88 void ABTI_mem_finalize_local(ABTI_local *p_local)
89 {
90  /* Free all remaining stacks */
91  ABTI_mem_free_stack_list(p_local->p_mem_stack);
92  p_local->num_stacks = 0;
93  p_local->p_mem_stack = NULL;
94 
95  /* Free all task block pages */
96  ABTI_page_header *p_rem_head = NULL;
97  ABTI_page_header *p_rem_tail = NULL;
98  ABTI_page_header *p_cur = p_local->p_mem_task_head;
99  while (p_cur) {
100  ABTI_page_header *p_tmp = p_cur;
101  p_cur = p_cur->p_next;
102 
103  size_t num_free_blks =
104  p_tmp->num_empty_blks +
105  ABTD_atomic_acquire_load_uint32(&p_tmp->num_remote_free);
106  if (num_free_blks == p_tmp->num_total_blks) {
107  if (p_tmp->is_mmapped == ABT_TRUE) {
108  munmap(p_tmp, gp_ABTI_global->mem_page_size);
109  } else {
110  ABTU_free(p_tmp);
111  }
112  } else {
113  if (p_tmp->p_free) {
114  ABTI_mem_take_free(p_tmp);
115  }
116 
117  p_tmp->owner_id = 0;
118  p_tmp->p_prev = NULL;
119  p_tmp->p_next = p_rem_head;
120  p_rem_head = p_tmp;
121  if (p_rem_tail == NULL) {
122  p_rem_tail = p_tmp;
123  }
124  }
125 
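 /* The local task page list built by ABTI_mem_add_page is circular,
  * so stop once the traversal wraps around to the head. */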
126  if (p_cur == p_local->p_mem_task_head)
127  break;
128  }
129  p_local->p_mem_task_head = NULL;
130  p_local->p_mem_task_tail = NULL;
131 
132  /* If there are pages that have not been fully freed, we move them to the
133  * global task page list. */
134  if (p_rem_head) {
135  ABTI_mem_add_pages_to_global(p_rem_head, p_rem_tail);
136  }
137 }
138 
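/* Check whether the requested large-page allocation method works by trying a
 * dummy allocation and releasing it immediately.  On failure the method is
 * downgraded (MMAP_HP_RP -> MMAP_RP -> MALLOC, MMAP_HP_THP -> THP -> MALLOC),
 * and the possibly downgraded method is returned. */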
139 int ABTI_mem_check_lp_alloc(int lp_alloc)
140 {
141  size_t sp_size = gp_ABTI_global->mem_sp_size;
142  size_t pg_size = gp_ABTI_global->mem_page_size;
143  size_t alignment;
144  void *p_page = NULL;
145 
146  switch (lp_alloc) {
147  case ABTI_MEM_LP_MMAP_RP:
148  p_page = mmap(NULL, pg_size, PROTS, FLAGS_RP, 0, 0);
149  if (p_page != MAP_FAILED) {
150  munmap(p_page, pg_size);
151  } else {
152  lp_alloc = ABTI_MEM_LP_MALLOC;
153  }
154  break;
155 
156  case ABTI_MEM_LP_MMAP_HP_RP:
157  p_page = mmap(NULL, sp_size, PROTS, FLAGS_HP, 0, 0);
158  if (p_page != MAP_FAILED) {
159  munmap(p_page, sp_size);
160  } else {
161  p_page = mmap(NULL, pg_size, PROTS, FLAGS_RP, 0, 0);
162  if (p_page != MAP_FAILED) {
163  munmap(p_page, pg_size);
164  lp_alloc = ABTI_MEM_LP_MMAP_RP;
165  } else {
166  lp_alloc = ABTI_MEM_LP_MALLOC;
167  }
168  }
169  break;
170 
171  case ABTI_MEM_LP_MMAP_HP_THP:
172  p_page = mmap(NULL, sp_size, PROTS, FLAGS_HP, 0, 0);
173  if (p_page != MAP_FAILED) {
174  munmap(p_page, sp_size);
175  } else {
176  alignment = gp_ABTI_global->huge_page_size;
177  p_page = ABTU_memalign(alignment, pg_size);
178  if (p_page) {
179  ABTU_free(p_page);
180  lp_alloc = ABTI_MEM_LP_THP;
181  } else {
182  lp_alloc = ABTI_MEM_LP_MALLOC;
183  }
184  }
185  break;
186 
187  case ABTI_MEM_LP_THP:
188  alignment = gp_ABTI_global->huge_page_size;
189  p_page = ABTU_memalign(alignment, pg_size);
190  if (p_page) {
191  ABTU_free(p_page);
192  lp_alloc = ABTI_MEM_LP_THP;
193  } else {
194  lp_alloc = ABTI_MEM_LP_MALLOC;
195  }
196  break;
197 
198  default:
199  break;
200  }
201 
202  return lp_alloc;
203 }
204 
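/* "Freeing" a stack list only marks each stack as empty in its owning stack
 * page header; the memory itself is released when the stack pages are freed
 * by ABTI_mem_free_sph_list. */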
205 static inline void ABTI_mem_free_stack_list(ABTI_stack_header *p_stack)
206 {
207  ABTI_stack_header *p_cur, *p_tmp;
208 
209  p_cur = p_stack;
210  while (p_cur) {
211  p_tmp = p_cur;
212  p_cur = p_cur->p_next;
213  ABTD_atomic_fetch_add_uint32(&p_tmp->p_sph->num_empty_stacks, 1);
214  }
215 }
216 
217 static inline void ABTI_mem_free_page_list(ABTI_page_header *p_ph)
218 {
219  ABTI_page_header *p_cur, *p_tmp;
220 
221  p_cur = p_ph;
222  while (p_cur) {
223  p_tmp = p_cur;
224  p_cur = p_cur->p_next;
225  if (p_tmp->is_mmapped == ABT_TRUE) {
226  munmap(p_tmp, gp_ABTI_global->mem_page_size);
227  } else {
228  ABTU_free(p_tmp);
229  }
230  }
231 }
232 
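/* Add a task block page to the calling ES's local page list.  The list is a
 * circular doubly-linked list and the new page becomes its head. */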
233 static inline void ABTI_mem_add_page(ABTI_local *p_local,
234  ABTI_page_header *p_ph)
235 {
236  p_ph->owner_id = ABTI_self_get_native_thread_id(p_local);
237 
238  /* Add the page to the head */
239  if (p_local->p_mem_task_head != NULL) {
240  p_ph->p_prev = p_local->p_mem_task_tail;
241  p_ph->p_next = p_local->p_mem_task_head;
242  p_local->p_mem_task_head->p_prev = p_ph;
243  p_local->p_mem_task_tail->p_next = p_ph;
244  p_local->p_mem_task_head = p_ph;
245  } else {
246  p_ph->p_prev = p_ph;
247  p_ph->p_next = p_ph;
248  p_local->p_mem_task_head = p_ph;
249  p_local->p_mem_task_tail = p_ph;
250  }
251 }
252 
253 static inline void ABTI_mem_add_pages_to_global(ABTI_page_header *p_head,
254  ABTI_page_header *p_tail)
255 {
256  ABTI_global *p_global = gp_ABTI_global;
257 
258  /* Add the page list to the global list */
259  ABTI_spinlock_acquire(&p_global->mem_task_lock);
260  p_tail->p_next = p_global->p_mem_task;
261  p_global->p_mem_task = p_head;
262  ABTI_spinlock_release(&p_global->mem_task_lock);
263 }
264 
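/* Detach the entire global free-stack list by atomically swapping
 * p_global->p_mem_stack with NULL.  The first stack is returned to the caller
 * (adjusted back to the beginning of its ABTI_thread area) and the remaining
 * stacks are kept in p_local. */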
265 char *ABTI_mem_take_global_stack(ABTI_local *p_local)
266 {
267  ABTI_global *p_global = gp_ABTI_global;
268  ABTI_stack_header *p_sh, *p_cur;
269  uint32_t cnt_stacks = 0;
270 
271  ABTD_atomic_ptr *ptr;
272  void *old;
273  do {
274  p_sh = (ABTI_stack_header *)ABTD_atomic_acquire_load_ptr(
275  (ABTD_atomic_ptr *)&p_global->p_mem_stack);
276  ptr = (ABTD_atomic_ptr *)&p_global->p_mem_stack;
277  old = (void *)p_sh;
278  } while (!ABTD_atomic_bool_cas_weak_ptr(ptr, old, NULL));
279 
280  if (p_sh == NULL)
281  return NULL;
282 
283  /* TODO: need a better counting method */
284  /* TODO: if there are too many stacks in the global stack pool, we should
285  * only take some of them (e.g., take max_stacks count) and add the rest
286  * back to the global stack pool. */
287  p_cur = p_sh;
288  while (p_cur->p_next) {
289  p_cur = p_cur->p_next;
290  cnt_stacks++;
291  }
292 
293  /* Return the first one and keep the rest in p_local */
294  p_local->num_stacks = cnt_stacks;
295  p_local->p_mem_stack = p_sh->p_next;
296 
297  return (char *)p_sh - sizeof(ABTI_thread);
298 }
299 
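/* Push a single stack, identified by its stack header, onto the global
 * free-stack list with a lock-free CAS loop. */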
300 void ABTI_mem_add_stack_to_global(ABTI_stack_header *p_sh)
301 {
302  ABTI_global *p_global = gp_ABTI_global;
303  ABTD_atomic_ptr *ptr;
304  void *old, *new;
305 
306  do {
307  ABTI_stack_header *p_mem_stack =
308  (ABTI_stack_header *)ABTD_atomic_acquire_load_ptr(
309  (ABTD_atomic_ptr *)&p_global->p_mem_stack);
310  p_sh->p_next = p_mem_stack;
311  ptr = (ABTD_atomic_ptr *)&p_global->p_mem_stack;
312  old = (void *)p_mem_stack;
313  new = (void *)p_sh;
314  } while (!ABTD_atomic_bool_cas_weak_ptr(ptr, old, new));
315 }
316 
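/* Allocate one page of pgsize bytes using the configured large-page method,
 * falling back to weaker methods on failure.  *p_is_mmapped tells the caller
 * whether the page must later be released with munmap() or ABTU_free(). */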
317 static char *ABTI_mem_alloc_large_page(int pgsize, ABT_bool *p_is_mmapped)
318 {
319  char *p_page = NULL;
320 
321  switch (gp_ABTI_global->mem_lp_alloc) {
322  case ABTI_MEM_LP_MALLOC:
323  *p_is_mmapped = ABT_FALSE;
324  p_page = (char *)ABTU_malloc(pgsize);
325  LOG_DEBUG("malloc a regular page (%d): %p\n", pgsize, p_page);
326  break;
327 
328  case ABTI_MEM_LP_MMAP_RP:
329  p_page = (char *)mmap(NULL, pgsize, PROTS, FLAGS_RP, 0, 0);
330  if ((void *)p_page != MAP_FAILED) {
331  *p_is_mmapped = ABT_TRUE;
332  LOG_DEBUG("mmap a regular page (%d): %p\n", pgsize, p_page);
333  } else {
334  /* mmap failed and thus we fall back to malloc. */
335  p_page = (char *)ABTU_malloc(pgsize);
336  *p_is_mmapped = ABT_FALSE;
337  LOG_DEBUG("fall back to malloc a regular page (%d): %p\n",
338  pgsize, p_page);
339  }
340  break;
341 
342  case ABTI_MEM_LP_MMAP_HP_RP:
343  /* We first try to mmap a huge page, and then if it fails, we mmap
344  * a regular page. */
345  p_page = (char *)mmap(NULL, pgsize, PROTS, FLAGS_HP, 0, 0);
346  if ((void *)p_page != MAP_FAILED) {
347  *p_is_mmapped = ABT_TRUE;
348  LOG_DEBUG(MMAP_DBG_MSG " (%d): %p\n", pgsize, p_page);
349  } else {
350  /* Huge pages have run out, so use a normal mmap. */
351  p_page = (char *)mmap(NULL, pgsize, PROTS, FLAGS_RP, 0, 0);
352  if ((void *)p_page != MAP_FAILED) {
353  *p_is_mmapped = ABT_TRUE;
354  LOG_DEBUG("fall back to mmap regular pages (%d): %p\n",
355  pgsize, p_page);
356  } else {
357  /* mmap failed and thus we fall back to malloc. */
358  p_page = (char *)ABTU_malloc(pgsize);
359  *p_is_mmapped = ABT_FALSE;
360  LOG_DEBUG("fall back to malloc a regular page (%d): %p\n",
361  pgsize, p_page);
362  }
363  }
364  break;
365 
366  case ABTI_MEM_LP_MMAP_HP_THP:
367  /* We first try to mmap a huge page, and then if it fails, try to
368  * use a THP. */
369  p_page = (char *)mmap(NULL, pgsize, PROTS, FLAGS_HP, 0, 0);
370  if ((void *)p_page != MAP_FAILED) {
371  *p_is_mmapped = ABT_TRUE;
372  LOG_DEBUG(MMAP_DBG_MSG " (%d): %p\n", pgsize, p_page);
373  } else {
374  *p_is_mmapped = ABT_FALSE;
375  size_t alignment = gp_ABTI_global->huge_page_size;
376  p_page = (char *)ABTU_memalign(alignment, pgsize);
377  LOG_DEBUG("memalign a THP (%d): %p\n", pgsize, p_page);
378  }
379  break;
380 
381  case ABTI_MEM_LP_THP:
382  *p_is_mmapped = ABT_FALSE;
383  {
384  size_t alignment = gp_ABTI_global->huge_page_size;
385  p_page = (char *)ABTU_memalign(alignment, pgsize);
386  LOG_DEBUG("memalign a THP (%d): %p\n", pgsize, p_page);
387  }
388  break;
389 
390  default:
391  ABTI_ASSERT(0);
392  break;
393  }
394 
395  return p_page;
396 }
397 
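/* Allocate a task block page of mem_page_size bytes, carve it into blocks of
 * blk_size bytes, link all blocks into p_ph->p_head, and register the page in
 * the calling ES's local page list. */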
398 ABTI_page_header *ABTI_mem_alloc_page(ABTI_local *p_local, size_t blk_size)
399 {
400  int i;
401  ABTI_page_header *p_ph;
402  ABTI_blk_header *p_cur;
403  ABTI_global *p_global = gp_ABTI_global;
404  const uint32_t clsize = ABT_CONFIG_STATIC_CACHELINE_SIZE;
405  size_t pgsize = p_global->mem_page_size;
406  ABT_bool is_mmapped;
407 
408  /* Make the page header size a multiple of cache line size */
409  const size_t ph_size =
410  (sizeof(ABTI_page_header) + clsize) / clsize * clsize;
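 /* E.g., with a 64-byte cache line and sizeof(ABTI_page_header) == 72
  * (illustrative numbers), ph_size becomes 128. */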
411 
412  uint32_t num_blks = (pgsize - ph_size) / blk_size;
413  char *p_page = ABTI_mem_alloc_large_page(pgsize, &is_mmapped);
414 
415  /* Set the page header */
416  p_ph = (ABTI_page_header *)p_page;
417  p_ph->blk_size = blk_size;
418  p_ph->num_total_blks = num_blks;
419  p_ph->num_empty_blks = num_blks;
420  ABTD_atomic_relaxed_store_uint32(&p_ph->num_remote_free, 0);
421  p_ph->p_head = (ABTI_blk_header *)(p_page + ph_size);
422  p_ph->p_free = NULL;
423  ABTI_mem_add_page(p_local, p_ph);
424  p_ph->is_mmapped = is_mmapped;
425 
426  /* Make a linked list of all free blocks */
427  p_cur = p_ph->p_head;
428  for (i = 0; i < num_blks - 1; i++) {
429  p_cur->p_ph = p_ph;
430  p_cur->p_next = (ABTI_blk_header *)((char *)p_cur + blk_size);
431  p_cur = p_cur->p_next;
432  }
433  p_cur->p_ph = p_ph;
434  p_cur->p_next = NULL;
435 
436  return p_ph;
437 }
438 
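/* Release a task block page once all of its blocks are free, but always keep
 * at least one page locally for future use. */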
439 void ABTI_mem_free_page(ABTI_local *p_local, ABTI_page_header *p_ph)
440 {
441  /* We keep one page for future use. */
442  if (p_local->p_mem_task_head == p_local->p_mem_task_tail)
443  return;
444 
445  uint32_t num_free_blks =
446  p_ph->num_empty_blks +
447  ABTD_atomic_acquire_load_uint32(&p_ph->num_remote_free);
448  if (num_free_blks == p_ph->num_total_blks) {
449  /* All blocks in the page have been freed */
450  /* Remove from the list and free the page */
451  p_ph->p_prev->p_next = p_ph->p_next;
452  p_ph->p_next->p_prev = p_ph->p_prev;
453  if (p_ph == p_local->p_mem_task_head) {
454  p_local->p_mem_task_head = p_ph->p_next;
455  } else if (p_ph == p_local->p_mem_task_tail) {
456  p_local->p_mem_task_tail = p_ph->p_prev;
457  }
458  if (p_ph->is_mmapped == ABT_TRUE) {
459  munmap(p_ph, gp_ABTI_global->mem_page_size);
460  } else {
461  ABTU_free(p_ph);
462  }
463  }
464 }
465 
466 void ABTI_mem_take_free(ABTI_page_header *p_ph)
467 {
468  /* Decrease the number of remote free blocks. */
469  /* NOTE: p_ph->num_empty_blks and p_ph->num_remote_free do not need to be
470  * accurate as long as their sum is the same as the actual number of free
471  * blocks. We keep these variables to avoid chasing the linked list to count
472  * the number of free blocks. */
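 /* E.g. (illustrative numbers, ignoring concurrent remote frees): with
  * num_total_blks == 32, num_empty_blks == 20, and num_remote_free == 12,
  * the page is in fact completely free; after this function runs,
  * num_empty_blks == 32, num_remote_free == 0, and p_head points at the
  * reclaimed blocks. */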
473  uint32_t num_remote_free =
474  ABTD_atomic_acquire_load_uint32(&p_ph->num_remote_free);
475  ABTD_atomic_ptr *ptr;
476  void *old;
477 
478  ABTD_atomic_fetch_sub_uint32(&p_ph->num_remote_free, num_remote_free);
479  p_ph->num_empty_blks += num_remote_free;
480 
481  /* Take the remote free pointer */
482  do {
483  ABTI_blk_header *p_free =
484  (ABTI_blk_header *)ABTD_atomic_acquire_load_ptr(
485  (ABTD_atomic_ptr *)&p_ph->p_free);
486  p_ph->p_head = p_free;
487  ptr = (ABTD_atomic_ptr *)&p_ph->p_free;
488  old = (void *)p_free;
489  } while (!ABTD_atomic_bool_cas_weak_ptr(ptr, old, NULL));
490 }
491 
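/* Return a block to a page owned by another ES: push the block onto the
 * page's p_free list with a CAS and count it in num_remote_free.  The owner
 * reclaims such blocks later via ABTI_mem_take_free. */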
492 void ABTI_mem_free_remote(ABTI_page_header *p_ph, ABTI_blk_header *p_bh)
493 {
494  ABTD_atomic_ptr *ptr;
495  void *old, *new;
496  do {
497  ABTI_blk_header *p_free =
498  (ABTI_blk_header *)ABTD_atomic_acquire_load_ptr(
499  (ABTD_atomic_ptr *)&p_ph->p_free);
500  p_bh->p_next = p_free;
501  ptr = (ABTD_atomic_ptr *)&p_ph->p_free;
502  old = (void *)p_free;
503  new = (void *)p_bh;
504  } while (!ABTD_atomic_bool_cas_weak_ptr(ptr, old, new));
505 
506  /* Increase the number of remote free blocks */
507  ABTD_atomic_fetch_add_uint32(&p_ph->num_remote_free, 1);
508 }
509 
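/* Take one page from the global task page list, register it locally, and
 * reclaim its remotely freed blocks.  Returns NULL if no usable page was
 * obtained. */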
510 ABTI_page_header *ABTI_mem_take_global_page(ABTI_local *p_local)
511 {
512  ABTI_global *p_global = gp_ABTI_global;
513  ABTI_page_header *p_ph = NULL;
514 
515  /* Take the first page out */
516  ABTI_spinlock_acquire(&p_global->mem_task_lock);
517  if (p_global->p_mem_task) {
518  p_ph = p_global->p_mem_task;
519  p_global->p_mem_task = p_ph->p_next;
520  }
521  ABTI_spinlock_release(&p_global->mem_task_lock);
522 
523  if (p_ph) {
524  ABTI_mem_add_page(p_local, p_ph);
525  if (p_ph->p_free)
526  ABTI_mem_take_free(p_ph);
527  if (p_ph->p_head == NULL)
528  p_ph = NULL;
529  }
530 
531  return p_ph;
532 }
533 
534 static inline void ABTI_mem_free_sph_list(ABTI_sp_header *p_sph)
535 {
536  ABTI_sp_header *p_cur, *p_tmp;
537 
538  p_cur = p_sph;
539  while (p_cur) {
540  p_tmp = p_cur;
541  p_cur = p_cur->p_next;
542 
543  if (p_tmp->num_total_stacks !=
544  ABTD_atomic_acquire_load_uint32(&p_tmp->num_empty_stacks)) {
545  LOG_DEBUG("%u ULTs are not freed\n",
546  p_tmp->num_total_stacks - ABTD_atomic_acquire_load_uint32(
547  &p_tmp->num_empty_stacks));
548  }
549 
550  if (p_tmp->is_mmapped == ABT_TRUE) {
551  if (munmap(p_tmp->p_sp, gp_ABTI_global->mem_sp_size)) {
552  ABTI_ASSERT(0);
553  }
554  } else {
555  ABTU_free(p_tmp->p_sp);
556  }
557  ABTU_free(p_tmp);
558  }
559 }
560 
561 /* Allocate a stack page and divide it into multiple stacks by making a
562  * linked list. Then, the first stack is returned. */
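/* Layout note: all num_stacks headers (an ABTI_thread area followed by an
 * ABTI_stack_header, ABTI_MEM_SH_SIZE bytes per stack) are packed contiguously
 * starting at offset actual_stacksize * (p_sph->id % num_stacks), and the rest
 * of the page holds the stack bodies.  Rotating the header block by the page
 * id presumably spreads the headers of different stack pages across different
 * offsets. */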
563 char *ABTI_mem_alloc_sp(ABTI_local *p_local, size_t stacksize)
564 {
565  char *p_sp, *p_first;
566  ABTI_sp_header *p_sph;
567  ABTI_stack_header *p_sh, *p_next;
568  uint32_t num_stacks;
569  int i;
570 
571  uint32_t header_size = ABTI_MEM_SH_SIZE;
572  uint32_t sp_size = gp_ABTI_global->mem_sp_size;
573  size_t actual_stacksize = stacksize - header_size;
574  void *p_stack = NULL;
575 
576  /* Allocate a stack page header */
577  p_sph = (ABTI_sp_header *)ABTU_malloc(sizeof(ABTI_sp_header));
578  num_stacks = sp_size / stacksize;
579  p_sph->num_total_stacks = num_stacks;
580  ABTD_atomic_relaxed_store_uint32(&p_sph->num_empty_stacks, 0);
581  p_sph->stacksize = stacksize;
582  p_sph->id = ABTD_atomic_fetch_add_uint64(&g_sp_id, 1);
583 
584  /* Allocate a stack page */
585  p_sp = ABTI_mem_alloc_large_page(sp_size, &p_sph->is_mmapped);
586 
587  /* Save the stack page pointer */
588  p_sph->p_sp = p_sp;
589 
590  /* First stack */
591  int first_pos = p_sph->id % num_stacks;
592  p_first = p_sp + actual_stacksize * first_pos;
593  p_sh = (ABTI_stack_header *)(p_first + sizeof(ABTI_thread));
594  p_sh->p_sph = p_sph;
595  p_stack = (first_pos == 0) ? (void *)(p_first + header_size * num_stacks)
596  : (void *)p_sp;
597  p_sh->p_stack = p_stack;
598 
599  if (num_stacks > 1) {
600  /* Make a linked list with remaining stacks */
601  p_sh = (ABTI_stack_header *)((char *)p_sh + header_size);
602 
603  p_local->num_stacks = num_stacks - 1;
604  p_local->p_mem_stack = p_sh;
605 
606  for (i = 1; i < num_stacks; i++) {
607  p_next = (i + 1) < num_stacks
608  ? (ABTI_stack_header *)((char *)p_sh + header_size)
609  : NULL;
610  p_sh->p_next = p_next;
611  p_sh->p_sph = p_sph;
612  if (first_pos == 0) {
613  p_sh->p_stack =
614  (void *)((char *)p_stack + i * actual_stacksize);
615  } else {
616  if (i < first_pos) {
617  p_sh->p_stack = (void *)(p_sp + i * actual_stacksize);
618  } else {
619  p_sh->p_stack =
620  (void *)(p_first + header_size * num_stacks +
621  (i - first_pos) * actual_stacksize);
622  }
623  }
624 
625  p_sh = p_next;
626  }
627  }
628 
629  /* Add this stack page to the global stack page list */
630  ABTD_atomic_ptr *ptr = (ABTD_atomic_ptr *)&gp_ABTI_global->p_mem_sph;
631  void *old;
632  do {
633  p_sph->p_next = (ABTI_sp_header *)ABTD_atomic_acquire_load_ptr(ptr);
634  old = (void *)p_sph->p_next;
635  } while (!ABTD_atomic_bool_cas_weak_ptr(ptr, old, (void *)p_sph));
636 
637  return p_first;
638 }
639 
640 #endif /* ABT_CONFIG_USE_MEM_POOL */