ARGOBOTS  dce6e727ffc4ca5b3ffc04cb9517c6689be51ec5
mem_pool.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 #include <stddef.h>
8 
9 static inline ABTI_mem_pool_page *
10 mem_pool_lifo_elem_to_page(ABTI_sync_lifo_element *lifo_elem)
11 {
12  return (ABTI_mem_pool_page *)(((char *)lifo_elem) -
13  offsetof(ABTI_mem_pool_page, lifo_elem));
14 }
15 
16 static inline ABTI_mem_pool_header *
17 mem_pool_lifo_elem_to_header(ABTI_sync_lifo_element *lifo_elem)
18 {
19  return (
20  ABTI_mem_pool_header *)(((char *)lifo_elem) -
21  (offsetof(ABTI_mem_pool_header, bucket_info) +
22  offsetof(ABTI_mem_pool_header_bucket_info,
23  lifo_elem)));
24 }
25 
26 static ABTU_ret_err int protect_memory(void *addr, size_t size,
27  size_t page_size, ABT_bool protect,
28  ABT_bool adjust_size)
29 {
30  /* Align addr. */
31  void *mprotect_addr = ABTU_roundup_ptr(addr, page_size);
32  if (adjust_size) {
33  size -= ((uintptr_t)mprotect_addr) - ((uintptr_t)addr);
34  }
35  return ABTU_mprotect(mprotect_addr, size, protect);
36 }
37 
/* Return a partially-filled bucket (fewer than num_headers_per_bucket
 * headers) to the global pool.  The global pool keeps at most one partial
 * bucket at a time, guarded by partial_bucket_lock.  The incoming bucket is
 * merged with the stored one; whenever the combination reaches a full
 * bucket, that full bucket is pushed to the global bucket LIFO via
 * ABTI_mem_pool_return_bucket() and any leftover headers become the new
 * stored partial bucket. */
static void
mem_pool_return_partial_bucket(ABTI_mem_pool_global_pool *p_global_pool,
                               ABTI_mem_pool_header *bucket)
{
    int i;
    const int num_headers_per_bucket = p_global_pool->num_headers_per_bucket;
    /* Return headers in the last bucket to partial_bucket. */
    ABTD_spinlock_acquire(&p_global_pool->partial_bucket_lock);
    if (!p_global_pool->partial_bucket) {
        /* No stored partial bucket: just keep this one. */
        p_global_pool->partial_bucket = bucket;
    } else {
        int num_headers_in_partial_bucket =
            p_global_pool->partial_bucket->bucket_info.num_headers;
        int num_headers_in_bucket = bucket->bucket_info.num_headers;
        if (num_headers_in_partial_bucket + num_headers_in_bucket <
            num_headers_per_bucket) {
            /* Connect partial_bucket + bucket. Still not enough to make
             * a complete bucket. */
            /* Walk to the tail header of the stored partial bucket
             * (num_headers is kept only in the head header). */
            ABTI_mem_pool_header *partial_bucket_tail =
                p_global_pool->partial_bucket;
            for (i = 1; i < num_headers_in_partial_bucket; i++) {
                partial_bucket_tail = partial_bucket_tail->p_next;
            }
            partial_bucket_tail->p_next = bucket;
            p_global_pool->partial_bucket->bucket_info.num_headers =
                num_headers_in_partial_bucket + num_headers_in_bucket;
        } else {
            /* partial_bucket + bucket can make a complete bucket. */
            /* Advance so that exactly (num_headers_per_bucket -
             * num_headers_in_bucket) headers, ending at
             * partial_bucket_header, are kept for the full bucket. */
            ABTI_mem_pool_header *partial_bucket_header =
                p_global_pool->partial_bucket;
            for (i = 1; i < num_headers_per_bucket - num_headers_in_bucket;
                 i++) {
                partial_bucket_header = partial_bucket_header->p_next;
            }
            ABTI_mem_pool_header *new_partial_bucket = NULL;
            if (num_headers_in_partial_bucket + num_headers_in_bucket !=
                num_headers_per_bucket) {
                /* Surplus headers after the split become the new stored
                 * partial bucket. */
                new_partial_bucket = partial_bucket_header->p_next;
                new_partial_bucket->bucket_info.num_headers =
                    num_headers_per_bucket -
                    (num_headers_in_partial_bucket + num_headers_in_bucket);
            }
            /* Splice the incoming bucket after the kept headers and return
             * the resulting complete bucket to the global LIFO. */
            partial_bucket_header->p_next = bucket;
            ABTI_mem_pool_return_bucket(p_global_pool,
                                        p_global_pool->partial_bucket);
            p_global_pool->partial_bucket = new_partial_bucket;
        }
    }
    ABTD_spinlock_release(&p_global_pool->partial_bucket_lock);
}
88 
89 void ABTI_mem_pool_init_global_pool(
90  ABTI_mem_pool_global_pool *p_global_pool, size_t num_headers_per_bucket,
91  size_t header_size, size_t header_offset, size_t page_size,
92  const ABTU_MEM_LARGEPAGE_TYPE *lp_type_requests,
93  uint32_t num_lp_type_requests, size_t alignment_hint,
94  ABTI_mem_pool_global_pool_mprotect_config *p_mprotect_config)
95 {
96  p_global_pool->num_headers_per_bucket = num_headers_per_bucket;
97  ABTI_ASSERT(header_offset + sizeof(ABTI_mem_pool_header) <= header_size);
98  p_global_pool->header_size = header_size;
99  p_global_pool->header_offset = header_offset;
100  p_global_pool->page_size = page_size;
101  if (p_mprotect_config) {
102  memcpy(&p_global_pool->mprotect_config, p_mprotect_config,
103  sizeof(ABTI_mem_pool_global_pool_mprotect_config));
104  } else {
105  p_global_pool->mprotect_config.enabled = ABT_FALSE;
106  }
107 
108  /* Note that lp_type_requests is a constant-sized array */
109  ABTI_ASSERT(num_lp_type_requests <=
110  sizeof(p_global_pool->lp_type_requests) /
111  sizeof(ABTU_MEM_LARGEPAGE_TYPE));
112  p_global_pool->num_lp_type_requests = num_lp_type_requests;
113  memcpy(p_global_pool->lp_type_requests, lp_type_requests,
114  sizeof(ABTU_MEM_LARGEPAGE_TYPE) * num_lp_type_requests);
115  /* If mprotect_config is set, we should not use a large page. */
116  if (p_global_pool->mprotect_config.enabled) {
117  uint32_t i, idx = 0;
118  for (i = 0; i < num_lp_type_requests; i++) {
119  if (p_global_pool->lp_type_requests[i] !=
121  p_global_pool->lp_type_requests[idx++] =
122  p_global_pool->lp_type_requests[i];
123  }
124  }
125  if (idx == 0) {
126  /* Use a fallback allocation type. */
127  p_global_pool->lp_type_requests[0] = ABTU_MEM_LARGEPAGE_MALLOC;
128  p_global_pool->num_lp_type_requests = 1;
129  } else {
130  p_global_pool->num_lp_type_requests = idx;
131  }
132  }
133  p_global_pool->alignment_hint = alignment_hint;
134 
135  ABTI_sync_lifo_init(&p_global_pool->mem_page_lifo);
136  ABTD_atomic_relaxed_store_ptr(&p_global_pool->p_mem_page_empty, NULL);
137  ABTI_sync_lifo_init(&p_global_pool->bucket_lifo);
138  ABTD_spinlock_clear(&p_global_pool->partial_bucket_lock);
139  p_global_pool->partial_bucket = NULL;
140 }
141 
142 void ABTI_mem_pool_destroy_global_pool(ABTI_mem_pool_global_pool *p_global_pool)
143 {
144  /* All local pools must be released in advance.
145  * Because all headers are from memory pages, each individual header does
146  * not need to be freed. */
147  ABTI_mem_pool_page *p_page;
148  ABTI_sync_lifo_element *p_page_lifo_elem;
149  while ((p_page_lifo_elem =
150  ABTI_sync_lifo_pop_unsafe(&p_global_pool->mem_page_lifo))) {
151  p_page = mem_pool_lifo_elem_to_page(p_page_lifo_elem);
152  if (p_global_pool->mprotect_config.enabled) {
153  /* Undo mprotect() */
154  int abt_errno =
155  protect_memory(p_page->mem, p_page->page_size,
156  p_global_pool->mprotect_config.alignment,
158  /* This should not fail since the allocated region is not newly
159  * split by this operation. */
160  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
161  }
162  ABTU_free_largepage(p_page->mem, p_page->page_size, p_page->lp_type);
163  }
164  p_page = (ABTI_mem_pool_page *)ABTD_atomic_relaxed_load_ptr(
165  &p_global_pool->p_mem_page_empty);
166  while (p_page) {
167  ABTI_mem_pool_page *p_next = p_page->p_next_empty_page;
168  if (p_global_pool->mprotect_config.enabled) {
169  /* Undo mprotect() */
170  int abt_errno =
171  protect_memory(p_page->mem, p_page->page_size,
172  p_global_pool->mprotect_config.alignment,
174  /* This should not fail since the allocated region is not newly
175  * split by this operation. */
176  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
177  }
178  ABTU_free_largepage(p_page->mem, p_page->page_size, p_page->lp_type);
179  p_page = p_next;
180  }
181  ABTI_sync_lifo_destroy(&p_global_pool->bucket_lifo);
182  ABTI_sync_lifo_destroy(&p_global_pool->mem_page_lifo);
183 }
184 
185 ABTU_ret_err int
186 ABTI_mem_pool_init_local_pool(ABTI_mem_pool_local_pool *p_local_pool,
187  ABTI_mem_pool_global_pool *p_global_pool)
188 {
189  p_local_pool->p_global_pool = p_global_pool;
190  p_local_pool->num_headers_per_bucket =
191  p_global_pool->num_headers_per_bucket;
192  /* There must be always at least one header in the local pool.
193  * Let's take one bucket. */
194  int abt_errno =
195  ABTI_mem_pool_take_bucket(p_global_pool, &p_local_pool->buckets[0]);
196  ABTI_CHECK_ERROR(abt_errno);
197  p_local_pool->bucket_index = 0;
198  return ABT_SUCCESS;
199 }
200 
201 void ABTI_mem_pool_destroy_local_pool(ABTI_mem_pool_local_pool *p_local_pool)
202 {
203  /* Return the remaining buckets to the global pool. */
204  int bucket_index = p_local_pool->bucket_index;
205  int i;
206  for (i = 0; i < bucket_index; i++) {
207  ABTI_mem_pool_return_bucket(p_local_pool->p_global_pool,
208  p_local_pool->buckets[i]);
209  }
210  const size_t num_headers_per_bucket = p_local_pool->num_headers_per_bucket;
211  ABTI_mem_pool_header *cur_bucket = p_local_pool->buckets[bucket_index];
212  if (cur_bucket->bucket_info.num_headers == num_headers_per_bucket) {
213  /* The last bucket is also full. Return the last bucket as well. */
214  ABTI_mem_pool_return_bucket(p_local_pool->p_global_pool,
215  p_local_pool->buckets[bucket_index]);
216  } else {
217  mem_pool_return_partial_bucket(p_local_pool->p_global_pool, cur_bucket);
218  }
219 }
220 
221 ABTU_ret_err int
222 ABTI_mem_pool_take_bucket(ABTI_mem_pool_global_pool *p_global_pool,
223  ABTI_mem_pool_header **p_bucket)
224 {
225  /* Try to get a bucket. */
226  ABTI_sync_lifo_element *p_popped_bucket_lifo_elem =
227  ABTI_sync_lifo_pop(&p_global_pool->bucket_lifo);
228  const int num_headers_per_bucket = p_global_pool->num_headers_per_bucket;
229  if (ABTU_likely(p_popped_bucket_lifo_elem)) {
230  /* Use this bucket. */
231  ABTI_mem_pool_header *popped_bucket =
232  mem_pool_lifo_elem_to_header(p_popped_bucket_lifo_elem);
233  popped_bucket->bucket_info.num_headers = num_headers_per_bucket;
234  *p_bucket = popped_bucket;
235  return ABT_SUCCESS;
236  } else {
237  /* Allocate headers by myself */
238  const size_t header_size = p_global_pool->header_size;
239  int num_headers = 0, i;
240  ABTI_mem_pool_header *p_head = NULL;
241  while (1) {
242  ABTI_mem_pool_page *p_page;
243  ABTI_sync_lifo_element *p_page_lifo_elem;
244  /* Before really allocating memory, check if a page has unused
245  * memory. */
246  if ((p_page_lifo_elem =
247  ABTI_sync_lifo_pop(&p_global_pool->mem_page_lifo))) {
248  /* Use a page popped from mem_page_lifo */
249  p_page = mem_pool_lifo_elem_to_page(p_page_lifo_elem);
250  } else {
251  /* Let's allocate memory by myself */
252  const size_t page_size = p_global_pool->page_size;
253  ABTU_MEM_LARGEPAGE_TYPE lp_type;
254  void *p_alloc_mem;
255  int abt_errno =
256  ABTU_alloc_largepage(page_size,
257  p_global_pool->alignment_hint,
258  p_global_pool->lp_type_requests,
259  p_global_pool->num_lp_type_requests,
260  &lp_type, &p_alloc_mem);
261  if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
262  /* It fails to take a large page. Let's return. */
263  if (num_headers != 0) {
264  /* p_head has some elements, so let's return them. */
265  p_head->bucket_info.num_headers = num_headers;
266  mem_pool_return_partial_bucket(p_global_pool, p_head);
267  }
268  return abt_errno;
269  }
270  p_page =
271  (ABTI_mem_pool_page *)(((char *)p_alloc_mem) + page_size -
272  sizeof(ABTI_mem_pool_page));
273  p_page->mem = p_alloc_mem;
274  p_page->page_size = page_size;
275  p_page->lp_type = lp_type;
276  p_page->p_mem_extra = p_alloc_mem;
277  p_page->mem_extra_size = page_size - sizeof(ABTI_mem_pool_page);
278  }
279  /* Take some memory left in this page. */
280  int num_provided = p_page->mem_extra_size / header_size;
281  int num_required = num_headers_per_bucket - num_headers;
282  if (num_required < num_provided)
283  num_provided = num_required;
284  ABTI_ASSERT(num_provided != 0);
285 
286  void *p_mem_extra = p_page->p_mem_extra;
287  p_page->p_mem_extra =
288  (void *)(((char *)p_mem_extra) + header_size * num_provided);
289  p_page->mem_extra_size -= header_size * num_provided;
290  /* We've already gotten necessary p_mem_extra from this page. Let's
291  * return it. */
292  if (p_page->mem_extra_size >= header_size) {
293  /* This page still has some extra memory. Someone will use it in
294  * the future. */
295  ABTI_sync_lifo_push(&p_global_pool->mem_page_lifo,
296  &p_page->lifo_elem);
297  } else {
298  /* No extra memory is left in this page. Let's push it to a list
299  * of empty pages. Since mem_page_empty_lifo is push-only and
300  * thus there's no ABA problem, use a simpler lock-free LIFO
301  * algorithm. */
302  void *p_cur_mem_page;
303  do {
304  p_cur_mem_page = ABTD_atomic_acquire_load_ptr(
305  &p_global_pool->p_mem_page_empty);
306  p_page->p_next_empty_page =
307  (ABTI_mem_pool_page *)p_cur_mem_page;
308  } while (!ABTD_atomic_bool_cas_weak_ptr(&p_global_pool
309  ->p_mem_page_empty,
310  p_cur_mem_page,
311  p_page));
312  }
313 
314  size_t header_offset = p_global_pool->header_offset;
315  ABTI_mem_pool_header *p_local_tail =
316  (ABTI_mem_pool_header *)(((char *)p_mem_extra) + header_offset);
317  p_local_tail->p_next = p_head;
318  ABTI_mem_pool_header *p_prev = p_local_tail;
319  if (!p_global_pool->mprotect_config.enabled) {
320  /* Fast path. */
321  for (i = 1; i < num_provided; i++) {
322  ABTI_mem_pool_header *p_cur =
323  (ABTI_mem_pool_header *)(((char *)p_prev) +
324  header_size);
325  p_cur->p_next = p_prev;
326  p_prev = p_cur;
327  }
328  } else {
329  /* Slow path. Use mprotect(). */
330  const ABT_bool check_error =
331  p_global_pool->mprotect_config.check_error;
332  const size_t protect_offset =
333  p_global_pool->mprotect_config.offset;
334  const size_t protect_page_size =
335  p_global_pool->mprotect_config.page_size;
336  const size_t protect_alignment =
337  p_global_pool->mprotect_config.alignment;
338  int abt_errno;
339  abt_errno =
340  protect_memory((void *)(((char *)p_prev) - header_offset +
341  protect_offset),
342  protect_page_size, protect_alignment,
344  if (check_error) {
345  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
346  }
347  for (i = 1; i < num_provided; i++) {
348  ABTI_mem_pool_header *p_cur =
349  (ABTI_mem_pool_header *)(((char *)p_prev) +
350  header_size);
351  p_cur->p_next = p_prev;
352  p_prev = p_cur;
353  abt_errno =
354  protect_memory((void *)(((char *)p_prev) -
355  header_offset + protect_offset),
356  protect_page_size, protect_alignment,
358  if (check_error) {
359  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
360  }
361  }
362  }
363  p_head = p_prev;
364  num_headers += num_provided;
365  if (num_headers == num_headers_per_bucket) {
366  p_head->bucket_info.num_headers = num_headers_per_bucket;
367  *p_bucket = p_head;
368  return ABT_SUCCESS;
369  }
370  }
371  }
372 }
373 
374 void ABTI_mem_pool_return_bucket(ABTI_mem_pool_global_pool *p_global_pool,
375  ABTI_mem_pool_header *bucket)
376 {
377  /* Simply return that bucket to the pool */
378  ABTI_sync_lifo_push(&p_global_pool->bucket_lifo,
379  &bucket->bucket_info.lifo_elem);
380 }
ABT_bool
typedef int ABT_bool
Boolean type.
Definition: abt.h:1043
mem_pool_lifo_elem_to_header
static ABTI_mem_pool_header * mem_pool_lifo_elem_to_header(ABTI_sync_lifo_element *lifo_elem)
Definition: mem_pool.c:17
ABTU_roundup_ptr
static void * ABTU_roundup_ptr(void *ptr, size_t multiple)
Definition: abtu.h:105
ABTU_alloc_largepage
ABTU_ret_err int ABTU_alloc_largepage(size_t size, size_t alignment_hint, const ABTU_MEM_LARGEPAGE_TYPE *requested_types, int num_requested_types, ABTU_MEM_LARGEPAGE_TYPE *p_actual, void **p_ptr)
Definition: largepage.c:90
ABTU_free_largepage
void ABTU_free_largepage(void *ptr, size_t size, ABTU_MEM_LARGEPAGE_TYPE type)
Definition: largepage.c:132
ABTU_mprotect
ABTU_ret_err int ABTU_mprotect(void *addr, size_t size, ABT_bool protect)
Definition: mprotect.c:9
ABTU_likely
#define ABTU_likely(cond)
Definition: abtu.h:119
ABTU_MEM_LARGEPAGE_MMAP_HUGEPAGE
@ ABTU_MEM_LARGEPAGE_MMAP_HUGEPAGE
Definition: abtu.h:312
abti.h
mem_pool_return_partial_bucket
static void mem_pool_return_partial_bucket(ABTI_mem_pool_global_pool *p_global_pool, ABTI_mem_pool_header *bucket)
Definition: mem_pool.c:39
ABTU_MEM_LARGEPAGE_TYPE
ABTU_MEM_LARGEPAGE_TYPE
Definition: abtu.h:308
ABT_SUCCESS
#define ABT_SUCCESS
Error code: the routine returns successfully.
Definition: abt.h:92
ABTU_MEM_LARGEPAGE_MALLOC
@ ABTU_MEM_LARGEPAGE_MALLOC
Definition: abtu.h:309
ABTU_ret_err
#define ABTU_ret_err
Definition: abtu.h:155
protect_memory
static ABTU_ret_err int protect_memory(void *addr, size_t size, size_t page_size, ABT_bool protect, ABT_bool adjust_size)
Definition: mem_pool.c:26
ABT_TRUE
#define ABT_TRUE
True constant for ABT_bool.
Definition: abt.h:784
ABT_FALSE
#define ABT_FALSE
False constant for ABT_bool.
Definition: abt.h:786
mem_pool_lifo_elem_to_page
static ABTI_mem_pool_page * mem_pool_lifo_elem_to_page(ABTI_sync_lifo_element *lifo_elem)
Definition: mem_pool.c:10