ARGOBOTS  c6511494322293e01714f56f341b8d2b22c1e3c1
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups
mem_pool.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 #include <stddef.h>
8 
9 static inline ABTI_mem_pool_page *
10 mem_pool_lifo_elem_to_page(ABTI_sync_lifo_element *lifo_elem)
11 {
12  return (ABTI_mem_pool_page *)(((char *)lifo_elem) -
13  offsetof(ABTI_mem_pool_page, lifo_elem));
14 }
15 
16 static inline ABTI_mem_pool_header *
17 mem_pool_lifo_elem_to_header(ABTI_sync_lifo_element *lifo_elem)
18 {
19  return (
20  ABTI_mem_pool_header *)(((char *)lifo_elem) -
21  (offsetof(ABTI_mem_pool_header, bucket_info) +
22  offsetof(ABTI_mem_pool_header_bucket_info,
23  lifo_elem)));
24 }
25 
26 static void
27 mem_pool_return_partial_bucket(ABTI_mem_pool_global_pool *p_global_pool,
28  ABTI_mem_pool_header *bucket)
29 {
30  int i;
31  const int num_headers_per_bucket = p_global_pool->num_headers_per_bucket;
32  /* Return headers in the last bucket to partial_bucket. */
33  ABTI_spinlock_acquire(&p_global_pool->partial_bucket_lock);
34  if (!p_global_pool->partial_bucket) {
35  p_global_pool->partial_bucket = bucket;
36  } else {
37  int num_headers_in_partial_bucket =
38  p_global_pool->partial_bucket->bucket_info.num_headers;
39  int num_headers_in_bucket = bucket->bucket_info.num_headers;
40  if (num_headers_in_partial_bucket + num_headers_in_bucket <
41  num_headers_per_bucket) {
42  /* Connect partial_bucket + bucket. Still not enough to make
43  * a complete bucket. */
44  ABTI_mem_pool_header *partial_bucket_tail =
45  p_global_pool->partial_bucket;
46  for (i = 1; i < num_headers_in_partial_bucket; i++) {
47  partial_bucket_tail = partial_bucket_tail->p_next;
48  }
49  partial_bucket_tail->p_next = bucket;
50  p_global_pool->partial_bucket->bucket_info.num_headers =
51  num_headers_in_partial_bucket + num_headers_in_bucket;
52  } else {
53  /* partial_bucket + bucket can make a complete bucket. */
54  ABTI_mem_pool_header *partial_bucket_header =
55  p_global_pool->partial_bucket;
56  for (i = 1; i < num_headers_per_bucket - num_headers_in_bucket;
57  i++) {
58  partial_bucket_header = partial_bucket_header->p_next;
59  }
60  ABTI_mem_pool_header *new_partial_bucket = NULL;
61  if (num_headers_in_partial_bucket + num_headers_in_bucket !=
62  num_headers_per_bucket) {
63  new_partial_bucket = partial_bucket_header->p_next;
64  new_partial_bucket->bucket_info.num_headers =
65  num_headers_per_bucket -
66  (num_headers_in_partial_bucket + num_headers_in_bucket);
67  }
68  partial_bucket_header->p_next = bucket;
69  ABTI_mem_pool_return_bucket(p_global_pool,
70  p_global_pool->partial_bucket);
71  p_global_pool->partial_bucket = new_partial_bucket;
72  }
73  }
74  ABTI_spinlock_release(&p_global_pool->partial_bucket_lock);
75 }
76 
77 void ABTI_mem_pool_init_global_pool(
78  ABTI_mem_pool_global_pool *p_global_pool, int num_headers_per_bucket,
79  size_t header_size, size_t header_offset, size_t page_size,
80  const ABTU_MEM_LARGEPAGE_TYPE *lp_type_requests, int num_lp_type_requests,
81  size_t alignment_hint)
82 {
83  p_global_pool->num_headers_per_bucket = num_headers_per_bucket;
84  ABTI_ASSERT(header_offset + sizeof(ABTI_mem_pool_header) <= header_size);
85  p_global_pool->header_size = header_size;
86  p_global_pool->header_offset = header_offset;
87  p_global_pool->page_size = page_size;
88 
89  /* Note that lp_type_requests is a constant-sized array */
90  ABTI_ASSERT(num_lp_type_requests <=
91  sizeof(p_global_pool->lp_type_requests) /
92  sizeof(ABTU_MEM_LARGEPAGE_TYPE));
93  p_global_pool->num_lp_type_requests = num_lp_type_requests;
94  memcpy(p_global_pool->lp_type_requests, lp_type_requests,
95  sizeof(ABTU_MEM_LARGEPAGE_TYPE) * num_lp_type_requests);
96  p_global_pool->alignment_hint = alignment_hint;
97 
98  ABTI_sync_lifo_init(&p_global_pool->mem_page_lifo);
99  ABTD_atomic_relaxed_store_ptr(&p_global_pool->p_mem_page_empty, NULL);
100  ABTI_sync_lifo_init(&p_global_pool->bucket_lifo);
101  ABTI_spinlock_clear(&p_global_pool->partial_bucket_lock);
102  p_global_pool->partial_bucket = NULL;
103 }
104 
105 void ABTI_mem_pool_destroy_global_pool(ABTI_mem_pool_global_pool *p_global_pool)
106 {
107  /* All local pools must be released in advance.
108  * Because all headers are from memory pages, each individual header does
109  * not need to be freed. */
110  ABTI_mem_pool_page *p_page;
111  ABTI_sync_lifo_element *p_page_lifo_elem;
112  while ((p_page_lifo_elem =
113  ABTI_sync_lifo_pop_unsafe(&p_global_pool->mem_page_lifo))) {
114  p_page = mem_pool_lifo_elem_to_page(p_page_lifo_elem);
115  ABTU_free_largepage(p_page->mem, p_page->page_size, p_page->lp_type);
116  }
117  p_page = (ABTI_mem_pool_page *)ABTD_atomic_relaxed_load_ptr(
118  &p_global_pool->p_mem_page_empty);
119  while (p_page) {
120  ABTI_mem_pool_page *p_next = p_page->p_next_empty_page;
121  ABTU_free_largepage(p_page->mem, p_page->page_size, p_page->lp_type);
122  p_page = p_next;
123  }
124  ABTI_sync_lifo_destroy(&p_global_pool->bucket_lifo);
125  ABTI_sync_lifo_destroy(&p_global_pool->mem_page_lifo);
126 }
127 
128 void ABTI_mem_pool_init_local_pool(ABTI_mem_pool_local_pool *p_local_pool,
129  ABTI_mem_pool_global_pool *p_global_pool)
130 {
131  p_local_pool->p_global_pool = p_global_pool;
132  p_local_pool->num_headers_per_bucket =
133  p_global_pool->num_headers_per_bucket;
134  /* There must be always at least one header in the local pool.
135  * Let's take one bucket. */
136  int abt_errno =
137  ABTI_mem_pool_take_bucket(p_global_pool, &p_local_pool->buckets[0]);
138  ABTI_ASSERT(abt_errno == ABT_SUCCESS);
139  p_local_pool->bucket_index = 0;
140 }
141 
142 void ABTI_mem_pool_destroy_local_pool(ABTI_mem_pool_local_pool *p_local_pool)
143 {
144  /* Return the remaining buckets to the global pool. */
145  int bucket_index = p_local_pool->bucket_index;
146  int i;
147  for (i = 0; i < bucket_index; i++) {
148  ABTI_mem_pool_return_bucket(p_local_pool->p_global_pool,
149  p_local_pool->buckets[i]);
150  }
151  const int num_headers_per_bucket = p_local_pool->num_headers_per_bucket;
152  ABTI_mem_pool_header *cur_bucket = p_local_pool->buckets[bucket_index];
153  if (cur_bucket->bucket_info.num_headers == num_headers_per_bucket) {
154  /* The last bucket is also full. Return the last bucket as well. */
155  ABTI_mem_pool_return_bucket(p_local_pool->p_global_pool,
156  p_local_pool->buckets[bucket_index]);
157  } else {
158  mem_pool_return_partial_bucket(p_local_pool->p_global_pool, cur_bucket);
159  }
160 }
161 
/* Obtain one complete bucket (num_headers_per_bucket headers) from the
 * global pool.  Fast path: pop a ready bucket from bucket_lifo.  Slow path:
 * assemble a bucket by carving headers out of memory pages, popping pages
 * with leftover space first and allocating new large pages as needed.
 * Returns ABT_SUCCESS and sets *p_bucket, or an error from
 * ABTU_alloc_largepage (any partially assembled chain is given back to the
 * pool before returning the error). */
ABTU_ret_err int
ABTI_mem_pool_take_bucket(ABTI_mem_pool_global_pool *p_global_pool,
                          ABTI_mem_pool_header **p_bucket)
{
    /* Try to get a bucket. */
    ABTI_sync_lifo_element *p_popped_bucket_lifo_elem =
        ABTI_sync_lifo_pop(&p_global_pool->bucket_lifo);
    const int num_headers_per_bucket = p_global_pool->num_headers_per_bucket;
    if (ABTU_likely(p_popped_bucket_lifo_elem)) {
        /* Use this bucket. */
        ABTI_mem_pool_header *popped_bucket =
            mem_pool_lifo_elem_to_header(p_popped_bucket_lifo_elem);
        /* Buckets stored in bucket_lifo are always complete. */
        popped_bucket->bucket_info.num_headers = num_headers_per_bucket;
        *p_bucket = popped_bucket;
        return ABT_SUCCESS;
    } else {
        /* Allocate headers by myself */
        const size_t header_size = p_global_pool->header_size;
        int num_headers = 0, i;
        /* p_head: front of the header chain assembled so far. */
        ABTI_mem_pool_header *p_head = NULL;
        while (1) {
            ABTI_mem_pool_page *p_page;
            ABTI_sync_lifo_element *p_page_lifo_elem;
            /* Before really allocating memory, check if a page has unused
             * memory. */
            if ((p_page_lifo_elem =
                     ABTI_sync_lifo_pop(&p_global_pool->mem_page_lifo))) {
                /* Use a page popped from mem_page_lifo */
                p_page = mem_pool_lifo_elem_to_page(p_page_lifo_elem);
            } else {
                /* Let's allocate memory by myself */
                const size_t page_size = p_global_pool->page_size;
                ABTU_MEM_LARGEPAGE_TYPE lp_type;
                void *p_alloc_mem;
                int abt_errno =
                    ABTU_alloc_largepage(page_size,
                                         p_global_pool->alignment_hint,
                                         p_global_pool->lp_type_requests,
                                         p_global_pool->num_lp_type_requests,
                                         &lp_type, &p_alloc_mem);
                if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
                    /* It fails to take a large page. Let's return. */
                    if (num_headers != 0) {
                        /* p_head has some elements, so let's return them. */
                        p_head->bucket_info.num_headers = num_headers;
                        mem_pool_return_partial_bucket(p_global_pool, p_head);
                    }
                    return abt_errno;
                }
                /* The page descriptor is placed at the very end of the
                 * allocation; the bytes before it are carved into headers. */
                p_page =
                    (ABTI_mem_pool_page *)(((char *)p_alloc_mem) + page_size -
                                           sizeof(ABTI_mem_pool_page));
                p_page->mem = p_alloc_mem;
                p_page->page_size = page_size;
                p_page->lp_type = lp_type;
                p_page->p_mem_extra = p_alloc_mem;
                p_page->mem_extra_size = page_size - sizeof(ABTI_mem_pool_page);
            }
            /* Take some memory left in this page. */
            int num_provided = p_page->mem_extra_size / header_size;
            int num_required = num_headers_per_bucket - num_headers;
            /* Take at most what is still needed for this bucket. */
            if (num_required < num_provided)
                num_provided = num_required;
            /* A page coming from mem_page_lifo always holds space for at
             * least one header, so num_provided must be positive here. */
            ABTI_ASSERT(num_provided != 0);

            void *p_mem_extra = p_page->p_mem_extra;
            p_page->p_mem_extra =
                (void *)(((char *)p_mem_extra) + header_size * num_provided);
            p_page->mem_extra_size -= header_size * num_provided;
            /* We've already gotten necessary p_mem_extra from this page. Let's
             * return it. */
            if (p_page->mem_extra_size >= header_size) {
                /* This page still has some extra memory. Someone will use it in
                 * the future. */
                ABTI_sync_lifo_push(&p_global_pool->mem_page_lifo,
                                    &p_page->lifo_elem);
            } else {
                /* No extra memory is left in this page. Let's push it to a list
                 * of empty pages. Since mem_page_empty_lifo is push-only and
                 * thus there's no ABA problem, use a simpler lock-free LIFO
                 * algorithm. */
                void *p_cur_mem_page;
                do {
                    p_cur_mem_page = ABTD_atomic_acquire_load_ptr(
                        &p_global_pool->p_mem_page_empty);
                    p_page->p_next_empty_page =
                        (ABTI_mem_pool_page *)p_cur_mem_page;
                } while (!ABTD_atomic_bool_cas_weak_ptr(&p_global_pool
                                                             ->p_mem_page_empty,
                                                        p_cur_mem_page,
                                                        p_page));
            }

            /* Thread the num_provided headers taken from this page onto the
             * front of the chain; each header struct starts header_offset
             * bytes into its header_size slot. */
            size_t header_offset = p_global_pool->header_offset;
            ABTI_mem_pool_header *p_local_tail =
                (ABTI_mem_pool_header *)(((char *)p_mem_extra) + header_offset);
            p_local_tail->p_next = p_head;
            ABTI_mem_pool_header *p_prev = p_local_tail;
            for (i = 1; i < num_provided; i++) {
                ABTI_mem_pool_header *p_cur =
                    (ABTI_mem_pool_header *)(((char *)p_prev) + header_size);
                p_cur->p_next = p_prev;
                p_prev = p_cur;
            }
            p_head = p_prev;
            num_headers += num_provided;
            if (num_headers == num_headers_per_bucket) {
                /* A complete bucket has been assembled. */
                p_head->bucket_info.num_headers = num_headers_per_bucket;
                *p_bucket = p_head;
                return ABT_SUCCESS;
            }
            /* Otherwise loop to take more headers from another page. */
        }
    }
}
276 
277 void ABTI_mem_pool_return_bucket(ABTI_mem_pool_global_pool *p_global_pool,
278  ABTI_mem_pool_header *bucket)
279 {
280  /* Simply return that bucket to the pool */
281  ABTI_sync_lifo_push(&p_global_pool->bucket_lifo,
282  &bucket->bucket_info.lifo_elem);
283 }
#define ABTU_likely(cond)
Definition: abtu.h:17
ABTU_ret_err int ABTU_alloc_largepage(size_t size, size_t alignment_hint, const ABTU_MEM_LARGEPAGE_TYPE *requested_types, int num_requested_types, ABTU_MEM_LARGEPAGE_TYPE *p_actual, void **p_ptr)
Definition: largepage.c:90
static ABTI_mem_pool_page * mem_pool_lifo_elem_to_page(ABTI_sync_lifo_element *lifo_elem)
Definition: mem_pool.c:10
#define ABT_SUCCESS
Definition: abt.h:64
void ABTU_free_largepage(void *ptr, size_t size, ABTU_MEM_LARGEPAGE_TYPE type)
Definition: largepage.c:132
static void mem_pool_return_partial_bucket(ABTI_mem_pool_global_pool *p_global_pool, ABTI_mem_pool_header *bucket)
Definition: mem_pool.c:27
ABTU_MEM_LARGEPAGE_TYPE
Definition: abtu.h:203
static ABTI_mem_pool_header * mem_pool_lifo_elem_to_header(ABTI_sync_lifo_element *lifo_elem)
Definition: mem_pool.c:17
#define ABTU_ret_err
Definition: abtu.h:49