ARGOBOTS  dce6e727ffc4ca5b3ffc04cb9517c6689be51ec5
abti_sync_lifo.h
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #ifndef ABTI_SYNC_LIFO_H_INCLUDED
7 #define ABTI_SYNC_LIFO_H_INCLUDED
8 
9 /*
 10  * This header implements a list-based LIFO. The implementation is lock-free
 11  * on architectures that support atomic operations on two consecutive pointers
 12  * (e.g., 128-bit CAS on 64-bit systems), which is required to avoid the ABA
 13  * problem. Otherwise, it falls back to a simple lock and thus becomes blocking.
 14  */
15 
/* Intrusive list node: embed this in the object to be pushed.  The stack
 * links nodes through p_next; it never allocates. */
typedef struct ABTI_sync_lifo_element {
    struct ABTI_sync_lifo_element *p_next; /* Next (older) element, or NULL. */
} ABTI_sync_lifo_element;

/* LIFO head, cache-line aligned to avoid false sharing with neighbors. */
typedef __attribute__((aligned(ABT_CONFIG_STATIC_CACHELINE_SIZE))) struct {
#if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
    /* Top pointer packed with a monotonically increasing tag; the tag is
     * bumped on every update so a recycled pointer cannot pass a CAS
     * unnoticed (ABA problem). */
    ABTD_atomic_tagged_ptr p_top;
#else
    /* Fallback: the spinlock serializes the non-"unsafe" operations. */
    ABTD_spinlock lock;
    ABTD_atomic_ptr p_top;
#endif
} ABTI_sync_lifo;
28 
/* Initialize p_lifo to the empty state.  Must be called before any other
 * operation on the LIFO. */
static inline void ABTI_sync_lifo_init(ABTI_sync_lifo *p_lifo)
{
    ABTI_ASSERT(p_lifo);
#if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
    /* Empty stack: NULL top with the tag counter starting at 0. */
    ABTD_atomic_release_store_non_atomic_tagged_ptr(&p_lifo->p_top, NULL, 0);
#else
    ABTD_spinlock_clear(&p_lifo->lock);
    ABTD_atomic_relaxed_store_ptr(&p_lifo->p_top, NULL);
#endif
}
39 
40 static inline void ABTI_sync_lifo_destroy(ABTI_sync_lifo *p_lifo)
41 {
42  ; /* Do nothing. */
43 }
44 
45 static inline void ABTI_sync_lifo_push_unsafe(ABTI_sync_lifo *p_lifo,
46  ABTI_sync_lifo_element *p_elem)
47 {
48 #if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
49  ABTI_sync_lifo_element *p_cur_top;
50  size_t cur_tag;
51  ABTD_atomic_relaxed_load_non_atomic_tagged_ptr(&p_lifo->p_top,
52  (void **)&p_cur_top,
53  &cur_tag);
54  p_elem->p_next = p_cur_top;
55  ABTD_atomic_relaxed_store_non_atomic_tagged_ptr(&p_lifo->p_top, p_elem,
56  cur_tag + 1);
57 #else
58  ABTI_sync_lifo_element *p_cur_top =
59  (ABTI_sync_lifo_element *)ABTD_atomic_relaxed_load_ptr(&p_lifo->p_top);
60  p_elem->p_next = p_cur_top;
61  ABTD_atomic_relaxed_store_ptr(&p_lifo->p_top, p_elem);
62 #endif
63 }
64 
65 static inline ABTI_sync_lifo_element *
66 ABTI_sync_lifo_pop_unsafe(ABTI_sync_lifo *p_lifo)
67 {
68 #if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
69  ABTI_sync_lifo_element *p_cur_top;
70  size_t cur_tag;
71  ABTD_atomic_relaxed_load_non_atomic_tagged_ptr(&p_lifo->p_top,
72  (void **)&p_cur_top,
73  &cur_tag);
74  if (p_cur_top == NULL)
75  return NULL;
76  ABTI_sync_lifo_element *p_next = p_cur_top->p_next;
77  ABTD_atomic_relaxed_store_non_atomic_tagged_ptr(&p_lifo->p_top, p_next,
78  cur_tag + 1);
79  return p_cur_top;
80 #else
81  ABTI_sync_lifo_element *p_cur_top =
82  (ABTI_sync_lifo_element *)ABTD_atomic_relaxed_load_ptr(&p_lifo->p_top);
83  if (!p_cur_top)
84  return NULL;
85  ABTD_atomic_relaxed_store_ptr(&p_lifo->p_top, p_cur_top->p_next);
86  return p_cur_top;
87 #endif
88 }
89 
90 static inline void ABTI_sync_lifo_push(ABTI_sync_lifo *p_lifo,
91  ABTI_sync_lifo_element *p_elem)
92 {
93 #if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
94  while (1) {
95  ABTI_sync_lifo_element *p_cur_top;
96  size_t cur_tag;
97  ABTD_atomic_acquire_load_non_atomic_tagged_ptr(&p_lifo->p_top,
98  (void **)&p_cur_top,
99  &cur_tag);
100  p_elem->p_next = p_cur_top;
101  /* tag is incremented to avoid the ABA problem. */
102  if (ABTU_likely(ABTD_atomic_bool_cas_weak_tagged_ptr(&p_lifo->p_top,
103  p_cur_top, cur_tag,
104  p_elem,
105  cur_tag + 1))) {
106  return;
107  }
108  }
109 #else
110  ABTD_spinlock_acquire(&p_lifo->lock);
111  ABTI_sync_lifo_push_unsafe(p_lifo, p_elem);
112  ABTD_spinlock_release(&p_lifo->lock);
113 #endif
114 }
115 
116 static inline ABTI_sync_lifo_element *ABTI_sync_lifo_pop(ABTI_sync_lifo *p_lifo)
117 {
118 #if ABTD_ATOMIC_SUPPORT_TAGGED_PTR
119  while (1) {
120  ABTI_sync_lifo_element *p_cur_top;
121  size_t cur_tag;
122  ABTD_atomic_acquire_load_non_atomic_tagged_ptr(&p_lifo->p_top,
123  (void **)&p_cur_top,
124  &cur_tag);
125  if (p_cur_top == NULL)
126  return NULL;
127  ABTI_sync_lifo_element *p_next = p_cur_top->p_next;
128  /* tag is incremented to avoid the ABA problem. */
129  if (ABTU_likely(ABTD_atomic_bool_cas_weak_tagged_ptr(&p_lifo->p_top,
130  p_cur_top, cur_tag,
131  p_next,
132  cur_tag + 1))) {
133  return p_cur_top;
134  }
135  }
136 #else
137  ABTI_sync_lifo_element *p_ret;
138  ABTD_spinlock_acquire(&p_lifo->lock);
139  p_ret = ABTI_sync_lifo_pop_unsafe(p_lifo);
140  ABTD_spinlock_release(&p_lifo->lock);
141  return p_ret;
142 #endif
143 }
144 
145 #endif /* ABTI_SYNC_LIFO_H_INCLUDED */
ABTU_likely
#define ABTU_likely(cond)
Definition: abtu.h:119
__attribute__
typedef __attribute__((aligned(ABT_CONFIG_STATIC_CACHELINE_SIZE))) struct
Definition: abti_sync_lifo.h:20
ABT_CONFIG_STATIC_CACHELINE_SIZE
#define ABT_CONFIG_STATIC_CACHELINE_SIZE
Definition: abt_config.h:81