ARGOBOTS
abti_thread_htable.h
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#ifndef ABTI_THREAD_HTABLE_H_INCLUDED
#define ABTI_THREAD_HTABLE_H_INCLUDED

#include "abt_config.h"

#if defined(HAVE_LH_LOCK_H)
#include <lh_lock.h>
#elif defined(HAVE_CLH_H)
#include <clh.h>
#else
#define USE_PTHREAD_MUTEX
#endif

struct ABTI_thread_queue {
    ABTD_atomic_uint32 mutex; /* can be initialized by just assigning 0 */
    uint32_t num_handovers;
    uint32_t num_threads;
    uint32_t pad0;
    ABTI_thread *head;
    ABTI_thread *tail;
    char pad1[64 - sizeof(ABTD_atomic_uint32) - sizeof(uint32_t) * 3 -
              sizeof(ABTI_thread *) * 2];

    /* low priority queue */
    ABTD_atomic_uint32 low_mutex; /* can be initialized by just assigning 0 */
    uint32_t low_num_threads;
    ABTI_thread *low_head;
    ABTI_thread *low_tail;
    char pad2[64 - sizeof(ABTD_atomic_uint32) - sizeof(uint32_t) -
              sizeof(ABTI_thread *) * 2];

    /* two doubly-linked lists */
    ABTI_thread_queue *p_h_next;
    ABTI_thread_queue *p_h_prev;
    ABTI_thread_queue *p_l_next;
    ABTI_thread_queue *p_l_prev;
    char pad3[64 - sizeof(ABTI_thread_queue *) * 4];
};
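
The three padded sections are sized so that the high-priority queue (with its spinlock word), the low-priority queue (with its spinlock word), and the list links each fill a separate 64-byte cache line, which keeps the two locks from false-sharing. A minimal, self-contained sketch of the same padding idiom, using stand-in types rather than the Argobots definitions:

/* Illustrative stand-in (not Argobots code): pad one locked section of a
 * queue out to a full 64-byte cache line. */
#include <stdint.h>

typedef struct {
    uint32_t mutex; /* stand-in for ABTD_atomic_uint32 */
    uint32_t num_threads;
    void *head;
    void *tail;
    char pad[64 - sizeof(uint32_t) * 2 - sizeof(void *) * 2];
} example_padded_section;

/* On both ILP32 and LP64 layouts the section fills exactly one cache line. */
_Static_assert(sizeof(example_padded_section) == 64, "one cache line per section");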

struct ABTI_thread_htable {
#if defined(HAVE_LH_LOCK_H)
    lh_lock_t mutex;
#elif defined(HAVE_CLH_H)
    clh_lock_t mutex;
#elif defined(USE_PTHREAD_MUTEX)
    pthread_mutex_t mutex;
#else
    ABTI_spinlock mutex; /* To protect table */
#endif
    ABTD_atomic_uint32 num_elems;
    uint32_t num_rows;
    ABTI_thread_queue *queue;

    ABTI_thread_queue *h_list; /* list of non-empty high prio. queues */
    ABTI_thread_queue *l_list; /* list of non-empty low prio. queues */
};
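
The allocation and hashing policy for the table live elsewhere in Argobots; a hedged sketch of how a bucket would be selected, assuming `queue` points to an array of `num_rows` buckets (the helper below is purely illustrative and not part of this header):

/* Hypothetical helper (not from this header): pick the bucket for a key,
 * assuming queue is an array of num_rows ABTI_thread_queue buckets. */
static inline ABTI_thread_queue *
example_htable_bucket(ABTI_thread_htable *p_htable, uint32_t key)
{
    return &p_htable->queue[key % p_htable->num_rows];
}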

#if defined(HAVE_LH_LOCK_H)
#define ABTI_THREAD_HTABLE_LOCK(m) lh_acquire_lock(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) lh_release_lock(&m)
#elif defined(HAVE_CLH_H)
#define ABTI_THREAD_HTABLE_LOCK(m) clh_acquire(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) clh_release(&m)
#elif defined(USE_PTHREAD_MUTEX)
#define ABTI_THREAD_HTABLE_LOCK(m) pthread_mutex_lock(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) pthread_mutex_unlock(&m)
#else
#define ABTI_THREAD_HTABLE_LOCK(m) ABTI_spinlock_acquire(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) ABTI_spinlock_release(&m)
#endif
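
Whichever lock implementation is selected at configure time, callers see the same two macros, so the critical-section pattern stays uniform. A hedged usage sketch; only the macro and field names come from this header, and the guarded operation is illustrative:

ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);
/* ... update p_htable->h_list / p_htable->l_list under the table lock ... */
ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);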

static inline void ABTI_thread_queue_acquire_mutex(ABTI_thread_queue *p_queue)
{
    while (!ABTD_atomic_bool_cas_weak_uint32(&p_queue->mutex, 0, 1)) {
        while (ABTD_atomic_acquire_load_uint32(&p_queue->mutex) != 0)
            ;
    }
}

static inline void ABTI_thread_queue_release_mutex(ABTI_thread_queue *p_queue)
{
    ABTD_atomic_release_store_uint32(&p_queue->mutex, 0);
}

static inline void
ABTI_thread_queue_acquire_low_mutex(ABTI_thread_queue *p_queue)
{
    while (!ABTD_atomic_bool_cas_weak_uint32(&p_queue->low_mutex, 0, 1)) {
        while (ABTD_atomic_acquire_load_uint32(&p_queue->low_mutex) != 0)
            ;
    }
}

static inline void
ABTI_thread_queue_release_low_mutex(ABTI_thread_queue *p_queue)
{
    ABTD_atomic_release_store_uint32(&p_queue->low_mutex, 0);
}
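
The four helpers above implement a test-and-test-and-set spinlock over each queue's `mutex`/`low_mutex` word: a weak CAS tries to flip 0 to 1, and on failure the caller spins on plain acquire loads until the lock looks free before retrying. The ABTD atomic wrappers are defined elsewhere in Argobots; a self-contained sketch of the same pattern in portable C11 atomics (stand-in type, not the Argobots implementation):

#include <stdatomic.h>

typedef struct {
    atomic_uint mutex; /* stand-in for ABTD_atomic_uint32 */
} example_ttas_lock;

static inline void example_ttas_acquire(example_ttas_lock *p_lock)
{
    unsigned expected = 0;
    /* Test-and-test-and-set: CAS 0 -> 1; while it fails, spin on cheap
     * loads so the lock word stays shared until it is released. */
    while (!atomic_compare_exchange_weak_explicit(&p_lock->mutex, &expected, 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed)) {
        expected = 0;
        while (atomic_load_explicit(&p_lock->mutex, memory_order_acquire) != 0)
            ;
    }
}

static inline void example_ttas_release(example_ttas_lock *p_lock)
{
    atomic_store_explicit(&p_lock->mutex, 0, memory_order_release);
}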

static inline void ABTI_thread_htable_add_h_node(ABTI_thread_htable *p_htable,
                                                 ABTI_thread_queue *p_node)
{
    ABTI_thread_queue *p_curr = p_htable->h_list;
    if (!p_curr) {
        p_node->p_h_next = p_node;
        p_node->p_h_prev = p_node;
        p_htable->h_list = p_node;
    } else if (!p_node->p_h_next) {
        p_node->p_h_next = p_curr;
        p_node->p_h_prev = p_curr->p_h_prev;
        p_curr->p_h_prev->p_h_next = p_node;
        p_curr->p_h_prev = p_node;
    }
}

static inline void ABTI_thread_htable_del_h_head(ABTI_thread_htable *p_htable)
{
    ABTI_thread_queue *p_prev, *p_next;
    ABTI_thread_queue *p_node = p_htable->h_list;

    if (p_node == p_node->p_h_next) {
        p_node->p_h_next = NULL;
        p_node->p_h_prev = NULL;
        p_htable->h_list = NULL;
    } else {
        p_prev = p_node->p_h_prev;
        p_next = p_node->p_h_next;
        p_prev->p_h_next = p_next;
        p_next->p_h_prev = p_prev;
        p_node->p_h_next = NULL;
        p_node->p_h_prev = NULL;
        p_htable->h_list = p_next;
    }
}

static inline void ABTI_thread_htable_add_l_node(ABTI_thread_htable *p_htable,
                                                 ABTI_thread_queue *p_node)
{
    ABTI_thread_queue *p_curr = p_htable->l_list;
    if (!p_curr) {
        p_node->p_l_next = p_node;
        p_node->p_l_prev = p_node;
        p_htable->l_list = p_node;
    } else if (!p_node->p_l_next) {
        p_node->p_l_next = p_curr;
        p_node->p_l_prev = p_curr->p_l_prev;
        p_curr->p_l_prev->p_l_next = p_node;
        p_curr->p_l_prev = p_node;
    }
}

static inline void ABTI_thread_htable_del_l_head(ABTI_thread_htable *p_htable)
{
    ABTI_thread_queue *p_prev, *p_next;
    ABTI_thread_queue *p_node = p_htable->l_list;

    if (p_node == p_node->p_l_next) {
        p_node->p_l_next = NULL;
        p_node->p_l_prev = NULL;
        p_htable->l_list = NULL;
    } else {
        p_prev = p_node->p_l_prev;
        p_next = p_node->p_l_next;
        p_prev->p_l_next = p_next;
        p_next->p_l_prev = p_prev;
        p_node->p_l_next = NULL;
        p_node->p_l_prev = NULL;
        p_htable->l_list = p_next;
    }
}
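
The h_ and l_ helpers above are the same algorithm instantiated for the two link fields: a bucket is appended at the tail of a circular, doubly linked list of non-empty buckets only if it is not already linked (its next pointer is NULL), and deleting the head either advances the list head or clears it when the last bucket is removed. A self-contained sketch of that shared discipline on a stand-in node type (illustrative only, not Argobots code):

#include <stddef.h>

typedef struct example_node {
    struct example_node *p_next, *p_prev;
} example_node;

/* Append p_node at the tail of the circular list rooted at *pp_head,
 * unless it is already linked (non-NULL p_next). */
static void example_list_add(example_node **pp_head, example_node *p_node)
{
    example_node *p_curr = *pp_head;
    if (!p_curr) {
        p_node->p_next = p_node;
        p_node->p_prev = p_node;
        *pp_head = p_node;
    } else if (!p_node->p_next) {
        p_node->p_next = p_curr;
        p_node->p_prev = p_curr->p_prev;
        p_curr->p_prev->p_next = p_node;
        p_curr->p_prev = p_node;
    }
}

/* Unlink the current head and advance it (NULL once the list is empty). */
static void example_list_del_head(example_node **pp_head)
{
    example_node *p_node = *pp_head;
    if (p_node == p_node->p_next) {
        p_node->p_next = p_node->p_prev = NULL;
        *pp_head = NULL;
    } else {
        example_node *p_prev = p_node->p_prev;
        example_node *p_next = p_node->p_next;
        p_prev->p_next = p_next;
        p_next->p_prev = p_prev;
        p_node->p_next = p_node->p_prev = NULL;
        *pp_head = p_next;
    }
}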

#endif /* ABTI_THREAD_HTABLE_H_INCLUDED */