ARGOBOTS  ba497793e96f0026edda18743b158278a6a1f4ab
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups
abti_ythread_htable.h
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #ifndef ABTI_YTHREAD_HTABLE_H_INCLUDED
7 #define ABTI_YTHREAD_HTABLE_H_INCLUDED
8 
9 #include "abt_config.h"
10 
11 #if defined(HAVE_LH_LOCK_H)
12 #include <lh_lock.h>
13 #elif defined(HAVE_CLH_H)
14 #include <clh.h>
15 #else
16 #define USE_PTHREAD_MUTEX
17 #endif
18 
20  ABTD_atomic_uint32 mutex; /* can be initialized by just assigning 0*/
21  uint32_t num_handovers;
22  uint32_t num_threads;
23  uint32_t pad0;
26  char pad1[64 - sizeof(ABTD_atomic_uint32) - sizeof(uint32_t) * 3 -
27  sizeof(ABTI_ythread *) * 2];
28 
29  /* low priority queue */
30  ABTD_atomic_uint32 low_mutex; /* can be initialized by just assigning 0*/
31  uint32_t low_num_threads;
34  char pad2[64 - sizeof(ABTD_atomic_uint32) - sizeof(uint32_t) -
35  sizeof(ABTI_ythread *) * 2];
36 
37  /* two doubly-linked lists */
42  char pad3[64 - sizeof(ABTI_ythread_queue *) * 4];
43 };
44 
46 #if defined(HAVE_LH_LOCK_H)
47  lh_lock_t mutex;
48 #elif defined(HAVE_CLH_H)
49  clh_lock_t mutex;
50 #elif defined(USE_PTHREAD_MUTEX)
51  pthread_mutex_t mutex;
52 #else
53  ABTI_spinlock mutex; /* To protect table */
54 #endif
56  int num_rows;
58 
59  ABTI_ythread_queue *h_list; /* list of non-empty high prio. queues */
60  ABTI_ythread_queue *l_list; /* list of non-empty low prio. queues */
61 };
62 
/* Acquire/release the table-wide mutex; the primitive must match the mutex
 * type selected in struct ABTI_ythread_htable above.  `m` is the mutex
 * object itself (not a pointer), hence the `&m` in every expansion. */
#if defined(HAVE_LH_LOCK_H)
#define ABTI_THREAD_HTABLE_LOCK(m) lh_acquire_lock(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) lh_release_lock(&m)
#elif defined(HAVE_CLH_H)
#define ABTI_THREAD_HTABLE_LOCK(m) clh_acquire(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) clh_release(&m)
#elif defined(USE_PTHREAD_MUTEX)
#define ABTI_THREAD_HTABLE_LOCK(m) pthread_mutex_lock(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) pthread_mutex_unlock(&m)
#else
#define ABTI_THREAD_HTABLE_LOCK(m) ABTI_spinlock_acquire(&m)
#define ABTI_THREAD_HTABLE_UNLOCK(m) ABTI_spinlock_release(&m)
#endif
76 
78 {
79  while (!ABTD_atomic_bool_cas_weak_uint32(&p_queue->mutex, 0, 1)) {
80  while (ABTD_atomic_acquire_load_uint32(&p_queue->mutex) != 0)
81  ;
82  }
83 }
84 
86 {
88 }
89 
90 static inline void
92 {
93  while (!ABTD_atomic_bool_cas_weak_uint32(&p_queue->low_mutex, 0, 1)) {
94  while (ABTD_atomic_acquire_load_uint32(&p_queue->low_mutex) != 0)
95  ;
96  }
97 }
98 
99 static inline void
101 {
103 }
104 
106  ABTI_ythread_queue *p_node)
107 {
108  ABTI_ythread_queue *p_curr = p_htable->h_list;
109  if (!p_curr) {
110  p_node->p_h_next = p_node;
111  p_node->p_h_prev = p_node;
112  p_htable->h_list = p_node;
113  } else if (!p_node->p_h_next) {
114  p_node->p_h_next = p_curr;
115  p_node->p_h_prev = p_curr->p_h_prev;
116  p_curr->p_h_prev->p_h_next = p_node;
117  p_curr->p_h_prev = p_node;
118  }
119 }
120 
122 {
123  ABTI_ythread_queue *p_prev, *p_next;
124  ABTI_ythread_queue *p_node = p_htable->h_list;
125 
126  if (p_node == p_node->p_h_next) {
127  p_node->p_h_next = NULL;
128  p_node->p_h_prev = NULL;
129  p_htable->h_list = NULL;
130  } else {
131  p_prev = p_node->p_h_prev;
132  p_next = p_node->p_h_next;
133  p_prev->p_h_next = p_next;
134  p_next->p_h_prev = p_prev;
135  p_node->p_h_next = NULL;
136  p_node->p_h_prev = NULL;
137  p_htable->h_list = p_next;
138  }
139 }
140 
142  ABTI_ythread_queue *p_node)
143 {
144  ABTI_ythread_queue *p_curr = p_htable->l_list;
145  if (!p_curr) {
146  p_node->p_l_next = p_node;
147  p_node->p_l_prev = p_node;
148  p_htable->l_list = p_node;
149  } else if (!p_node->p_l_next) {
150  p_node->p_l_next = p_curr;
151  p_node->p_l_prev = p_curr->p_l_prev;
152  p_curr->p_l_prev->p_l_next = p_node;
153  p_curr->p_l_prev = p_node;
154  }
155 }
156 
158 {
159  ABTI_ythread_queue *p_prev, *p_next;
160  ABTI_ythread_queue *p_node = p_htable->l_list;
161 
162  if (p_node == p_node->p_l_next) {
163  p_node->p_l_next = NULL;
164  p_node->p_l_prev = NULL;
165  p_htable->l_list = NULL;
166  } else {
167  p_prev = p_node->p_l_prev;
168  p_next = p_node->p_l_next;
169  p_prev->p_l_next = p_next;
170  p_next->p_l_prev = p_prev;
171  p_node->p_l_next = NULL;
172  p_node->p_l_prev = NULL;
173  p_htable->l_list = p_next;
174  }
175 }
176 
177 #endif /* ABTI_YTHREAD_HTABLE_H_INCLUDED */
static void ABTI_ythread_queue_acquire_mutex(ABTI_ythread_queue *p_queue)
char pad2[64-sizeof(ABTD_atomic_uint32)-sizeof(uint32_t)-sizeof(ABTI_ythread *)*2]
ABTI_ythread_queue * p_l_prev
static void ABTI_ythread_queue_acquire_low_mutex(ABTI_ythread_queue *p_queue)
static void ABTD_atomic_release_store_uint32(ABTD_atomic_uint32 *ptr, uint32_t val)
Definition: abtd_atomic.h:1100
struct ABTD_atomic_uint32 ABTD_atomic_uint32
static void ABTI_ythread_queue_release_mutex(ABTI_ythread_queue *p_queue)
ABTI_ythread * low_head
ABTD_atomic_uint32 num_elems
static void ABTI_ythread_htable_add_h_node(ABTI_ythread_htable *p_htable, ABTI_ythread_queue *p_node)
char pad1[64-sizeof(ABTD_atomic_uint32)-sizeof(uint32_t)*3-sizeof(ABTI_ythread *)*2]
ABTI_ythread_queue * h_list
ABTD_atomic_uint32 mutex
static uint32_t ABTD_atomic_acquire_load_uint32(const ABTD_atomic_uint32 *ptr)
Definition: abtd_atomic.h:928
ABTI_ythread * low_tail
static int ABTD_atomic_bool_cas_weak_uint32(ABTD_atomic_uint32 *ptr, uint32_t oldv, uint32_t newv)
Definition: abtd_atomic.h:361
ABTI_ythread_queue * l_list
ABTI_ythread_queue * queue
ABTD_atomic_uint32 low_mutex
static void ABTI_ythread_htable_del_l_head(ABTI_ythread_htable *p_htable)
ABTI_ythread_queue * p_l_next
static void ABTI_ythread_queue_release_low_mutex(ABTI_ythread_queue *p_queue)
ABTI_ythread_queue * p_h_next
ABTI_ythread_queue * p_h_prev
static void ABTI_ythread_htable_del_h_head(ABTI_ythread_htable *p_htable)
char pad3[64-sizeof(ABTI_ythread_queue *)*4]
static void ABTI_ythread_htable_add_l_node(ABTI_ythread_htable *p_htable, ABTI_ythread_queue *p_node)