ARGOBOTS  ba497793e96f0026edda18743b158278a6a1f4ab
ythread_htable.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#include "abti.h"
#include "abti_ythread_htable.h"

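/* Allocates a hash table with num_rows 64-byte-aligned queues and
 * initializes its mutex; the mutex implementation (lh_lock, CLH,
 * pthread_mutex, or ABTI_spinlock) is chosen at configure time. */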
ABTU_ret_err int ABTI_ythread_htable_create(uint32_t num_rows,
                                            ABTI_ythread_htable **pp_htable)
{
    ABTI_STATIC_ASSERT(sizeof(ABTI_ythread_queue) == 192);

    int abt_errno;
    ABTI_ythread_htable *p_htable;
    size_t q_size = num_rows * sizeof(ABTI_ythread_queue);

    abt_errno = ABTU_malloc(sizeof(ABTI_ythread_htable), (void **)&p_htable);
    ABTI_CHECK_ERROR(abt_errno);

    abt_errno = ABTU_memalign(64, q_size, (void **)&p_htable->queue);
    if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
        ABTU_free(p_htable);
        return abt_errno;
    }
    memset(p_htable->queue, 0, q_size);

#if defined(HAVE_LH_LOCK_H)
    lh_lock_init(&p_htable->mutex);
#elif defined(HAVE_CLH_H)
    clh_init(&p_htable->mutex);
#elif defined(USE_PTHREAD_MUTEX)
    int ret = pthread_mutex_init(&p_htable->mutex, NULL);
    if (ret) {
        ABTU_free(p_htable->queue);
        ABTU_free(p_htable);
        return ABT_ERR_OTHER;
    }
#else
    ABTI_spinlock_clear(&p_htable->mutex);
#endif
    ABTD_atomic_relaxed_store_uint32(&p_htable->num_elems, 0);
    p_htable->num_rows = num_rows;
    p_htable->h_list = NULL;
    p_htable->l_list = NULL;
    *pp_htable = p_htable;
    return ABT_SUCCESS;
}

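/* Frees the hash table.  All elements must have been removed first
 * (num_elems must be zero). */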
void ABTI_ythread_htable_free(ABTI_ythread_htable *p_htable)
{
    ABTI_ASSERT(ABTD_atomic_relaxed_load_uint32(&p_htable->num_elems) == 0);

#if defined(HAVE_LH_LOCK_H)
    lh_lock_destroy(&p_htable->mutex);
#elif defined(HAVE_CLH_H)
    clh_destroy(&p_htable->mutex);
#elif defined(USE_PTHREAD_MUTEX)
    int ret = pthread_mutex_destroy(&p_htable->mutex);
    assert(!ret);
#else
    /* ABTI_spinlock needs no finalization. */
#endif
    ABTU_free(p_htable->queue);
    ABTU_free(p_htable);
}

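/* Appends p_ythread to the tail of the idx-th queue under that queue's
 * mutex and increments the table-wide element counter. */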
void ABTI_ythread_htable_push(ABTI_ythread_htable *p_htable, int idx,
                              ABTI_ythread *p_ythread)
{
    ABTI_ythread_queue *p_queue;

    if (idx >= p_htable->num_rows) {
        ABTI_ASSERT(0);
        ABTU_unreachable();
    }

    /* Add p_ythread to the end of the idx-th row */
    p_queue = &p_htable->queue[idx];
    ABTI_ythread_queue_acquire_mutex(p_queue);
    if (p_queue->head == NULL) {
        p_queue->head = p_ythread;
        p_queue->tail = p_ythread;
    } else {
        p_queue->tail->thread.p_next = &p_ythread->thread;
        p_queue->tail = p_ythread;
    }
    p_queue->num_threads++;
    ABTI_ythread_queue_release_mutex(p_queue);
    ABTD_atomic_fetch_add_uint32(&p_htable->num_elems, 1);
}

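/* Same as ABTI_ythread_htable_push(), but appends to the idx-th queue's
 * low list (low_head/low_tail). */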
void ABTI_ythread_htable_push_low(ABTI_ythread_htable *p_htable, int idx,
                                  ABTI_ythread *p_ythread)
{
    ABTI_ythread_queue *p_queue;

    if (idx >= p_htable->num_rows) {
        ABTI_ASSERT(0);
        ABTU_unreachable();
    }

    /* Add p_ythread to the end of the idx-th row */
    p_queue = &p_htable->queue[idx];
    ABTI_ythread_queue_acquire_low_mutex(p_queue);
    if (p_queue->low_head == NULL) {
        p_queue->low_head = p_ythread;
        p_queue->low_tail = p_ythread;
    } else {
        p_queue->low_tail->thread.p_next = &p_ythread->thread;
        p_queue->low_tail = p_ythread;
    }
    p_queue->low_num_threads++;
    ABTI_ythread_queue_release_low_mutex(p_queue);
    ABTD_atomic_fetch_add_uint32(&p_htable->num_elems, 1);
}

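/* Removes and returns the ULT at the head of p_queue, or returns NULL if
 * the queue is empty. */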
ABTI_ythread *ABTI_ythread_htable_pop(ABTI_ythread_htable *p_htable,
                                      ABTI_ythread_queue *p_queue)
{
    ABTI_ythread *p_ythread = NULL;

    ABTI_ythread_queue_acquire_mutex(p_queue);
    if (p_queue->head) {
        ABTD_atomic_fetch_sub_uint32(&p_htable->num_elems, 1);
        p_ythread = p_queue->head;
        if (p_queue->head == p_queue->tail) {
            p_queue->head = NULL;
            p_queue->tail = NULL;
        } else {
            p_queue->head = ABTI_thread_get_ythread(p_ythread->thread.p_next);
        }

        p_queue->num_threads--;
    }
    ABTI_ythread_queue_release_mutex(p_queue);

    return p_ythread;
}

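/* Same as ABTI_ythread_htable_pop(), but takes from the queue's low list. */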
ABTI_ythread *ABTI_ythread_htable_pop_low(ABTI_ythread_htable *p_htable,
                                          ABTI_ythread_queue *p_queue)
{
    ABTI_ythread *p_ythread = NULL;

    ABTI_ythread_queue_acquire_low_mutex(p_queue);
    if (p_queue->low_head) {
        ABTD_atomic_fetch_sub_uint32(&p_htable->num_elems, 1);
        p_ythread = p_queue->low_head;
        if (p_queue->low_head == p_queue->low_tail) {
            p_queue->low_head = NULL;
            p_queue->low_tail = NULL;
        } else {
            p_queue->low_head =
                ABTI_thread_get_ythread(p_ythread->thread.p_next);
        }

        p_queue->low_num_threads--;
    }
    ABTI_ythread_queue_release_low_mutex(p_queue);

    return p_ythread;
}

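/* If the low list of p_queue is not empty, dequeues the ULT at its head,
 * enqueues p_ythread (marked BLOCKED) onto the list, and context-switches
 * to the dequeued ULT.  Returns ABT_TRUE if a switch happened, ABT_FALSE
 * otherwise. */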
ABT_bool ABTI_ythread_htable_switch_low(ABTI_xstream **pp_local_xstream,
                                        ABTI_ythread_queue *p_queue,
                                        ABTI_ythread *p_ythread,
                                        ABTI_ythread_htable *p_htable,
                                        ABT_sync_event_type sync_event_type,
                                        void *p_sync)
{
    ABTI_ythread *p_target = NULL;
    ABTI_xstream *p_local_xstream = *pp_local_xstream;

    ABTI_ythread_queue_acquire_low_mutex(p_queue);
    if (p_queue->low_head) {
        p_target = p_queue->low_head;

        /* Push p_ythread to the queue */
        ABTD_atomic_release_store_int(&p_ythread->thread.state,
                                      ABT_THREAD_STATE_BLOCKED);
        ABTI_tool_event_ythread_suspend(p_local_xstream, p_ythread,
                                        p_ythread->thread.p_parent,
                                        sync_event_type, p_sync);
        if (p_queue->low_head == p_queue->low_tail) {
            p_queue->low_head = p_ythread;
            p_queue->low_tail = p_ythread;
        } else {
            p_queue->low_head =
                ABTI_thread_get_ythread(p_target->thread.p_next);
            p_queue->low_tail->thread.p_next = &p_ythread->thread;
            p_queue->low_tail = p_ythread;
        }
    }
    ABTI_ythread_queue_release_low_mutex(p_queue);

    if (p_target) {
        LOG_DEBUG("switch -> U%" PRIu64 "\n",
                  ABTI_thread_get_id(&p_target->thread));

        /* Context-switch to p_target */
        ABTD_atomic_release_store_int(&p_target->thread.state,
                                      ABT_THREAD_STATE_RUNNING);
        ABTI_tool_event_ythread_resume(ABTI_xstream_get_local(p_local_xstream),
                                       p_target,
                                       p_local_xstream
                                           ? p_local_xstream->p_thread
                                           : NULL);
        ABTI_ythread *p_prev =
            ABTI_ythread_context_switch_to_sibling(pp_local_xstream, p_ythread,
                                                   p_target);
        ABTI_tool_event_thread_run(*pp_local_xstream, &p_ythread->thread,
                                   &p_prev->thread, p_ythread->thread.p_parent);
        return ABT_TRUE;
    } else {
        return ABT_FALSE;
    }
}

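/*
 * The functions above all follow the same per-row pattern: a singly linked
 * list with head/tail pointers guarded by a per-row lock, so a push appends
 * in O(1) at the tail and a pop takes the head.  The following is a minimal,
 * self-contained sketch of that pattern using plain pthread mutexes; the
 * names (node_t, row_t, table_t, table_push, table_pop) are illustrative
 * only and are not part of the Argobots API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
    int value;
    struct node *next;
} node_t;

typedef struct {
    pthread_mutex_t lock; /* per-row lock, like the per-queue mutex above */
    node_t *head, *tail;
} row_t;

typedef struct {
    unsigned num_rows;
    row_t *rows;
} table_t;

static void table_push(table_t *t, unsigned idx, node_t *n)
{
    row_t *r = &t->rows[idx % t->num_rows];
    n->next = NULL;
    pthread_mutex_lock(&r->lock);
    if (r->head == NULL) {
        r->head = r->tail = n; /* first element of this row */
    } else {
        r->tail->next = n; /* append at the tail */
        r->tail = n;
    }
    pthread_mutex_unlock(&r->lock);
}

static node_t *table_pop(table_t *t, unsigned idx)
{
    row_t *r = &t->rows[idx % t->num_rows];
    pthread_mutex_lock(&r->lock);
    node_t *n = r->head;
    if (n) {
        r->head = n->next; /* take from the head */
        if (r->head == NULL)
            r->tail = NULL;
    }
    pthread_mutex_unlock(&r->lock);
    return n;
}

int main(void)
{
    table_t t = { 4, calloc(4, sizeof(row_t)) };
    if (!t.rows)
        return 1;
    for (unsigned i = 0; i < t.num_rows; i++)
        pthread_mutex_init(&t.rows[i].lock, NULL);

    node_t a = { 1, NULL }, b = { 2, NULL };
    table_push(&t, 0, &a);
    table_push(&t, 0, &b);

    node_t *first = table_pop(&t, 0);
    node_t *second = table_pop(&t, 0);
    printf("%d %d\n", first->value, second->value); /* prints "1 2" */

    for (unsigned i = 0; i < t.num_rows; i++)
        pthread_mutex_destroy(&t.rows[i].lock);
    free(t.rows);
    return 0;
}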