ARGOBOTS
mutex.c
Go to the documentation of this file.
1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
2 /*
3  * See COPYRIGHT in top-level directory.
4  */
5 
6 #include "abti.h"
7 #include "abti_thread_htable.h"
8 
{
    /* Body of ABT_mutex_create(ABT_mutex *newmutex): allocate and
     * zero-initialize a new mutex object and hand its handle back. */
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex *p_newmutex;

    /* calloc gives an all-zero mutex (val = 0 means unlocked, attrs
     * cleared).  NOTE(review): the allocation result is not checked;
     * presumably ABTU_calloc aborts on OOM -- confirm project policy. */
    p_newmutex = (ABTI_mutex *)ABTU_calloc(1, sizeof(ABTI_mutex));
    ABTI_mutex_init(p_newmutex);

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);

    return abt_errno;
}
48 
{
    /* Body of ABT_mutex_create_with_attr(ABT_mutex_attr attr,
     * ABT_mutex *newmutex): like ABT_mutex_create, but copies the
     * caller-supplied attributes into the new mutex. */
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex_attr *p_attr = ABTI_mutex_attr_get_ptr(attr);
    /* Presumably branches to fn_fail with an error code on NULL attr. */
    ABTI_CHECK_NULL_MUTEX_ATTR_PTR(p_attr);
    ABTI_mutex *p_newmutex;

    /* malloc (not calloc) is fine here: init + attr copy overwrite the
     * fields the mutex uses. */
    p_newmutex = (ABTI_mutex *)ABTU_malloc(sizeof(ABTI_mutex));
    ABTI_mutex_init(p_newmutex);
    ABTI_mutex_attr_copy(&p_newmutex->attr, p_attr);

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
86 
{
    /* Body of ABT_mutex_free(ABT_mutex *mutex): destroy the mutex object
     * and null out the caller's handle. */
    int abt_errno = ABT_SUCCESS;
    ABT_mutex h_mutex = *mutex;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(h_mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    /* Tear down internal state first, then release the object itself. */
    ABTI_mutex_fini(p_mutex);
    ABTU_free(p_mutex);

    /* Return value: poison the caller's handle so it cannot be reused. */
    *mutex = ABT_MUTEX_NULL;

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
122 
{
    /* Body of ABT_mutex_lock(ABT_mutex mutex): dispatch on the mutex
     * attributes, handling recursive re-acquisition without re-locking. */
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_lock(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: if the calling unit already owns the mutex,
         * only bump the nesting counter; otherwise acquire for real and
         * record ownership.  The owner_id check is safe unsynchronized
         * because only the current owner can match it. */
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_lock(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            /* A freshly acquired mutex must have no outstanding nesting. */
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes: fall back to a plain lock */
        ABTI_mutex_lock(&p_local, p_mutex);
    }

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
175 
176 static inline void ABTI_mutex_lock_low(ABTI_local **pp_local,
177  ABTI_mutex *p_mutex)
178 {
179 #ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
180  ABTI_local *p_local = *pp_local;
181  ABT_unit_type type = ABTI_self_get_type(p_local);
182  if (type == ABT_UNIT_TYPE_THREAD) {
183  LOG_EVENT("%p: lock_low - try\n", p_mutex);
184  while (!ABTD_atomic_bool_cas_weak_uint32(&p_mutex->val, 0, 1)) {
185  ABTI_thread_yield(pp_local, p_local->p_thread);
186  p_local = *pp_local;
187  }
188  LOG_EVENT("%p: lock_low - acquired\n", p_mutex);
189  } else {
190  ABTI_mutex_spinlock(p_mutex);
191  }
192 #else
193  int abt_errno;
194  ABTI_local *p_local = *pp_local;
195  ABT_unit_type type = ABTI_self_get_type(p_local);
196 
197  /* Only ULTs can yield when the mutex has been locked. For others,
198  * just call mutex_spinlock. */
199  if (type == ABT_UNIT_TYPE_THREAD) {
200  LOG_EVENT("%p: lock_low - try\n", p_mutex);
201  int c;
202 
203  /* If other ULTs associated with the same ES are waiting on the
204  * low-mutex queue, we give the header ULT a chance to try to get
205  * the mutex by context switching to it. */
206  ABTI_thread_htable *p_htable = p_mutex->p_htable;
207  ABTI_thread *p_self = p_local->p_thread;
208  ABTI_xstream *p_xstream = p_self->p_last_xstream;
209  int rank = (int)p_xstream->rank;
210  ABTI_thread_queue *p_queue = &p_htable->queue[rank];
211  if (p_queue->low_num_threads > 0) {
212  ABT_bool ret = ABTI_thread_htable_switch_low(pp_local, p_queue,
213  p_self, p_htable);
214  if (ret == ABT_TRUE) {
215  /* This ULT became a waiter in the mutex queue */
216  goto check_handover;
217  }
218  }
219 
220  if ((c = ABTD_atomic_val_cas_strong_uint32(&p_mutex->val, 0, 1)) != 0) {
221  if (c != 2) {
222  c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
223  }
224  while (c != 0) {
225  ABTI_mutex_wait_low(pp_local, p_mutex, 2);
226 
227  check_handover:
228  /* If the mutex has been handed over to the current ULT from
229  * other ULT on the same ES, we don't need to change the mutex
230  * state. */
231  if (p_mutex->p_handover) {
232  if (p_self == p_mutex->p_handover) {
233  p_mutex->p_handover = NULL;
234  ABTD_atomic_release_store_uint32(&p_mutex->val, 2);
235 
236  /* Push the previous ULT to its pool */
237  ABTI_thread *p_giver = p_mutex->p_giver;
238  ABTD_atomic_release_store_int(&p_giver->state,
240  ABTI_POOL_PUSH(p_giver->p_pool, p_giver->unit,
241  ABTI_self_get_native_thread_id(
242  *pp_local));
243  break;
244  }
245  }
246 
247  c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
248  }
249  }
250  LOG_EVENT("%p: lock_low - acquired\n", p_mutex);
251  } else {
252  ABTI_mutex_spinlock(p_mutex);
253  }
254 
255 fn_exit:
256  return;
257 
258 fn_fail:
259  HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
260  goto fn_exit;
261 #endif
262 }
263 
{
    /* Body of ABT_mutex_lock_low(ABT_mutex mutex): same attribute
     * dispatch as ABT_mutex_lock, but waits on the low-priority queue. */
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_lock_low(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: owner re-entry only bumps the nesting count */
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_lock_low(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes: fall back to a plain low-priority lock */
        ABTI_mutex_lock_low(&p_local, p_mutex);
    }

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
311 
{
    /* Body of ABT_mutex_lock_high(ABT_mutex mutex): high-priority locking
     * currently aliases the plain lock path. */
    return ABT_mutex_lock(mutex);
}
316 
335 {
336  int abt_errno;
337  ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
338  ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);
339 
340  if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
341  /* default attributes */
342  abt_errno = ABTI_mutex_trylock(p_mutex);
343 
344  } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
345  /* recursive mutex */
346  ABTI_local *p_local = ABTI_local_get_local();
347  ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
348  if (self_id != p_mutex->attr.owner_id) {
349  abt_errno = ABTI_mutex_trylock(p_mutex);
350  if (abt_errno == ABT_SUCCESS) {
351  p_mutex->attr.owner_id = self_id;
352  ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
353  }
354  } else {
355  p_mutex->attr.nesting_cnt++;
356  abt_errno = ABT_SUCCESS;
357  }
358 
359  } else {
360  /* unknown attributes */
361  abt_errno = ABTI_mutex_trylock(p_mutex);
362  }
363 
364 fn_exit:
365  return abt_errno;
366 
367 fn_fail:
368  HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
369  goto fn_exit;
370 }
371 
{
    /* Body of ABT_mutex_spinlock(ABT_mutex mutex): acquire by busy-waiting,
     * never context-switching; same recursive-attribute dispatch as
     * ABT_mutex_lock. */
    int abt_errno = ABT_SUCCESS;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_spinlock(p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex: owner re-entry only bumps the nesting count */
        ABTI_local *p_local = ABTI_local_get_local();
        ABTI_unit_id self_id = ABTI_self_get_unit_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_spinlock(p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_spinlock(p_mutex);
    }

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
420 
434 {
435  int abt_errno = ABT_SUCCESS;
436  ABTI_local *p_local = ABTI_local_get_local();
437  ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
438  ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);
439 
440  if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
441  /* default attributes */
442  ABTI_mutex_unlock(p_local, p_mutex);
443 
444  } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
445  /* recursive mutex */
446  ABTI_CHECK_TRUE(ABTI_self_get_unit_id(p_local) ==
447  p_mutex->attr.owner_id,
449  if (p_mutex->attr.nesting_cnt == 0) {
450  p_mutex->attr.owner_id = 0;
451  ABTI_mutex_unlock(p_local, p_mutex);
452  } else {
453  p_mutex->attr.nesting_cnt--;
454  }
455 
456  } else {
457  /* unknown attributes */
458  ABTI_mutex_unlock(p_local, p_mutex);
459  }
460 
461 fn_exit:
462  return abt_errno;
463 
464 fn_fail:
465  HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
466  goto fn_exit;
467 }
468 
/* Hand over the mutex to other ULT on the same ES.  Instead of waking a
 * waiter through its pool, the releaser context-switches directly to the
 * next waiter when the handover policy allows it, keeping the mutex on
 * the same ES and avoiding a scheduler round trip. */
static inline int ABTI_mutex_unlock_se(ABTI_local **pp_local,
                                       ABTI_mutex *p_mutex)
{
    int abt_errno = ABT_SUCCESS;

#ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
    /* Simple build: release the flag and yield so another ULT on this ES
     * gets a chance to grab the mutex next. */
    ABTD_atomic_release_store_uint32(&p_mutex->val, 0);
    LOG_EVENT("%p: unlock_se\n", p_mutex);
    ABTI_local *p_local = *pp_local;
#ifndef ABT_CONFIG_DISABLE_EXT_THREAD
    if (ABTI_self_get_type(p_local) == ABT_UNIT_TYPE_THREAD)
        ABTI_thread_yield(pp_local, p_local->p_thread);
#else
    ABTI_thread_yield(pp_local, p_local->p_thread);
#endif
#else
    int i;
    ABTI_xstream *p_xstream;
    ABTI_thread *p_next = NULL;
    ABTI_thread *p_thread;
    ABTI_thread_queue *p_queue;

    /* Unlock the mutex */
    /* If p_mutex->val is 1 before decreasing it, it means there is no
     * waiter in the mutex queue.  We can just return. */
    if (ABTD_atomic_fetch_sub_uint32(&p_mutex->val, 1) == 1) {
        LOG_EVENT("%p: unlock_se\n", p_mutex);
#ifndef ABT_CONFIG_DISABLE_EXT_THREAD
        if (ABTI_self_get_type(*pp_local) == ABT_UNIT_TYPE_THREAD)
            ABTI_thread_yield(pp_local, (*pp_local)->p_thread);
#else
        ABTI_thread_yield(pp_local, (*pp_local)->p_thread);
#endif
        return abt_errno;
    }

    /* There are ULTs waiting in the mutex queue */
    ABTI_thread_htable *p_htable = p_mutex->p_htable;

    p_thread = (*pp_local)->p_thread;
    p_xstream = p_thread->p_last_xstream;
    ABTI_ASSERT(p_xstream == (*pp_local)->p_xstream);
    i = (int)p_xstream->rank;
    p_queue = &p_htable->queue[i];

check_cond:
    /* Check whether the mutex handover is possible: after max_handovers
     * consecutive handovers, fall back to a full wake-up so waiters on
     * other ESs are not starved. */
    if (p_queue->num_handovers >= p_mutex->attr.max_handovers) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        LOG_EVENT("%p: unlock_se\n", p_mutex);
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        p_queue->num_handovers = 0;
        ABTI_thread_yield(pp_local, p_thread);
        return abt_errno;
    }

    /* Hand over the mutex to high-priority ULTs */
    if (p_queue->num_threads <= 1) {
        /* Not enough high-priority waiters on this ES; if any ES has
         * high-priority waiters, wake through the scheduler instead. */
        if (p_htable->h_list != NULL) {
            ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
            LOG_EVENT("%p: unlock_se\n", p_mutex);
            ABTI_mutex_wake_de(*pp_local, p_mutex);
            ABTI_thread_yield(pp_local, p_thread);
            return abt_errno;
        }
    } else {
        p_next = ABTI_thread_htable_pop(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond; /* lost the race; re-evaluate */
        else
            goto handover;
    }

    /* When we don't have high-priority ULTs and other ESs don't either,
     * we hand over the mutex to low-priority ULTs. */
    if (p_queue->low_num_threads <= 1) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        LOG_EVENT("%p: unlock_se\n", p_mutex);
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        ABTI_thread_yield(pp_local, p_thread);
        return abt_errno;
    } else {
        p_next = ABTI_thread_htable_pop_low(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond;
    }

handover:
    /* We don't push p_thread to the pool.  Instead, we will yield_to
     * p_thread directly at the end of this function. */
    p_queue->num_handovers++;

    /* We are handing over the mutex: lock_low's check_handover path on the
     * receiving ULT consumes p_handover and re-queues p_giver. */
    p_mutex->p_handover = p_next;
    p_mutex->p_giver = p_thread;

    LOG_EVENT("%p: handover -> U%" PRIu64 "\n", p_mutex,
              ABTI_thread_get_id(p_next));

    /* yield_to the next ULT; spin until its BLOCK request has been
     * processed so we never switch to a half-blocked thread. */
    while (ABTD_atomic_acquire_load_uint32(&p_next->request) &
           ABTI_THREAD_REQ_BLOCK)
        ;
    ABTI_pool_dec_num_blocked(p_next->p_pool);
    ABTD_atomic_release_store_int(&p_next->state, ABT_THREAD_STATE_RUNNING);
    ABTI_thread_context_switch_thread_to_thread(pp_local, p_thread, p_next);
#endif

    return abt_errno;
}
580 
599 {
600  int abt_errno = ABT_SUCCESS;
601  ABTI_local *p_local = ABTI_local_get_local();
602  ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
603  ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);
604 
605  if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
606  /* default attributes */
607  ABTI_mutex_unlock_se(&p_local, p_mutex);
608 
609  } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
610  /* recursive mutex */
611  ABTI_CHECK_TRUE(ABTI_self_get_unit_id(p_local) ==
612  p_mutex->attr.owner_id,
614  if (p_mutex->attr.nesting_cnt == 0) {
615  p_mutex->attr.owner_id = 0;
616  ABTI_mutex_unlock_se(&p_local, p_mutex);
617  } else {
618  p_mutex->attr.nesting_cnt--;
619  }
620 
621  } else {
622  /* unknown attributes */
623  ABTI_mutex_unlock_se(&p_local, p_mutex);
624  }
625 
626 fn_exit:
627  return abt_errno;
628 
629 fn_fail:
630  HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
631  goto fn_exit;
632 }
633 
{
    /* Body of ABT_mutex_unlock_de(ABT_mutex mutex): plain unlock that
     * skips the attribute dispatch (no recursive-owner bookkeeping). */
    int abt_errno = ABT_SUCCESS;
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_mutex_unlock(p_local, p_mutex);

fn_exit:
    return abt_errno;

fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
650 
666 int ABT_mutex_equal(ABT_mutex mutex1, ABT_mutex mutex2, ABT_bool *result)
667 {
668  ABTI_mutex *p_mutex1 = ABTI_mutex_get_ptr(mutex1);
669  ABTI_mutex *p_mutex2 = ABTI_mutex_get_ptr(mutex2);
670  *result = ABTI_mutex_equal(p_mutex1, p_mutex2);
671  return ABT_SUCCESS;
672 }
673 
674 void ABTI_mutex_wait(ABTI_local **pp_local, ABTI_mutex *p_mutex, int val)
675 {
676  ABTI_local *p_local = *pp_local;
677  ABTI_thread_htable *p_htable = p_mutex->p_htable;
678  ABTI_thread *p_self = p_local->p_thread;
679  ABTI_xstream *p_xstream = p_self->p_last_xstream;
680 
681  int rank = (int)p_xstream->rank;
682  ABTI_ASSERT(rank < p_htable->num_rows);
683  ABTI_thread_queue *p_queue = &p_htable->queue[rank];
684 
685  ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);
686 
687  if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
688  ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
689  return;
690  }
691 
692  if (p_queue->p_h_next == NULL) {
693  ABTI_thread_htable_add_h_node(p_htable, p_queue);
694  }
695 
696  /* Change the ULT's state to BLOCKED */
697  ABTI_thread_set_blocked(p_self);
698 
699  /* Push the current ULT to the queue */
700  ABTI_thread_htable_push(p_htable, rank, p_self);
701 
702  /* Unlock */
703  ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
704 
705  /* Suspend the current ULT */
706  ABTI_thread_suspend(pp_local, p_self);
707 }
708 
709 void ABTI_mutex_wait_low(ABTI_local **pp_local, ABTI_mutex *p_mutex, int val)
710 {
711  ABTI_local *p_local = *pp_local;
712  ABTI_thread_htable *p_htable = p_mutex->p_htable;
713  ABTI_thread *p_self = p_local->p_thread;
714  ABTI_xstream *p_xstream = p_self->p_last_xstream;
715 
716  int rank = (int)p_xstream->rank;
717  ABTI_ASSERT(rank < p_htable->num_rows);
718  ABTI_thread_queue *p_queue = &p_htable->queue[rank];
719 
720  ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);
721 
722  if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
723  ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
724  return;
725  }
726 
727  if (p_queue->p_l_next == NULL) {
728  ABTI_thread_htable_add_l_node(p_htable, p_queue);
729  }
730 
731  /* Change the ULT's state to BLOCKED */
732  ABTI_thread_set_blocked(p_self);
733 
734  /* Push the current ULT to the queue */
735  ABTI_thread_htable_push_low(p_htable, rank, p_self);
736 
737  /* Unlock */
738  ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
739 
740  /* Suspend the current ULT */
741  ABTI_thread_suspend(pp_local, p_self);
742 }
743 
/* Wake up to attr.max_wakeups ULTs waiting on p_mutex, scanning the
 * per-ES queues round-robin, high-priority list before low-priority.
 * Each popped ULT is made ready (pushed to its scheduler pool). */
void ABTI_mutex_wake_de(ABTI_local *p_local, ABTI_mutex *p_mutex)
{
    int n;
    ABTI_thread *p_thread;
    ABTI_thread_htable *p_htable = p_mutex->p_htable;
    int num = p_mutex->attr.max_wakeups;
    ABTI_thread_queue *p_start, *p_curr;

    /* Wake up num ULTs in a round-robin manner */
    for (n = 0; n < num; n++) {
        p_thread = NULL;

        ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

        /* No waiters anywhere -- nothing left to do. */
        if (ABTD_atomic_acquire_load_uint32(&p_htable->num_elems) == 0) {
            ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
            break;
        }

        /* Wake up the high-priority ULTs: walk the circular h_list at most
         * once, unlinking rows that became empty and advancing the list
         * head so the next wake-up starts at a different ES. */
        p_start = p_htable->h_list;
        for (p_curr = p_start; p_curr;) {
            p_thread = ABTI_thread_htable_pop(p_htable, p_curr);
            if (p_curr->num_threads == 0) {
                ABTI_thread_htable_del_h_head(p_htable);
            } else {
                p_htable->h_list = p_curr->p_h_next;
            }
            if (p_thread != NULL)
                goto done;
            p_curr = p_htable->h_list;
            if (p_curr == p_start) /* full circle: list exhausted */
                break;
        }

        /* Wake up the low-priority ULTs (same round-robin walk). */
        p_start = p_htable->l_list;
        for (p_curr = p_start; p_curr;) {
            p_thread = ABTI_thread_htable_pop_low(p_htable, p_curr);
            if (p_curr->low_num_threads == 0) {
                ABTI_thread_htable_del_l_head(p_htable);
            } else {
                p_htable->l_list = p_curr->p_l_next;
            }
            if (p_thread != NULL)
                goto done;
            p_curr = p_htable->l_list;
            if (p_curr == p_start) /* full circle: list exhausted */
                break;
        }

        /* Nothing to wake up */
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        LOG_EVENT("%p: nothing to wake up\n", p_mutex);
        break;

    done:
        /* Release the table lock before touching the scheduler pool. */
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

        /* Push p_thread to the scheduler's pool */
        LOG_EVENT("%p: wake up U%" PRIu64 ":E%d\n", p_mutex,
                  ABTI_thread_get_id(p_thread),
                  ABTI_thread_get_xstream_rank(p_thread));
        ABTI_thread_set_ready(p_local, p_thread);
    }
}
int ABT_mutex_unlock_se(ABT_mutex mutex)
Hand over the mutex within the ES.
Definition: mutex.c:598
int ABT_mutex_unlock_de(ABT_mutex mutex)
Definition: mutex.c:634
struct ABT_mutex_attr_opaque * ABT_mutex_attr
Definition: abt.h:295
#define ABT_ERR_INV_THREAD
Definition: abt.h:80
int ABT_mutex_free(ABT_mutex *mutex)
Free the mutex object.
Definition: mutex.c:102
int ABT_mutex_lock_low(ABT_mutex mutex)
Lock the mutex with low priority.
Definition: mutex.c:277
int ABT_mutex_create_with_attr(ABT_mutex_attr attr, ABT_mutex *newmutex)
Create a new mutex with attributes.
Definition: mutex.c:65
static void * ABTU_malloc(size_t size)
Definition: abtu.h:39
int ABT_bool
Definition: abt.h:309
struct ABT_mutex_opaque * ABT_mutex
Definition: abt.h:293
int ABT_mutex_create(ABT_mutex *newmutex)
Create a new mutex.
Definition: mutex.c:35
#define HANDLE_ERROR_FUNC_WITH_CODE(n)
Definition: abti_error.h:241
int ABT_mutex_lock_high(ABT_mutex mutex)
Definition: mutex.c:312
#define ABT_SUCCESS
Definition: abt.h:64
#define LOG_EVENT(fmt,...)
Definition: abti_log.h:60
#define ABT_TRUE
Definition: abt.h:223
ABT_unit_type
Definition: abt.h:170
int ABT_mutex_lock(ABT_mutex mutex)
Lock the mutex.
Definition: mutex.c:141
#define ABT_MUTEX_NULL
Definition: abt.h:348
int ABT_mutex_equal(ABT_mutex mutex1, ABT_mutex mutex2, ABT_bool *result)
Compare two mutex handles for equality.
Definition: mutex.c:666
int ABT_mutex_unlock(ABT_mutex mutex)
Unlock the mutex.
Definition: mutex.c:433
int ABT_mutex_trylock(ABT_mutex mutex)
Attempt to lock a mutex without blocking.
Definition: mutex.c:334
int ABT_mutex_spinlock(ABT_mutex mutex)
Lock the mutex without context switch.
Definition: mutex.c:386
static void ABTU_free(void *ptr)
Definition: abtu.h:32
static void * ABTU_calloc(size_t num, size_t size)
Definition: abtu.h:49