mutex.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#include "abti.h"
#include "abti_ythread_htable.h"

static inline void mutex_lock_low(ABTI_local **pp_local, ABTI_mutex *p_mutex);
static inline void mutex_unlock_se(ABTI_local **pp_local, ABTI_mutex *p_mutex);

int ABT_mutex_create(ABT_mutex *newmutex)
{
    int abt_errno;
    ABTI_mutex *p_newmutex;

    abt_errno = ABTU_calloc(1, sizeof(ABTI_mutex), (void **)&p_newmutex);
    ABTI_CHECK_ERROR(abt_errno);
    abt_errno = ABTI_mutex_init(p_newmutex);
    if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
        ABTU_free(p_newmutex);
        ABTI_HANDLE_ERROR(abt_errno);
    }

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);
    return ABT_SUCCESS;
}
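
/* Example (illustrative; not part of mutex.c): a minimal sketch of typical
 * use of the public mutex API, assuming an initialized Argobots runtime
 * (<abt.h> included and ABT_init() already called):
 *
 *     ABT_mutex mutex;
 *     int ret = ABT_mutex_create(&mutex);
 *     if (ret == ABT_SUCCESS) {
 *         ABT_mutex_lock(mutex);
 *         // ... critical section ...
 *         ABT_mutex_unlock(mutex);
 *         ABT_mutex_free(&mutex); // handle is reset to ABT_MUTEX_NULL
 *     }
 */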

int ABT_mutex_create_with_attr(ABT_mutex_attr attr, ABT_mutex *newmutex)
{
    int abt_errno;
    ABTI_mutex_attr *p_attr = ABTI_mutex_attr_get_ptr(attr);
    ABTI_CHECK_NULL_MUTEX_ATTR_PTR(p_attr);
    ABTI_mutex *p_newmutex;

    abt_errno = ABTU_malloc(sizeof(ABTI_mutex), (void **)&p_newmutex);
    ABTI_CHECK_ERROR(abt_errno);
    abt_errno = ABTI_mutex_init(p_newmutex);
    if (ABTI_IS_ERROR_CHECK_ENABLED && abt_errno != ABT_SUCCESS) {
        ABTU_free(p_newmutex);
        ABTI_HANDLE_ERROR(abt_errno);
    }
    memcpy(&p_newmutex->attr, p_attr, sizeof(ABTI_mutex_attr));

    /* Return value */
    *newmutex = ABTI_mutex_get_handle(p_newmutex);
    return ABT_SUCCESS;
}
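
/* Example (illustrative; not part of mutex.c): creating a recursive mutex
 * through the public attribute API (ABT_mutex_attr_create() and
 * ABT_mutex_attr_set_recursive() from <abt.h>):
 *
 *     ABT_mutex_attr attr;
 *     ABT_mutex rmutex;
 *     ABT_mutex_attr_create(&attr);
 *     ABT_mutex_attr_set_recursive(attr, ABT_TRUE);
 *     ABT_mutex_create_with_attr(attr, &rmutex);
 *     ABT_mutex_attr_free(&attr); // safe: the attributes were copied above
 */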

int ABT_mutex_free(ABT_mutex *mutex)
{
    ABT_mutex h_mutex = *mutex;
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(h_mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_mutex_fini(p_mutex);
    ABTU_free(p_mutex);

    /* Return value */
    *mutex = ABT_MUTEX_NULL;
    return ABT_SUCCESS;
}

int ABT_mutex_lock(ABT_mutex mutex)
{
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_lock(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_thread_id self_id = ABTI_self_get_thread_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_lock(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_lock(&p_local, p_mutex);
    }
    return ABT_SUCCESS;
}
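
/* Note on the recursive path above: only the first lock by a given thread
 * acquires the underlying lock; further locks by the same owner only
 * increment nesting_cnt, so a matching number of unlocks is needed before
 * the mutex is actually released. For example:
 *
 *     ABT_mutex_lock(rmutex);   // acquires; nesting_cnt == 0
 *     ABT_mutex_lock(rmutex);   // same owner; nesting_cnt == 1
 *     ABT_mutex_unlock(rmutex); // nesting_cnt back to 0; still held
 *     ABT_mutex_unlock(rmutex); // nesting_cnt == 0, so this releases it
 */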

int ABT_mutex_lock_low(ABT_mutex mutex)
{
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        mutex_lock_low(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_thread_id self_id = ABTI_self_get_thread_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            mutex_lock_low(&p_local, p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        mutex_lock_low(&p_local, p_mutex);
    }
    return ABT_SUCCESS;
}

int ABT_mutex_lock_high(ABT_mutex mutex)
{
    return ABT_mutex_lock(mutex);
}

int ABT_mutex_trylock(ABT_mutex mutex)
{
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    int abt_errno;
    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        abt_errno = ABTI_mutex_trylock(p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_local *p_local = ABTI_local_get_local();
        ABTI_thread_id self_id = ABTI_self_get_thread_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            abt_errno = ABTI_mutex_trylock(p_mutex);
            if (abt_errno == ABT_SUCCESS) {
                p_mutex->attr.owner_id = self_id;
                ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
            }
        } else {
            p_mutex->attr.nesting_cnt++;
            abt_errno = ABT_SUCCESS;
        }

    } else {
        /* unknown attributes */
        abt_errno = ABTI_mutex_trylock(p_mutex);
    }
    /* Trylock always needs to return an error code. */
    return abt_errno;
}
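
/* Example (illustrative; not part of mutex.c): ABT_mutex_trylock() never
 * blocks, so callers branch on the return code. This sketch assumes the
 * held-lock case surfaces as the public error code ABT_ERR_MUTEX_LOCKED:
 *
 *     int ret = ABT_mutex_trylock(mutex);
 *     if (ret == ABT_SUCCESS) {
 *         // ... critical section ...
 *         ABT_mutex_unlock(mutex);
 *     } else if (ret == ABT_ERR_MUTEX_LOCKED) {
 *         // ... do other work and retry later ...
 *     }
 */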

int ABT_mutex_spinlock(ABT_mutex mutex)
{
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_spinlock(p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_local *p_local = ABTI_local_get_local();
        ABTI_thread_id self_id = ABTI_self_get_thread_id(p_local);
        if (self_id != p_mutex->attr.owner_id) {
            ABTI_mutex_spinlock(p_mutex);
            p_mutex->attr.owner_id = self_id;
            ABTI_ASSERT(p_mutex->attr.nesting_cnt == 0);
        } else {
            p_mutex->attr.nesting_cnt++;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_spinlock(p_mutex);
    }
    return ABT_SUCCESS;
}
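
/* Note: unlike ABT_mutex_lock(), ABT_mutex_spinlock() above acquires the
 * mutex without any context switch: the caller busy-waits until the lock
 * becomes available. This suits very short critical sections and callers
 * that cannot yield, at the cost of burning cycles while waiting. */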

int ABT_mutex_unlock(ABT_mutex mutex)
{
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        ABTI_mutex_unlock(p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_CHECK_TRUE(ABTI_self_get_thread_id(p_local) ==
                            p_mutex->attr.owner_id,
                        ABT_ERR_INV_THREAD);
        if (p_mutex->attr.nesting_cnt == 0) {
            p_mutex->attr.owner_id = 0;
            ABTI_mutex_unlock(p_local, p_mutex);
        } else {
            p_mutex->attr.nesting_cnt--;
        }

    } else {
        /* unknown attributes */
        ABTI_mutex_unlock(p_local, p_mutex);
    }
    return ABT_SUCCESS;
}

int ABT_mutex_unlock_se(ABT_mutex mutex)
{
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    if (p_mutex->attr.attrs == ABTI_MUTEX_ATTR_NONE) {
        /* default attributes */
        mutex_unlock_se(&p_local, p_mutex);

    } else if (p_mutex->attr.attrs & ABTI_MUTEX_ATTR_RECURSIVE) {
        /* recursive mutex */
        ABTI_CHECK_TRUE(ABTI_self_get_thread_id(p_local) ==
                            p_mutex->attr.owner_id,
                        ABT_ERR_INV_THREAD);
        if (p_mutex->attr.nesting_cnt == 0) {
            p_mutex->attr.owner_id = 0;
            mutex_unlock_se(&p_local, p_mutex);
        } else {
            p_mutex->attr.nesting_cnt--;
        }

    } else {
        /* unknown attributes */
        mutex_unlock_se(&p_local, p_mutex);
    }
    return ABT_SUCCESS;
}

int ABT_mutex_unlock_de(ABT_mutex mutex)
{
    ABTI_local *p_local = ABTI_local_get_local();
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_mutex_unlock(p_local, p_mutex);
    return ABT_SUCCESS;
}

int ABT_mutex_equal(ABT_mutex mutex1, ABT_mutex mutex2, ABT_bool *result)
{
    ABTI_mutex *p_mutex1 = ABTI_mutex_get_ptr(mutex1);
    ABTI_mutex *p_mutex2 = ABTI_mutex_get_ptr(mutex2);
    *result = (p_mutex1 == p_mutex2) ? ABT_TRUE : ABT_FALSE;
    return ABT_SUCCESS;
}

/*****************************************************************************/
/* Private APIs */
/*****************************************************************************/

void ABTI_mutex_wait(ABTI_xstream **pp_local_xstream, ABTI_mutex *p_mutex,
                     int val)
{
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    ABTI_ythread_htable *p_htable = p_mutex->p_htable;
    ABTI_ythread *p_self = ABTI_thread_get_ythread(p_local_xstream->p_thread);

    int rank = (int)p_local_xstream->rank;
    ABTI_ASSERT(rank < p_htable->num_rows);
    ABTI_ythread_queue *p_queue = &p_htable->queue[rank];

    ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

    if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        return;
    }

    if (p_queue->p_h_next == NULL) {
        ABTI_ythread_htable_add_h_node(p_htable, p_queue);
    }

    /* Change the ULT's state to BLOCKED */
    ABTI_ythread_set_blocked(p_self);

    /* Push the current ULT to the queue */
    ABTI_ythread_htable_push(p_htable, rank, p_self);

    /* Unlock */
    ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

    /* Suspend the current ULT */
    ABTI_ythread_suspend(pp_local_xstream, p_self, ABT_SYNC_EVENT_TYPE_MUTEX,
                         (void *)p_mutex);
}

void ABTI_mutex_wait_low(ABTI_xstream **pp_local_xstream, ABTI_mutex *p_mutex,
                         int val)
{
    ABTI_xstream *p_local_xstream = *pp_local_xstream;
    ABTI_ythread_htable *p_htable = p_mutex->p_htable;
    ABTI_ythread *p_self = ABTI_thread_get_ythread(p_local_xstream->p_thread);

    int rank = (int)p_local_xstream->rank;
    ABTI_ASSERT(rank < p_htable->num_rows);
    ABTI_ythread_queue *p_queue = &p_htable->queue[rank];

    ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

    if (ABTD_atomic_acquire_load_uint32(&p_mutex->val) != val) {
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        return;
    }

    if (p_queue->p_l_next == NULL) {
        ABTI_ythread_htable_add_l_node(p_htable, p_queue);
    }

    /* Change the ULT's state to BLOCKED */
    ABTI_ythread_set_blocked(p_self);

    /* Push the current ULT to the queue */
    ABTI_ythread_htable_push_low(p_htable, rank, p_self);

    /* Unlock */
    ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

    /* Suspend the current ULT */
    ABTI_ythread_suspend(pp_local_xstream, p_self, ABT_SYNC_EVENT_TYPE_MUTEX,
                         (void *)p_mutex);
}

void ABTI_mutex_wake_de(ABTI_local *p_local, ABTI_mutex *p_mutex)
{
    int n;
    ABTI_ythread *p_ythread;
    ABTI_ythread_htable *p_htable = p_mutex->p_htable;
    int num = p_mutex->attr.max_wakeups;
    ABTI_ythread_queue *p_start, *p_curr;

    /* Wake up num ULTs in a round-robin manner */
    for (n = 0; n < num; n++) {
        p_ythread = NULL;

        ABTI_THREAD_HTABLE_LOCK(p_htable->mutex);

        if (ABTD_atomic_acquire_load_uint32(&p_htable->num_elems) == 0) {
            ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
            break;
        }

        /* Wake up the high-priority ULTs */
        p_start = p_htable->h_list;
        for (p_curr = p_start; p_curr;) {
            p_ythread = ABTI_ythread_htable_pop(p_htable, p_curr);
            if (p_curr->num_threads == 0) {
                ABTI_ythread_htable_del_h_head(p_htable);
            } else {
                p_htable->h_list = p_curr->p_h_next;
            }
            if (p_ythread != NULL)
                goto done;
            p_curr = p_htable->h_list;
            if (p_curr == p_start)
                break;
        }

        /* Wake up the low-priority ULTs */
        p_start = p_htable->l_list;
        for (p_curr = p_start; p_curr;) {
            p_ythread = ABTI_ythread_htable_pop_low(p_htable, p_curr);
            if (p_curr->low_num_threads == 0) {
                ABTI_ythread_htable_del_l_head(p_htable);
            } else {
                p_htable->l_list = p_curr->p_l_next;
            }
            if (p_ythread != NULL)
                goto done;
            p_curr = p_htable->l_list;
            if (p_curr == p_start)
                break;
        }

        /* Nothing to wake up */
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);
        LOG_DEBUG("%p: nothing to wake up\n", p_mutex);
        break;

    done:
        ABTI_THREAD_HTABLE_UNLOCK(p_htable->mutex);

        /* Push p_ythread to the scheduler's pool */
        LOG_DEBUG("%p: wake up U%" PRIu64 ":E%d\n", p_mutex,
                  ABTI_thread_get_id(&p_ythread->thread),
                  p_ythread->thread.p_last_xstream
                      ? p_ythread->thread.p_last_xstream->rank
                      : -1);
        ABTI_ythread_set_ready(p_local, p_ythread);
    }
}

/*****************************************************************************/
/* Internal static functions */
/*****************************************************************************/

static inline void mutex_lock_low(ABTI_local **pp_local, ABTI_mutex *p_mutex)
{
    ABTI_ythread *p_ythread = NULL;
    ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
    if (!ABTI_IS_EXT_THREAD_ENABLED || p_local_xstream) {
        p_ythread = ABTI_thread_get_ythread_or_null(p_local_xstream->p_thread);
    }
#ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
    if (p_ythread) {
        LOG_DEBUG("%p: lock_low - try\n", p_mutex);
        while (!ABTD_atomic_bool_cas_strong_uint32(&p_mutex->val, 0, 1)) {
            ABTI_ythread_yield(&p_local_xstream, p_ythread,
                               ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
            *pp_local = ABTI_xstream_get_local(p_local_xstream);
        }
        LOG_DEBUG("%p: lock_low - acquired\n", p_mutex);
    } else {
        ABTI_mutex_spinlock(p_mutex);
    }
#else
    /* Only ULTs can yield when the mutex has been locked. For others,
     * just call ABTI_mutex_spinlock(). */
    if (p_ythread) {
        int c;
        LOG_DEBUG("%p: lock_low - try\n", p_mutex);
        /* If other ULTs associated with the same ES are waiting on the
         * low-mutex queue, we give the ULT at the head of the queue a
         * chance to try to get the mutex by context switching to it. */
        ABTI_ythread_htable *p_htable = p_mutex->p_htable;
        ABTI_ythread_queue *p_queue = &p_htable->queue[p_local_xstream->rank];
        if (p_queue->low_num_threads > 0) {
            ABT_bool ret =
                ABTI_ythread_htable_switch_low(&p_local_xstream, p_queue,
                                               p_ythread, p_htable,
                                               ABT_SYNC_EVENT_TYPE_MUTEX,
                                               (void *)p_mutex);
            *pp_local = ABTI_xstream_get_local(p_local_xstream);
            if (ret == ABT_TRUE) {
                /* This ULT became a waiter in the mutex queue */
                goto check_handover;
            }
        }
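
        /* The lock word p_mutex->val follows a futex-style three-state
         * protocol (inferred from the operations below): 0 = unlocked,
         * 1 = locked with no waiters, and 2 = locked with possible waiters.
         * A failed 0 -> 1 CAS escalates the state to 2 so that the eventual
         * unlocker knows it must wake a waiter. */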
        if ((c = ABTD_atomic_val_cas_strong_uint32(&p_mutex->val, 0, 1)) != 0) {
            if (c != 2) {
                c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
            }
            while (c != 0) {
                ABTI_mutex_wait_low(&p_local_xstream, p_mutex, 2);
                *pp_local = ABTI_xstream_get_local(p_local_xstream);

            check_handover:
                /* If the mutex has been handed over to the current ULT from
                 * another ULT on the same ES, we don't need to change the
                 * mutex state. */
                if (p_mutex->p_handover) {
                    if (p_ythread == p_mutex->p_handover) {
                        p_mutex->p_handover = NULL;
                        ABTD_atomic_release_store_uint32(&p_mutex->val, 2);

                        /* Push the previous ULT to its pool */
                        ABTI_ythread *p_giver = p_mutex->p_giver;
                        ABTD_atomic_release_store_int(&p_giver->thread.state,
                                                      ABT_THREAD_STATE_READY);
                        ABTI_pool_push(p_giver->thread.p_pool,
                                       p_giver->thread.unit);
                        break;
                    }
                }

                c = ABTD_atomic_exchange_uint32(&p_mutex->val, 2);
            }
        }
        LOG_DEBUG("%p: lock_low - acquired\n", p_mutex);
    } else {
        ABTI_mutex_spinlock(p_mutex);
    }

    return;
#endif
}

/* Hand over the mutex to another ULT on the same ES */
static inline void mutex_unlock_se(ABTI_local **pp_local, ABTI_mutex *p_mutex)
{
    ABTI_ythread *p_ythread = NULL;
    ABTI_xstream *p_local_xstream = ABTI_local_get_xstream_or_null(*pp_local);
    if (!ABTI_IS_EXT_THREAD_ENABLED || p_local_xstream) {
        p_ythread = ABTI_thread_get_ythread_or_null(p_local_xstream->p_thread);
    }
#ifdef ABT_CONFIG_USE_SIMPLE_MUTEX
    ABTD_atomic_release_store_uint32(&p_mutex->val, 0);
    LOG_DEBUG("%p: unlock_se\n", p_mutex);
    if (p_ythread) {
        ABTI_ythread_yield(&p_local_xstream, p_ythread,
                           ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
        *pp_local = ABTI_xstream_get_local(p_local_xstream);
    }
#else
    /* If it is run on a non-yieldable thread, just unlock it. */
    if (!p_ythread) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        LOG_DEBUG("%p: unlock_se\n", p_mutex);
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        return;
    }

    /* Unlock the mutex */
    /* If p_mutex->val is 1 before decreasing it, there are no waiters in
     * the mutex queue, so we can just return. */
    if (ABTD_atomic_fetch_sub_uint32(&p_mutex->val, 1) == 1) {
        LOG_DEBUG("%p: unlock_se\n", p_mutex);
        if (p_ythread) {
            ABTI_ythread_yield(&p_local_xstream, p_ythread,
                               ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
            *pp_local = ABTI_xstream_get_local(p_local_xstream);
        }
        return;
    }

    /* There are ULTs waiting in the mutex queue */
    ABTI_ythread_htable *p_htable = p_mutex->p_htable;
    ABTI_ythread *p_next = NULL;
    ABTI_ythread_queue *p_queue = &p_htable->queue[(int)p_local_xstream->rank];

check_cond:
    /* Check whether the mutex handover is possible */
    if (p_queue->num_handovers >= p_mutex->attr.max_handovers) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        LOG_DEBUG("%p: unlock_se\n", p_mutex);
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        p_queue->num_handovers = 0;
        ABTI_ythread_yield(&p_local_xstream, p_ythread,
                           ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
        *pp_local = ABTI_xstream_get_local(p_local_xstream);
        return;
    }

    /* Hand over the mutex to high-priority ULTs */
    if (p_queue->num_threads <= 1) {
        if (p_htable->h_list != NULL) {
            ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
            LOG_DEBUG("%p: unlock_se\n", p_mutex);
            ABTI_mutex_wake_de(*pp_local, p_mutex);
            ABTI_ythread_yield(&p_local_xstream, p_ythread,
                               ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
            *pp_local = ABTI_xstream_get_local(p_local_xstream);
            return;
        }
    } else {
        p_next = ABTI_ythread_htable_pop(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond;
        else
            goto handover;
    }

    /* When we don't have high-priority ULTs and other ESs don't either,
     * we hand over the mutex to low-priority ULTs. */
    if (p_queue->low_num_threads <= 1) {
        ABTD_atomic_release_store_uint32(&p_mutex->val, 0); /* Unlock */
        LOG_DEBUG("%p: unlock_se\n", p_mutex);
        ABTI_mutex_wake_de(*pp_local, p_mutex);
        ABTI_ythread_yield(&p_local_xstream, p_ythread,
                           ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
        *pp_local = ABTI_xstream_get_local(p_local_xstream);
        return;
    } else {
        p_next = ABTI_ythread_htable_pop_low(p_htable, p_queue);
        if (p_next == NULL)
            goto check_cond;
    }

handover:
    /* We don't push p_ythread to the pool. Instead, we will yield_to p_next
     * directly at the end of this function. */
    p_queue->num_handovers++;

    /* We are handing over the mutex */
    p_mutex->p_handover = p_next;
    p_mutex->p_giver = p_ythread;

    LOG_DEBUG("%p: handover -> U%" PRIu64 "\n", p_mutex,
              ABTI_thread_get_id(&p_next->thread));

    /* yield_to the next ULT */
    while (ABTD_atomic_acquire_load_uint32(&p_next->thread.request) &
           ABTI_THREAD_REQ_BLOCK)
        ;
    ABTI_pool_dec_num_blocked(p_next->thread.p_pool);
    ABTD_atomic_release_store_int(&p_next->thread.state,
                                  ABT_THREAD_STATE_RUNNING);
    ABTI_tool_event_ythread_resume(ABTI_xstream_get_local(p_local_xstream),
                                   p_next, &p_ythread->thread);
    /* This works as a "yield" for this thread. */
    ABTI_tool_event_ythread_yield(p_local_xstream, p_ythread,
                                  p_ythread->thread.p_parent,
                                  ABT_SYNC_EVENT_TYPE_MUTEX, (void *)p_mutex);
    ABTI_ythread *p_prev =
        ABTI_ythread_context_switch_to_sibling(&p_local_xstream, p_ythread,
                                               p_next);
    /* Invoke an event of thread resume and run. */
    *pp_local = ABTI_xstream_get_local(p_local_xstream);
    ABTI_tool_event_thread_run(p_local_xstream, &p_ythread->thread,
                               &p_prev->thread, p_ythread->thread.p_parent);
#endif
}
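
/* Design note on mutex_unlock_se(): on the handover path, the unlocking ULT
 * does not release the lock word or go through the scheduler. It designates
 * p_next as the new holder and context-switches to it directly, keeping the
 * mutex on the same ES and skipping a pool push/pop round trip.
 * attr.max_handovers bounds how many consecutive handovers may happen before
 * falling back to a normal unlock-and-wake, so waiters on other ESs are not
 * starved indefinitely. */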