Hinted lock (OpenMP 4.5 feature) Updates/Fixes Part 2

* Added a new user TSX lock implementation, RTM. This implementation is a
  light-weight version of the adaptive lock implementation, omitting the
  back-off logic for deciding when to speculate (or not). The fall-back lock is
  still the queuing lock. (A sketch of the speculation pattern appears after
  this list.)
* Changed indirect lock table management. The data for indirect lock management
  is now encapsulated in the "kmp_indirect_lock_table_t" type. Also, the lock
  table is now two-dimensional (was linear), and each entry is a
  kmp_indirect_lock_t object (was a pointer to an object). (See the indexing
  sketch after this list.)
* Some cleanup in the critical section code.
* Removed the limits on the tuning parameters read from KMP_ADAPTIVE_LOCK_PROPS.
* KMP_USE_DYNAMIC_LOCK=1 now also turns on these two switches: KMP_USE_TSX and
  KMP_USE_ADAPTIVE_LOCKS. The new lock kinds can be selected at run time via
  KMP_LOCK_KIND=rtm or KMP_LOCK_KIND=hle; rtm falls back to the queuing lock
  when the CPU lacks RTM support.
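
For reference, the new RTM lock follows the usual lock-elision pattern visible
in __kmp_acquire_rtm_lock in the kmp_lock.cpp hunk below: start a transaction,
confirm the fall-back lock word is free, and on abort either wait and retry or
take the fall-back lock for real. A minimal sketch of that pattern, with
hypothetical names and a plain spin lock standing in for the runtime's queuing
lock (assumes the RTM intrinsics from <immintrin.h>, compiled with -mrtm on
RTM-capable hardware; an illustration, not the runtime's actual code):

    #include <immintrin.h>   /* _xbegin/_xend/_xabort, _XABORT_* (needs -mrtm) */
    #include <stdatomic.h>

    typedef atomic_int fb_lock_t;           /* stand-in for kmp_queuing_lock_t */

    static int fb_is_unlocked(fb_lock_t *l) { return atomic_load(l) == 0; }

    static void fb_acquire(fb_lock_t *l) {  /* simple spin lock as fall-back */
        int expected = 0;
        while (!atomic_compare_exchange_weak(l, &expected, 1))
            expected = 0;
    }

    static void fb_release(fb_lock_t *l) { atomic_store(l, 0); }

    static void rtm_acquire(fb_lock_t *l) {
        unsigned retries = 3;
        do {
            unsigned status = _xbegin();
            if (status == _XBEGIN_STARTED) {
                if (fb_is_unlocked(l))
                    return;                  /* speculating; lock word untouched */
                _xabort(0xff);               /* lock held: abort with marker code */
            }
            if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
                while (!fb_is_unlocked(l))   /* the runtime yields while waiting */
                    ;
            } else if (!(status & _XABORT_RETRY))
                break;                       /* abort looks permanent: give up */
        } while (retries--);
        fb_acquire(l);                       /* non-speculative fall-back path */
    }

    static void rtm_release(fb_lock_t *l) {
        if (fb_is_unlocked(l))
            _xend();                         /* commit the speculative section */
        else
            fb_release(l);                   /* we really held the fall-back lock */
    }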
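
Likewise, the reshaped indirect lock table keeps lock objects in fixed-size
chunks (KMP_I_LOCK_CHUNK = 1024 entries) and, when full, doubles only the
array of chunk pointers. A sketch of the indexing and growth scheme under the
same caveats (hypothetical names, global-lock synchronization omitted):

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK 1024                       /* mirrors KMP_I_LOCK_CHUNK */

    typedef struct lock_obj { void *impl; } lock_obj_t;  /* stand-in entry */

    typedef struct lock_table {
        lock_obj_t **table;                  /* row pointers, one per chunk */
        unsigned size;                       /* capacity in entries */
        unsigned next;                       /* next index to hand out */
    } lock_table_t;

    static void table_init(lock_table_t *t) {
        t->table = malloc(sizeof(lock_obj_t *));
        t->table[0] = calloc(CHUNK, sizeof(lock_obj_t));
        t->size = CHUNK;
        t->next = 0;
    }

    /* 2D lookup: row = idx / CHUNK, column = idx % CHUNK. */
    static lock_obj_t *table_get(lock_table_t *t, unsigned idx) {
        return t->table[idx / CHUNK] + idx % CHUNK;
    }

    static unsigned table_alloc(lock_table_t *t) {
        if (t->next == t->size) {            /* full: double the row array only */
            unsigned rows = t->size / CHUNK;
            lock_obj_t **old = t->table;
            t->table = malloc(2 * rows * sizeof(lock_obj_t *));
            memcpy(t->table, old, rows * sizeof(lock_obj_t *));
            free(old);
            for (unsigned i = rows; i < 2 * rows; ++i)
                t->table[i] = calloc(CHUNK, sizeof(lock_obj_t));
            t->size *= 2;
        }
        return t->next++;
    }

Because only the pointer array is reallocated on growth, already-allocated
lock objects never move in memory, which is what allows each table entry to be
a kmp_indirect_lock_t object rather than a pointer to one.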

Differential Revision: http://reviews.llvm.org/D15204

llvm-svn: 255375
Jonathan Peyton 2015-12-11 21:57:06 +00:00
parent 4062601cb9
commit dae13d81b4
13 changed files with 376 additions and 222 deletions

@ -88,11 +88,13 @@
/* lock hint type for dynamic user lock */
typedef enum kmp_lock_hint_t {
kmp_lock_hint_none = 0,
kmp_lock_hint_contended,
kmp_lock_hint_uncontended,
kmp_lock_hint_contended,
kmp_lock_hint_nonspeculative,
kmp_lock_hint_speculative,
kmp_lock_hint_adaptive,
kmp_lock_hint_hle,
kmp_lock_hint_rtm,
kmp_lock_hint_adaptive
} kmp_lock_hint_t;
/* hinted lock initializers */

@ -66,7 +66,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -64,7 +64,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -53,7 +53,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -88,11 +88,13 @@
/* lock hint type for dynamic user lock */
typedef enum kmp_lock_hint_t {
kmp_lock_hint_none = 0,
kmp_lock_hint_contended,
kmp_lock_hint_uncontended,
kmp_lock_hint_contended,
kmp_lock_hint_nonspeculative,
kmp_lock_hint_speculative,
kmp_lock_hint_adaptive,
kmp_lock_hint_hle,
kmp_lock_hint_rtm,
kmp_lock_hint_adaptive
} kmp_lock_hint_t;
/* hinted lock initializers */

@ -66,7 +66,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -64,7 +64,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -53,7 +53,9 @@
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_contended = 2
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_nonspeculative = 3
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_speculative = 4
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_hle = 5
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_rtm = 6
integer (kind=kmp_lock_hint_kind), parameter :: kmp_lock_hint_adaptive = 7
interface

@ -916,38 +916,30 @@ __kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
#if KMP_USE_DYNAMIC_LOCK
static __forceinline kmp_indirect_lock_t *
__kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_dyna_lockseq_t seq)
static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
// Code from __kmp_get_critical_section_ptr
// This function returns an indirect lock object instead of a user lock.
kmp_indirect_lock_t **lck, *ret;
// Pointer to the allocated indirect lock is written to crit, while indexing is ignored.
void *idx;
kmp_indirect_lock_t **lck;
lck = (kmp_indirect_lock_t **)crit;
ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
if (ret == NULL) {
void *idx;
kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
ret = ilk;
KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
KMP_SET_I_LOCK_LOCATION(ilk, loc);
KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
KA_TRACE(20, ("__kmp_get_indirect_csptr: initialized indirect lock #%d\n", tag));
kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
KMP_SET_I_LOCK_LOCATION(ilk, loc);
KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
__kmp_itt_critical_creating(ilk->lock, loc);
__kmp_itt_critical_creating(ilk->lock, loc);
#endif
int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
if (status == 0) {
int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
if (status == 0) {
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed(ilk->lock);
__kmp_itt_critical_destroyed(ilk->lock);
#endif
// Postponing destroy, to avoid costly dispatch here.
//KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
KMP_DEBUG_ASSERT(ret != NULL);
}
// We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit.
//KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
}
return ret;
KMP_DEBUG_ASSERT(*lck != NULL);
}
// Fast-path acquire tas lock
@ -988,7 +980,7 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
KMP_MB(); \
}
#if KMP_HAS_FUTEX
#if KMP_USE_FUTEX
# include <unistd.h>
# include <sys/syscall.h>
@ -1048,7 +1040,7 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \
}
#endif // KMP_HAS_FUTEX
#endif // KMP_USE_FUTEX
#else // KMP_USE_DYNAMIC_LOCK
@ -1124,34 +1116,41 @@ __kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
#if KMP_USE_DYNAMIC_LOCK
// Assumption: all direct locks fit in OMP_CRITICAL_SIZE.
// The global sequence __kmp_user_lock_seq is used unless compiler pushes a value.
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
// The thread that reaches here first needs to tag the lock word.
if (*((kmp_dyna_lock_t *)lck) == 0) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
if (*lk == 0) {
kmp_dyna_lockseq_t lckseq = __kmp_user_lock_seq;
if (KMP_IS_D_LOCK(lckseq)) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
}
}
// Branch for accessing the actual lock object and set operation. This branching is inevitable since
// this lock initialization does not follow the normal dispatch path (lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_FAST_TAS
# if KMP_USE_INLINED_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_FAST_FUTEX
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
@ -1232,11 +1231,11 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_FAST_TAS
# if KMP_USE_INLINED_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_FAST_FUTEX
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
} else
@ -1828,7 +1827,7 @@ __kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
kmp_dyna_lockseq_t nested_seq;
switch (__kmp_user_lock_seq) {
case lockseq_tas: nested_seq = lockseq_nested_tas; break;
#if KMP_HAS_FUTEX
#if KMP_USE_FUTEX
case lockseq_futex: nested_seq = lockseq_nested_futex; break;
#endif
case lockseq_ticket: nested_seq = lockseq_nested_ticket; break;
@ -2018,11 +2017,11 @@ __kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
# if KMP_USE_FAST_TAS
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_FAST_FUTEX
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
} else
@ -2136,11 +2135,11 @@ __kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_FAST_TAS
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_FAST_FUTEX
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
} else
@ -2276,11 +2275,11 @@ __kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_FAST_TAS
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
} else
# elif KMP_USE_FAST_FUTEX
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
} else
@ -2427,23 +2426,32 @@ __kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid,
#if KMP_USE_DYNAMIC_LOCK
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
if (*((kmp_dyna_lock_t *)lck) == 0) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
if (*lk == 0) {
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
}
}
// Branch for accessing the actual lock object and set operation. This branching is inevitable since
// this lock initialization does not follow the normal dispatch path (lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
} else {
kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
KMP_DEBUG_ASSERT(ilk != NULL);
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, ilk->lock, __kmp_user_lock_seq);
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK

@ -3014,11 +3014,13 @@ __kmp_set_drdpa_lock_flags( kmp_drdpa_lock_t *lck, kmp_lock_flags_t flags )
# ifndef __OMP_H
typedef enum kmp_lock_hint_t {
kmp_lock_hint_none = 0,
kmp_lock_hint_contended,
kmp_lock_hint_uncontended,
kmp_lock_hint_contended,
kmp_lock_hint_nonspeculative,
kmp_lock_hint_speculative,
kmp_lock_hint_adaptive,
kmp_lock_hint_hle,
kmp_lock_hint_rtm,
kmp_lock_hint_adaptive
} kmp_lock_hint_t;
# endif
@ -3029,7 +3031,7 @@ static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck, kmp_dyna_lockseq_t seq)
KA_TRACE(20, ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
}
#if KMP_HAS_HLE
#if KMP_USE_TSX
// HLE lock functions - imported from the testbed runtime.
#define HLE_ACQUIRE ".byte 0xf2;"
@ -3101,9 +3103,93 @@ __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck, kmp_int32 gtid)
return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
}
#endif // KMP_HAS_HLE
static void
__kmp_init_rtm_lock(kmp_queuing_lock_t *lck)
{
__kmp_init_queuing_lock(lck);
}
// Entry functions for indirect locks (first element of direct_*_ops[]).
static void
__kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck)
{
__kmp_destroy_queuing_lock(lck);
}
static void
__kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
unsigned retries=3, status;
do {
status = _xbegin();
if (status == _XBEGIN_STARTED) {
if (__kmp_is_unlocked_queuing_lock(lck))
return;
_xabort(0xff);
}
if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
// Wait until lock becomes free
while (! __kmp_is_unlocked_queuing_lock(lck))
__kmp_yield(TRUE);
}
else if (!(status & _XABORT_RETRY))
break;
} while (retries--);
// Fall-back non-speculative lock (xchg)
__kmp_acquire_queuing_lock(lck, gtid);
}
static void
__kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
__kmp_acquire_rtm_lock(lck, gtid);
}
static int
__kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
if (__kmp_is_unlocked_queuing_lock(lck)) {
// Releasing from speculation
_xend();
}
else {
// Releasing from a real lock
__kmp_release_queuing_lock(lck, gtid);
}
return KMP_LOCK_RELEASED;
}
static int
__kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
return __kmp_release_rtm_lock(lck, gtid);
}
static int
__kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
unsigned retries=3, status;
do {
status = _xbegin();
if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
return 1;
}
if (!(status & _XABORT_RETRY))
break;
} while (retries--);
return (__kmp_is_unlocked_queuing_lock(lck))? 1: 0;
}
static int
__kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
{
return __kmp_test_rtm_lock(lck, gtid);
}
#endif // KMP_USE_TSX
// Entry functions for indirect locks (first element of direct lock jump tables).
static void __kmp_init_indirect_lock(kmp_dyna_lock_t * l, kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t * lock);
static void __kmp_set_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32);
@ -3191,24 +3277,10 @@ int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;
// Lock index table.
kmp_indirect_lock_t **__kmp_indirect_lock_table;
kmp_lock_index_t __kmp_indirect_lock_table_size;
kmp_lock_index_t __kmp_indirect_lock_table_next;
kmp_indirect_lock_table_t __kmp_i_lock_table;
// Size of indirect locks.
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {
sizeof(kmp_ticket_lock_t), sizeof(kmp_queuing_lock_t),
#if KMP_USE_ADAPTIVE_LOCKS
sizeof(kmp_adaptive_lock_t),
#endif
sizeof(kmp_drdpa_lock_t),
sizeof(kmp_tas_lock_t),
#if KMP_HAS_FUTEX
sizeof(kmp_futex_lock_t),
#endif
sizeof(kmp_ticket_lock_t), sizeof(kmp_queuing_lock_t),
sizeof(kmp_drdpa_lock_t)
};
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = { 0 };
// Jump tables for lock accessor/modifier.
void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *) = { 0 };
@ -3219,28 +3291,10 @@ kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p) =
// Use different lock pools for different lock types.
static kmp_indirect_lock_t * __kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = { 0 };
// Inserts the given lock ptr to the lock table.
kmp_lock_index_t
__kmp_insert_indirect_lock(kmp_indirect_lock_t *lck)
{
kmp_lock_index_t next = __kmp_indirect_lock_table_next;
// Check capacity and double the size if required
if (next >= __kmp_indirect_lock_table_size) {
kmp_lock_index_t i;
kmp_lock_index_t size = __kmp_indirect_lock_table_size;
kmp_indirect_lock_t **old_table = __kmp_indirect_lock_table;
__kmp_indirect_lock_table = (kmp_indirect_lock_t **)__kmp_allocate(2*next*sizeof(kmp_indirect_lock_t *));
KMP_MEMCPY(__kmp_indirect_lock_table, old_table, next*sizeof(kmp_indirect_lock_t *));
__kmp_free(old_table);
__kmp_indirect_lock_table_size = 2*next;
}
// Insert lck to the table and return the index.
__kmp_indirect_lock_table[next] = lck;
__kmp_indirect_lock_table_next++;
return next;
}
// User lock allocator for dynamically dispatched locks.
// User lock allocator for dynamically dispatched indirect locks.
// Every entry of the indirect lock table holds the address and type of the allocated indirect lock
// (kmp_indirect_lock_t), and the size of the table doubles when it is full. A destroyed indirect lock
// object is returned to the reusable pool of locks, unique to each lock type.
kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **user_lock, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
@ -3250,15 +3304,33 @@ __kmp_allocate_indirect_lock(void **user_lock, kmp_int32 gtid, kmp_indirect_lock
__kmp_acquire_lock(&__kmp_global_lock, gtid);
if (__kmp_indirect_lock_pool[tag] != NULL) {
// Reuse the allocated and destroyed lock object
lck = __kmp_indirect_lock_pool[tag];
if (OMP_LOCK_T_SIZE < sizeof(void *))
idx = lck->lock->pool.index;
__kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n", lck));
} else {
lck = (kmp_indirect_lock_t *)__kmp_allocate(sizeof(kmp_indirect_lock_t));
idx = __kmp_i_lock_table.next;
// Check capacity and double the size if it is full
if (idx == __kmp_i_lock_table.size) {
// Double up the space for block pointers
int row = __kmp_i_lock_table.size/KMP_I_LOCK_CHUNK;
kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
__kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(2*row*sizeof(kmp_indirect_lock_t *));
KMP_MEMCPY(__kmp_i_lock_table.table, old_table, row*sizeof(kmp_indirect_lock_t *));
__kmp_free(old_table);
// Allocate new objects in the new blocks
for (int i = row; i < 2*row; ++i)
*(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)
__kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
__kmp_i_lock_table.size = 2*idx;
}
__kmp_i_lock_table.next++;
lck = KMP_GET_I_LOCK(idx);
// Allocate a new base lock object
lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
if (OMP_LOCK_T_SIZE < sizeof(void *))
idx = __kmp_insert_indirect_lock(lck);
KA_TRACE(20, ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
}
__kmp_release_lock(&__kmp_global_lock, gtid);
@ -3286,10 +3358,10 @@ __kmp_lookup_indirect_lock(void **user_lock, const char *func)
}
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
if (idx < 0 || idx >= __kmp_indirect_lock_table_size) {
if (idx < 0 || idx >= __kmp_i_lock_table.size) {
KMP_FATAL(LockIsUninitialized, func);
}
lck = __kmp_indirect_lock_table[idx];
lck = KMP_GET_I_LOCK(idx);
} else {
lck = *((kmp_indirect_lock_t **)user_lock);
}
@ -3299,7 +3371,7 @@ __kmp_lookup_indirect_lock(void **user_lock, const char *func)
return lck;
} else {
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
return __kmp_indirect_lock_table[KMP_EXTRACT_I_INDEX(user_lock)];
return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
} else {
return *((kmp_indirect_lock_t **)user_lock);
}
@ -3314,11 +3386,16 @@ __kmp_init_indirect_lock(kmp_dyna_lock_t * lock, kmp_dyna_lockseq_t seq)
KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
seq = lockseq_queuing;
}
#endif
#if KMP_USE_TSX
if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
seq = lockseq_queuing;
}
#endif
kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
kmp_indirect_lock_t *l = __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
KMP_I_LOCK_FUNC(l, init)(l->lock);
KA_TRACE(20, ("__kmp_init_indirect_lock: initialized indirect lock, tag = %x\n", l->type));
KA_TRACE(20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n", seq));
}
static void
@ -3395,7 +3472,7 @@ __kmp_init_lock_hinted(void **lock, int hint)
seq = lockseq_tas;
break;
case kmp_lock_hint_speculative:
#if KMP_HAS_HLE
#if KMP_USE_TSX
seq = lockseq_hle;
#else
seq = lockseq_tas;
@ -3408,6 +3485,14 @@ __kmp_init_lock_hinted(void **lock, int hint)
seq = lockseq_queuing;
#endif
break;
#if KMP_USE_TSX
case kmp_lock_hint_hle:
seq = lockseq_hle;
break;
case kmp_lock_hint_rtm:
seq = lockseq_rtm;
break;
#endif
// Defaults to queuing locks.
case kmp_lock_hint_contended:
case kmp_lock_hint_nonspeculative:
@ -3474,7 +3559,6 @@ __kmp_init_nest_lock_hinted(void **lock, int hint)
case kmp_lock_hint_nonspeculative:
default:
seq = lockseq_nested_queuing;
break;
}
KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
@ -3483,27 +3567,6 @@ __kmp_init_nest_lock_hinted(void **lock, int hint)
#endif
}
#if KMP_USE_ADAPTIVE_LOCKS
# define init_lock_func(table, expand) { \
table[locktag_ticket] = expand(ticket); \
table[locktag_queuing] = expand(queuing); \
table[locktag_adaptive] = expand(queuing); \
table[locktag_drdpa] = expand(drdpa); \
table[locktag_nested_ticket] = expand(ticket); \
table[locktag_nested_queuing] = expand(queuing); \
table[locktag_nested_drdpa] = expand(drdpa); \
}
#else
# define init_lock_func(table, expand) { \
table[locktag_ticket] = expand(ticket); \
table[locktag_queuing] = expand(queuing); \
table[locktag_drdpa] = expand(drdpa); \
table[locktag_nested_ticket] = expand(ticket); \
table[locktag_nested_queuing] = expand(queuing); \
table[locktag_nested_drdpa] = expand(drdpa); \
}
#endif // KMP_USE_ADAPTIVE_LOCKS
// Initializes data for dynamic user locks.
void
__kmp_init_dynamic_user_locks()
@ -3527,24 +3590,62 @@ __kmp_init_dynamic_user_locks()
}
// Initialize lock index table
__kmp_indirect_lock_table = (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *)*1024);
__kmp_indirect_lock_table_size = 1024;
__kmp_indirect_lock_table_next = 0;
__kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
__kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
*(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)
__kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
__kmp_i_lock_table.next = 0;
// Indirect lock size
__kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
__kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
#if KMP_USE_ADAPTIVE_LOCKS
__kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
#endif
__kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
#if KMP_USE_TSX
__kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
#endif
__kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
#if KMP_USE_FUTEX
__kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
#endif
__kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
__kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
__kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
// Initialize lock accessor/modifier
// Could have used designated initializer, but -TP /Qstd=c99 did not work with icl.exe.
#define expand_func(l) (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_##l##_lock_location
init_lock_func(__kmp_indirect_set_location, expand_func);
#undef expand_func
#define expand_func(l) (void (*)(kmp_user_lock_p, kmp_lock_flags_t))__kmp_set_##l##_lock_flags
init_lock_func(__kmp_indirect_set_flags, expand_func);
#undef expand_func
#define expand_func(l) (const ident_t * (*)(kmp_user_lock_p))__kmp_get_##l##_lock_location
init_lock_func(__kmp_indirect_get_location, expand_func);
#undef expand_func
#define expand_func(l) (kmp_lock_flags_t (*)(kmp_user_lock_p))__kmp_get_##l##_lock_flags
init_lock_func(__kmp_indirect_get_flags, expand_func);
#undef expand_func
#define fill_jumps(table, expand, sep) { \
table[locktag##sep##ticket] = expand(ticket); \
table[locktag##sep##queuing] = expand(queuing); \
table[locktag##sep##drdpa] = expand(drdpa); \
}
#if KMP_USE_ADAPTIVE_LOCKS
# define fill_table(table, expand) { \
fill_jumps(table, expand, _); \
table[locktag_adaptive] = expand(queuing); \
fill_jumps(table, expand, _nested_); \
}
#else
# define fill_table(table, expand) { \
fill_jumps(table, expand, _); \
fill_jumps(table, expand, _nested_); \
}
#endif // KMP_USE_ADAPTIVE_LOCKS
#define expand(l) (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_##l##_lock_location
fill_table(__kmp_indirect_set_location, expand);
#undef expand
#define expand(l) (void (*)(kmp_user_lock_p, kmp_lock_flags_t))__kmp_set_##l##_lock_flags
fill_table(__kmp_indirect_set_flags, expand);
#undef expand
#define expand(l) (const ident_t * (*)(kmp_user_lock_p))__kmp_get_##l##_lock_location
fill_table(__kmp_indirect_get_location, expand);
#undef expand
#define expand(l) (kmp_lock_flags_t (*)(kmp_user_lock_p))__kmp_get_##l##_lock_flags
fill_table(__kmp_indirect_get_flags, expand);
#undef expand
__kmp_init_user_locks = TRUE;
}
@ -3562,25 +3663,25 @@ __kmp_cleanup_indirect_user_locks()
while (l != NULL) {
kmp_indirect_lock_t *ll = l;
l = (kmp_indirect_lock_t *)l->lock->pool.next;
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
__kmp_indirect_lock_table[ll->lock->pool.index] = NULL;
}
KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n", ll));
__kmp_free(ll->lock);
__kmp_free(ll);
ll->lock = NULL;
}
}
// Clean up the remaining undestroyed locks.
for (i = 0; i < __kmp_indirect_lock_table_next; i++) {
kmp_indirect_lock_t *l = __kmp_indirect_lock_table[i];
if (l != NULL) {
for (i = 0; i < __kmp_i_lock_table.next; i++) {
kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
if (l->lock != NULL) {
// Locks not destroyed explicitly need to be destroyed here.
KMP_I_LOCK_FUNC(l, destroy)(l->lock);
KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n", l));
__kmp_free(l->lock);
__kmp_free(l);
}
}
// Free the table
__kmp_free(__kmp_indirect_lock_table);
for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
__kmp_free(__kmp_i_lock_table.table[i]);
__kmp_free(__kmp_i_lock_table.table);
__kmp_init_user_locks = FALSE;
}

@ -594,6 +594,10 @@ enum kmp_lock_kind {
lk_tas,
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
lk_hle,
lk_rtm,
#endif
lk_ticket,
lk_queuing,
@ -1030,49 +1034,42 @@ extern void __kmp_cleanup_user_locks();
#if KMP_USE_DYNAMIC_LOCK
#define KMP_HAS_FUTEX (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM))
#define KMP_HAS_HLE (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
#define KMP_USE_FAST_FUTEX 0 && KMP_HAS_FUTEX
#define KMP_USE_FAST_TAS 1 && KMP_HAS_FUTEX
// Shortcuts
#define KMP_USE_FUTEX (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
#define KMP_USE_INLINED_TAS (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0
// List of lock definitions; all nested locks are indirect locks.
// hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
// All nested locks are indirect lock types.
#if KMP_HAS_FUTEX
# if KMP_HAS_HLE
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
# define KMP_LAST_D_LOCK lockseq_hle
#if KMP_USE_TSX
# if KMP_USE_FUTEX
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# else
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
# define KMP_LAST_D_LOCK lockseq_futex
# endif // KMP_HAS_HLE
# if KMP_USE_ADAPTIVE_LOCKS
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) \
m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# else
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# endif // KMP_USE_ADAPTIVE_LOCKS
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
m(nested_tas, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# endif // KMP_USE_FUTEX
# define KMP_LAST_D_LOCK lockseq_hle
#else
# if KMP_HAS_HLE
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
# define KMP_LAST_D_LOCK lockseq_hle
# if KMP_USE_FUTEX
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# define KMP_LAST_D_LOCK lockseq_futex
# else
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
# define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
m(nested_tas, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# define KMP_LAST_D_LOCK lockseq_tas
# endif // KMP_HAS_HLE
# if KMP_USE_ADAPTIVE_LOCKS
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) \
m(nested_tas, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# else
# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
m(nested_tas, a) m(nested_ticket, a) \
m(nested_queuing, a) m(nested_drdpa, a)
# endif // KMP_USE_ADAPTIVE_LOCKS
#endif // KMP_HAS_FUTEX
# endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX
// Information used in dynamic dispatch
#define KMP_LOCK_SHIFT 8 // number of low bits to be used as tag for direct locks
@ -1161,9 +1158,6 @@ extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);
// Returns lock value after removing (shifting) lock tag.
#define KMP_LOCK_STRIP(v) ((v)>>KMP_LOCK_SHIFT)
// Updates __kmp_user_lock_seq with the given lock type.
#define KMP_STORE_LOCK_SEQ(type) (__kmp_user_lock_seq = lockseq_##type)
// Internal entries for hinted lock initializers.
extern void __kmp_init_lock_hinted(void **, int);
extern void __kmp_init_nest_lock_hinted(void **, int);
@ -1206,24 +1200,29 @@ extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lo
? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
: NULL )
//
#define KMP_I_LOCK_CHUNK 1024 // number of kmp_indirect_lock_t objects to be allocated together
// Lock table for indirect locks.
//
// Simple linear structure is used to keep pointers to allocated indirect locks.
extern kmp_indirect_lock_t **__kmp_indirect_lock_table;
// Current size of the lock table; it may increase but never shrink.
extern kmp_lock_index_t __kmp_indirect_lock_table_size;
// Next index to be used for a new indirect lock (= number of indirect locks allocated).
extern kmp_lock_index_t __kmp_indirect_lock_table_next;
typedef struct kmp_indirect_lock_table {
kmp_indirect_lock_t **table; // blocks of indirect locks allocated
kmp_lock_index_t size; // size of the indirect lock table
kmp_lock_index_t next; // index to the next lock to be allocated
} kmp_indirect_lock_table_t;
extern kmp_indirect_lock_table_t __kmp_i_lock_table;
// Returns the indirect lock associated with the given index.
#define KMP_GET_I_LOCK(index) (*(__kmp_i_lock_table.table + (index)/KMP_I_LOCK_CHUNK) + (index)%KMP_I_LOCK_CHUNK)
// Number of locks in a lock block, which is fixed to "1" now.
// TODO: No lock block implementation now. If we do support, we need to manage lock block data
// structure for each indirect lock type.
extern int __kmp_num_locks_in_block;
// Fast lock table lookup without consistency checking
#define KMP_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *)) \
? __kmp_indirect_lock_table[KMP_EXTRACT_I_INDEX(l)] \
: *((kmp_indirect_lock_t **)l) )
#define KMP_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *)) \
? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
: *((kmp_indirect_lock_t **)(l)) )
// Used once in kmp_error.c
extern kmp_int32
@ -1234,7 +1233,6 @@ __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
# define KMP_LOCK_BUSY(v, type) (v)
# define KMP_LOCK_FREE(type) 0
# define KMP_LOCK_STRIP(v) (v)
# define KMP_STORE_LOCK_SEQ(seq)
#endif // KMP_USE_DYNAMIC_LOCK

@ -693,6 +693,14 @@ typedef void (*microtask_t)( int *gtid, int *npr, ... );
# define KMP_USE_DYNAMIC_LOCK 0
#endif
// Enable TSX if dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
# define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64)
# ifndef KMP_USE_ADAPTIVE_LOCKS
# define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
# endif
#endif
// Warning levels
enum kmp_warnings_level {
kmp_warnings_off = 0, /* No warnings */

@ -3873,6 +3873,12 @@ __kmp_stg_print_lock_block( kmp_str_buf_t * buffer, char const * name, void * da
// KMP_LOCK_KIND
// -------------------------------------------------------------------------------------------------
#if KMP_USE_DYNAMIC_LOCK
# define KMP_STORE_LOCK_SEQ(a) (__kmp_user_lock_seq = lockseq_##a)
#else
# define KMP_STORE_LOCK_SEQ(a)
#endif
static void
__kmp_stg_parse_lock_kind( char const * name, char const * value, void * data ) {
if ( __kmp_init_user_locks ) {
@ -3934,8 +3940,19 @@ __kmp_stg_parse_lock_kind( char const * name, char const * value, void * data )
}
}
#endif // KMP_USE_ADAPTIVE_LOCKS
#if KMP_USE_DYNAMIC_LOCK
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
else if ( __kmp_str_match("rtm", 1, value) ) {
if ( __kmp_cpuinfo.rtm ) {
__kmp_user_lock_kind = lk_rtm;
KMP_STORE_LOCK_SEQ(rtm);
} else {
KMP_WARNING( AdaptiveNotSupported, name, value );
__kmp_user_lock_kind = lk_queuing;
KMP_STORE_LOCK_SEQ(queuing);
}
}
else if ( __kmp_str_match("hle", 1, value) ) {
__kmp_user_lock_kind = lk_hle;
KMP_STORE_LOCK_SEQ(hle);
}
#endif
@ -3963,6 +3980,16 @@ __kmp_stg_print_lock_kind( kmp_str_buf_t * buffer, char const * name, void * dat
break;
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
case lk_rtm:
value = "rtm";
break;
case lk_hle:
value = "hle";
break;
#endif
case lk_ticket:
value = "ticket";
break;
@ -4046,7 +4073,7 @@ __kmp_stg_parse_adaptive_lock_props( const char *name, const char *value, void *
}
num = __kmp_str_to_int( buf, *next );
if ( num < 1 ) { // The number of retries should be > 0
if ( num < 0 ) { // The number of retries should be >= 0
msg = KMP_I18N_STR( ValueTooSmall );
num = 1;
} else if ( num > KMP_INT_MAX ) {
@ -4070,12 +4097,8 @@ __kmp_stg_parse_adaptive_lock_props( const char *name, const char *value, void *
KMP_WARNING( EnvSyntaxError, name, value );
return;
}
if( max_retries != 0 ) {
__kmp_adaptive_backoff_params.max_soft_retries = max_retries;
}
if( max_badness != 0 ) {
__kmp_adaptive_backoff_params.max_badness = max_badness;
}
__kmp_adaptive_backoff_params.max_soft_retries = max_retries;
__kmp_adaptive_backoff_params.max_badness = max_badness;
}