In Rust async programming we have two kinds of mutex to choose from: the one provided by the standard library (`std::sync::Mutex`), which I will call the standard lock below, and the ones provided by async runtimes, such as `async_std::sync::Mutex`, which I will call the async lock. How do the two differ, and which one should we choose? This article gives an answer.

As is my habit, the conclusion first:

- If the standard lock meets your needs, prefer the standard lock
- Only when the standard lock cannot be used, fall back to the async lock
## How the two locks compare

Similarities:

- Both provide mutual exclusion (a sketch follows this list)

Differences:

- The standard lock's guard cannot be held across an `.await`
- The async lock's guard can be held across an `.await`
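For a plain critical section the two behave the same. As a quick illustration (my own hypothetical counter example, not from the original), either lock can protect a shared counter; only the locking syntax differs:

```rust
use std::sync::Arc;

// Standard lock: the waiting thread blocks.
fn std_counter() {
    let c = Arc::new(std::sync::Mutex::new(0_i32));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let c = c.clone();
            std::thread::spawn(move || {
                for _ in 0..1_000 {
                    *c.lock().unwrap() += 1; // guard dropped at end of statement
                }
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(*c.lock().unwrap(), 4_000);
}

// Async lock: a waiting task yields to the executor instead of blocking.
async fn async_counter() {
    let c = Arc::new(async_std::sync::Mutex::new(0_i32));
    let tasks: Vec<_> = (0..4)
        .map(|_| {
            let c = c.clone();
            async_std::task::spawn(async move {
                for _ in 0..1_000 {
                    *c.lock().await += 1;
                }
            })
        })
        .collect();
    for t in tasks {
        t.await;
    }
    assert_eq!(*c.lock().await, 4_000);
}
```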
If your code needs to hold a lock across an `.await`, the async lock is your only option. For example:
```rust
use std::sync::Arc;
use async_std::sync::Mutex;

struct SomeSharedStructure {
    v: i32,
}

impl SomeSharedStructure {
    async fn do1(&mut self) {}
}

#[test]
fn test_mutex_guard_await() {
    let s = Arc::new(Mutex::new(SomeSharedStructure { v: 0 }));
    async_std::task::spawn(async move {
        // The async guard may be held across the .await below.
        let mut s = s.lock().await;
        s.do1().await;
    });
}

#[test]
fn test_mutex_guard_cannot_compile() {
    let s = Arc::new(std::sync::Mutex::new(SomeSharedStructure { v: 0 }));
    async_std::task::spawn(async move {
        // Holding a std::sync::MutexGuard across .await does not compile.
        let mut s = s.lock().unwrap();
        s.do1().await;
    });
}
```
`test_mutex_guard_cannot_compile` fails to build, with the error `future cannot be sent between threads safely`; more precisely, a `std::sync::MutexGuard` cannot be sent between threads. That is easy to understand: `Mutex` exists precisely to solve thread-synchronization problems, and if the guard over locked data could be handed from one thread to another, the thread safety Rust promises would become a joke.
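Often you do not actually need the guard across the `.await`; in that case you can keep the standard lock by scoping the guard so it is dropped before the await point. A minimal sketch of that restructuring (the function and names here are mine, for illustration only):

```rust
use std::sync::{Arc, Mutex};
use std::time::Duration;

struct SomeSharedStructure {
    v: i32,
}

async fn works_with_std_lock(s: Arc<Mutex<SomeSharedStructure>>) {
    // Lock in a narrow scope and copy out what we need, so the
    // non-Send std::sync::MutexGuard is gone before the .await.
    let v = {
        let guard = s.lock().unwrap();
        guard.v
    }; // guard dropped here

    // The future no longer holds a guard across this await point,
    // so it is Send and can be spawned onto a multi-threaded executor.
    async_std::task::sleep(Duration::from_millis(1)).await;
    println!("read v = {v}");
}
```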
An `async_std::sync::MutexGuard`, by contrast, can be sent between threads. See the code of the async-mutex crate:
```rust
/// A guard that releases the mutex when dropped.
pub struct MutexGuard<'a, T: ?Sized>(&'a Mutex<T>);

unsafe impl<T: Send + ?Sized> Send for MutexGuard<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for MutexGuard<'_, T> {}
```
As for how `async_std::sync::MutexGuard` manages to be sendable between threads while still remaining thread safe, I will explain that in a separate article.
## Choosing between the two locks
So the question becomes: which scenarios call for which lock?

- If the guard must live across an `.await`, the async lock is the only choice
- In every other case, does it matter which one we pick?
## tokio's explanation of the async lock

Let's first look at what the tokio documentation says about the async lock:
> **Which kind of mutex should you use?**
>
> Contrary to popular belief, it is ok and often preferred to use the ordinary Mutex from the standard library in asynchronous code. This section will help you decide on which kind of mutex you should use.
In short: if the standard lock works, use the standard lock. But why?
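The pattern the tokio documentation recommends for this is to hide the standard mutex behind a type whose methods are all synchronous, so that no guard can possibly live across an `.await`. A sketch adapted from tokio's docs (the name `CanIncrement` comes from there):

```rust
use std::sync::Mutex;

// All locking happens inside non-async methods, so the guard
// can never be held across an await point.
struct CanIncrement {
    mutex: Mutex<i32>,
}

impl CanIncrement {
    fn increment(&self) {
        let mut lock = self.mutex.lock().unwrap();
        *lock += 1;
    }
}

async fn increment_and_do_stuff(can_incr: &CanIncrement) {
    can_incr.increment();
    // Anything awaited here runs after the guard has been dropped.
}
```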
## Is the standard library lock expensive?

Before reading the code carefully, I assumed the async lock would be the cheaper one: it is built on atomics, so acquiring it never triggers a system call, whereas the standard lock does and is therefore expensive. But is that actually true? Let's trace through the standard lock's implementation.
1. lock
```rust
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
    unsafe {
        self.inner.raw_lock();
        MutexGuard::new(self)
    }
}
```
The standard lock delegates to `raw_lock` on its inner mutex.
2. raw_lock
```rust
pub struct MovableMutex(Box<imp::Mutex>);

/// Locks the mutex blocking the current thread until it is available.
#[inline]
pub fn raw_lock(&self) {
    unsafe { self.0.lock() }
}
```
`self.0` is in fact an `imp::Mutex`; following it leads to the standard library's platform code in src/sys/unix/mutex.rs.
3. The platform-specific `lock`

Taking the Linux implementation as our example:
```rust
#[inline]
pub unsafe fn lock(&self) {
    let r = libc::pthread_mutex_lock(self.inner.get());
    debug_assert_eq!(r, 0);
}
```
So the final call is `libc::pthread_mutex_lock`.
4. `pthread_mutex_lock` in glibc

I will trace the glibc 2.23 sources I have at hand. On x86, pthread_mutex_lock.c is in fact just `#include "nptl/pthread_mutex_lock.c"`:
```c
int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  ...

  LIBC_PROBE (mutex_acquired, 1, mutex);
  return 0;
}
```
Here we only look at the simplest case: nobody else holds the lock and we acquire it straight away. A quick read shows this path is also implemented with an atomic CAS; there is no system call at all. And compared with the code in async_std/tokio, the code in libc is far more battle-tested and far more aware of each platform's strengths and weaknesses.
The full code is rather long, so I attach it below for anyone interested. It really is a case of **Talk is cheap. Show me the code.** As programmers we should guess less and read the code more.
```c
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
```
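If you want to see the fast path with your own eyes rather than only reading the code, a rough uncontended micro-benchmark is easy to write. The sketch below is mine and the methodology is deliberately crude; running it under `strace` on Linux should show no futex system calls for the loop, since the uncontended path stays in user space:

```rust
use std::sync::Mutex;
use std::time::Instant;

fn main() {
    let m = Mutex::new(0_u64);
    let iters: u64 = 10_000_000;

    let start = Instant::now();
    for _ in 0..iters {
        // Uncontended lock/unlock: on Linux this stays on the
        // atomic fast path and should never enter the kernel.
        *m.lock().unwrap() += 1;
    }
    let elapsed = start.elapsed();

    assert_eq!(*m.lock().unwrap(), iters);
    println!(
        "{iters} uncontended lock/unlock pairs in {elapsed:?} (~{:.1} ns each)",
        elapsed.as_nanos() as f64 / iters as f64
    );
}
```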