futex: Rename: queue_{,un}lock()

In order to prepare for introducing these symbols into the global
namespace, rename them:

  s/queue_\(un\)*lock/futex_q_\1lock/g

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: André Almeida <andrealmeid@collabora.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: André Almeida <andrealmeid@collabora.com>
Link: https://lore.kernel.org/r/20210923171111.300673-6-andrealmeid@collabora.com
Author: Peter Zijlstra
Date:   2021-09-23 14:10:54 -03:00
parent 5622eb2052
commit e7ba9c8fed

@@ -132,7 +132,7 @@
  *
  * Note that a new waiter is accounted for in (a) even when it is possible that
  * the wait call can return error, in which case we backtrack from it in (b).
- * Refer to the comment in queue_lock().
+ * Refer to the comment in futex_q_lock().
  *
  * Similarly, in order to account for waiters being requeued on another
  * address we always increment the waiters for the destination bucket before
@@ -2410,7 +2410,7 @@ int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
 }
 
 /* The key must be already stored in q->key. */
-static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
+static inline struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
 	__acquires(&hb->lock)
 {
 	struct futex_hash_bucket *hb;
@@ -2420,9 +2420,9 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	/*
 	 * Increment the counter before taking the lock so that
 	 * a potential waker won't miss a to-be-slept task that is
-	 * waiting for the spinlock. This is safe as all queue_lock()
+	 * waiting for the spinlock. This is safe as all futex_q_lock()
 	 * users end up calling futex_queue(). Similarly, for housekeeping,
-	 * decrement the counter at queue_unlock() when some error has
+	 * decrement the counter at futex_q_unlock() when some error has
 	 * occurred and we don't end up adding the task to the list.
 	 */
 	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
@@ -2434,7 +2434,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 }
 
 static inline void
-queue_unlock(struct futex_hash_bucket *hb)
+futex_q_unlock(struct futex_hash_bucket *hb)
 	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
@@ -2870,12 +2870,12 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
 		return ret;
 
 retry_private:
-	*hb = queue_lock(q);
+	*hb = futex_q_lock(q);
 
 	ret = get_futex_value_locked(&uval, uaddr);
 
 	if (ret) {
-		queue_unlock(*hb);
+		futex_q_unlock(*hb);
 
 		ret = get_user(uval, uaddr);
 		if (ret)
@@ -2888,7 +2888,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
 	}
 
 	if (uval != val) {
-		queue_unlock(*hb);
+		futex_q_unlock(*hb);
 		ret = -EWOULDBLOCK;
 	}
 
@@ -3006,7 +3006,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
 		goto out;
 
 retry_private:
-	hb = queue_lock(&q);
+	hb = futex_q_lock(&q);
 
 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
 				   &exiting, 0);
@@ -3030,7 +3030,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
 			 *   exit to complete.
 			 * - EAGAIN: The user space value changed.
 			 */
-			queue_unlock(hb);
+			futex_q_unlock(hb);
 			/*
 			 * Handle the case where the owner is in the middle of
 			 * exiting. Wait for the exit to complete otherwise
@@ -3126,7 +3126,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
 	goto out;
 
 out_unlock_put_key:
-	queue_unlock(hb);
+	futex_q_unlock(hb);
 
 out:
 	if (to) {
@@ -3136,7 +3136,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
-	queue_unlock(hb);
+	futex_q_unlock(hb);
 
 	ret = fault_in_user_writeable(uaddr);
 	if (ret)
@@ -3421,7 +3421,7 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	 * shared futexes. We need to compare the keys:
 	 */
 	if (match_futex(&q.key, &key2)) {
-		queue_unlock(hb);
+		futex_q_unlock(hb);
 		ret = -EINVAL;
 		goto out;
 	}
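
Editor's note: the comment preserved in the @@ -2420 hunk describes the waiter-accounting
discipline the renamed helpers pair up around: the bucket's waiter count is incremented
before the bucket lock is taken, and futex_q_unlock() undoes that increment on the error
paths that never queue the task (as futex_wait_setup() does above when the futex word has
changed). The following is a minimal userspace sketch of that pairing, using stand-in
types and a pthread mutex in place of the kernel's futex_hash_bucket, spinlock and
hb_waiters_{inc,dec}() helpers; it illustrates the protocol and is not the kernel code.

/*
 * Minimal userspace model of the futex_q_lock()/futex_q_unlock() pairing.
 * All names and types here are simplified stand-ins, not the kernel's.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct hash_bucket {
	atomic_int waiters;		/* models hb->waiters */
	pthread_mutex_t lock;		/* models hb->lock (a spinlock in the kernel) */
};

/* Models futex_q_lock(): account the waiter first, then take the bucket lock. */
static void model_q_lock(struct hash_bucket *hb)
{
	/* Bump the count before locking so a concurrent waker cannot miss us. */
	atomic_fetch_add(&hb->waiters, 1);	/* kernel: hb_waiters_inc() */
	pthread_mutex_lock(&hb->lock);
}

/* Models futex_q_unlock(): the error-path backout, undoing the accounting. */
static void model_q_unlock(struct hash_bucket *hb)
{
	pthread_mutex_unlock(&hb->lock);
	atomic_fetch_sub(&hb->waiters, 1);	/* kernel: hb_waiters_dec() */
}

int main(void)
{
	struct hash_bucket hb = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int uval = 1, val = 0;			/* pretend the futex word changed */

	atomic_init(&hb.waiters, 0);

	model_q_lock(&hb);
	if (uval != val)			/* value re-check failed ... */
		model_q_unlock(&hb);		/* ... so back out instead of queueing */

	printf("waiters after backout: %d\n", atomic_load(&hb.waiters)); /* prints 0 */
	return 0;
}

Note that in the kernel the increment additionally implies a full memory barrier (the
"implies smp_mb(); (A)" comment in the hunk above), which the pthread mutex in this
sketch only loosely approximates.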