radix-tree,shmem: introduce radix_tree_iter_next()

shmem likes to occasionally drop the lock, schedule, then reacquire the
lock and continue with the iteration from the last place it left off.
This is currently done with a pretty ugly goto.  Introduce
radix_tree_iter_next() and use it throughout shmem.c.

[koct9i@gmail.com: fix bug in radix_tree_iter_next() for tagged iteration]
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Matthew Wilcox, 2016-03-17 14:22:06 -07:00
Committer: Linus Torvalds
Parent:    2cf938aae1
Commit:    7165092fe5

2 changed files with 19 additions and 9 deletions
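
Before the diff itself, here is a minimal kernel-style sketch of the pattern the commit message describes, assuming a caller that walks a radix tree under the RCU read lock. The function name walk_tree() and its root parameter are illustrative only; the real call sites are the shmem.c hunks below.

/*
 * Illustrative sketch only -- not part of this commit.  The old
 * "goto restart" pattern is shown in a comment for comparison.
 */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void walk_tree(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long start = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, start) {
		/* ... process the entry in *slot ... */

		if (need_resched()) {
			/*
			 * cond_resched_rcu() drops and reacquires the RCU
			 * read lock, so the cached chunk may now be stale.
			 * The old pattern was:
			 *	start = iter.index + 1;
			 *	goto restart;
			 * The new helper resumes from the next index instead:
			 */
			cond_resched_rcu();
			slot = radix_tree_iter_next(&iter);
		}
	}
	rcu_read_unlock();
}

The helper simply records that iteration should resume at iter.index + 1 and returns NULL, so the next pass of the radix_tree_for_each_slot() loop looks up a fresh chunk rather than trusting the one that may have been invalidated while the lock was dropped.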

include/linux/radix-tree.h

@@ -402,6 +402,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
 	return NULL;
 }
 
+/**
+ * radix_tree_iter_next - resume iterating when the chunk may be invalid
+ * @iter: iterator state
+ *
+ * If the iterator needs to release then reacquire a lock, the chunk may
+ * have been invalidated by an insertion or deletion. Call this function
+ * to continue the iteration from the next index.
+ */
+static inline __must_check
+void **radix_tree_iter_next(struct radix_tree_iter *iter)
+{
+	iter->next_index = iter->index + 1;
+	iter->tags = 0;
+	return NULL;
+}
+
 /**
  * radix_tree_chunk_size - get current chunk size
  *
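
The bracketed [koct9i@gmail.com] fix folded into the commit is the "iter->tags = 0;" line. For tagged walks the iterator caches a bitmap of tags for the current chunk, and, as far as can be inferred from the iterator macros, stale bits left in it after abandoning the chunk could let the walk keep stepping through the old, possibly invalidated chunk instead of forcing a fresh lookup at the new index. Below is a hedged sketch of a tagged walk using the helper, modelled on the shmem_wait_for_pins() hunk further down; MY_TAG and walk_tagged() are placeholder names (shmem uses SHMEM_TAG_PINNED).

/* Illustrative sketch only -- not part of this commit. */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

#define MY_TAG	0	/* placeholder; radix tree tags are small integers */

static void walk_tagged(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long start = 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, root, &iter, start, MY_TAG) {
		/* ... examine the tagged entry in *slot ... */

		if (need_resched()) {
			cond_resched_rcu();
			/*
			 * Also zeroes the cached tag bitmap (iter->tags), so
			 * the tagged iterator cannot resume inside the chunk
			 * that was just abandoned.
			 */
			slot = radix_tree_iter_next(&iter);
		}
	}
	rcu_read_unlock();
}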

mm/shmem.c

@@ -376,7 +376,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 
 	rcu_read_lock();
 
-restart:
 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
 		if (iter.index >= end)
 			break;
@@ -393,8 +392,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 
 		if (need_resched()) {
 			cond_resched_rcu();
-			start = iter.index + 1;
-			goto restart;
+			slot = radix_tree_iter_next(&iter);
 		}
 	}
 
@@ -1944,7 +1942,6 @@ static void shmem_tag_pins(struct address_space *mapping)
 	start = 0;
 	rcu_read_lock();
 
-restart:
 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
 		page = radix_tree_deref_slot(slot);
 		if (!page || radix_tree_exception(page)) {
@@ -1961,8 +1958,7 @@ static void shmem_tag_pins(struct address_space *mapping)
 
 		if (need_resched()) {
 			cond_resched_rcu();
-			start = iter.index + 1;
-			goto restart;
+			slot = radix_tree_iter_next(&iter);
 		}
 	}
 	rcu_read_unlock();
@@ -1999,7 +1995,6 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 		start = 0;
 		rcu_read_lock();
 
-restart:
 		radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
 					   start, SHMEM_TAG_PINNED) {
 
@@ -2033,8 +2028,7 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 continue_resched:
 			if (need_resched()) {
 				cond_resched_rcu();
-				start = iter.index + 1;
-				goto restart;
+				slot = radix_tree_iter_next(&iter);
 			}
 		}
 		rcu_read_unlock();