btrfs: convert find_lock_delalloc_range() to use a folio
Instead of passing in a page for locked_page, pass in the folio instead. We only use the folio itself to validate some range assumptions, and then pass it into other functions. Signed-off-by: Josef Bacik <josef@toxicpanda.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
dc6c745447
commit
c987f1e6d4
3 changed files with 20 additions and 20 deletions
|
@ -304,8 +304,8 @@ static noinline int lock_delalloc_pages(struct inode *inode,
|
||||||
*/
|
*/
|
||||||
EXPORT_FOR_TESTS
|
EXPORT_FOR_TESTS
|
||||||
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
|
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
|
||||||
struct page *locked_page, u64 *start,
|
struct folio *locked_folio,
|
||||||
u64 *end)
|
u64 *start, u64 *end)
|
||||||
{
|
{
|
||||||
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
|
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
|
||||||
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
|
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
|
||||||
|
@ -323,9 +323,9 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
|
||||||
/* Caller should pass a valid @end to indicate the search range end */
|
/* Caller should pass a valid @end to indicate the search range end */
|
||||||
ASSERT(orig_end > orig_start);
|
ASSERT(orig_end > orig_start);
|
||||||
|
|
||||||
/* The range should at least cover part of the page */
|
/* The range should at least cover part of the folio */
|
||||||
ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
|
ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
|
||||||
orig_end <= page_offset(locked_page)));
|
orig_end <= folio_pos(locked_folio)));
|
||||||
again:
|
again:
|
||||||
/* step one, find a bunch of delalloc bytes starting at start */
|
/* step one, find a bunch of delalloc bytes starting at start */
|
||||||
delalloc_start = *start;
|
delalloc_start = *start;
|
||||||
|
@ -342,25 +342,25 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* start comes from the offset of locked_page. We have to lock
|
* start comes from the offset of locked_folio. We have to lock
|
||||||
* pages in order, so we can't process delalloc bytes before
|
* folios in order, so we can't process delalloc bytes before
|
||||||
* locked_page
|
* locked_folio
|
||||||
*/
|
*/
|
||||||
if (delalloc_start < *start)
|
if (delalloc_start < *start)
|
||||||
delalloc_start = *start;
|
delalloc_start = *start;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* make sure to limit the number of pages we try to lock down
|
* make sure to limit the number of folios we try to lock down
|
||||||
*/
|
*/
|
||||||
if (delalloc_end + 1 - delalloc_start > max_bytes)
|
if (delalloc_end + 1 - delalloc_start > max_bytes)
|
||||||
delalloc_end = delalloc_start + max_bytes - 1;
|
delalloc_end = delalloc_start + max_bytes - 1;
|
||||||
|
|
||||||
/* step two, lock all the pages after the page that has start */
|
/* step two, lock all the folios after the folio that has start */
|
||||||
ret = lock_delalloc_pages(inode, locked_page,
|
ret = lock_delalloc_pages(inode, &locked_folio->page,
|
||||||
delalloc_start, delalloc_end);
|
delalloc_start, delalloc_end);
|
||||||
ASSERT(!ret || ret == -EAGAIN);
|
ASSERT(!ret || ret == -EAGAIN);
|
||||||
if (ret == -EAGAIN) {
|
if (ret == -EAGAIN) {
|
||||||
/* some of the pages are gone, lets avoid looping by
|
/* some of the folios are gone, let's avoid looping by
|
||||||
* shortening the size of the delalloc range we're searching
|
* shortening the size of the delalloc range we're searching
|
||||||
*/
|
*/
|
||||||
free_extent_state(cached_state);
|
free_extent_state(cached_state);
|
||||||
|
@ -384,7 +384,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
|
||||||
|
|
||||||
unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
|
unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
__unlock_for_delalloc(inode, locked_page,
|
__unlock_for_delalloc(inode, &locked_folio->page,
|
||||||
delalloc_start, delalloc_end);
|
delalloc_start, delalloc_end);
|
||||||
cond_resched();
|
cond_resched();
|
||||||
goto again;
|
goto again;
|
||||||
|
@ -1209,7 +1209,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
|
||||||
/* Lock all (subpage) delalloc ranges inside the folio first. */
|
/* Lock all (subpage) delalloc ranges inside the folio first. */
|
||||||
while (delalloc_start < page_end) {
|
while (delalloc_start < page_end) {
|
||||||
delalloc_end = page_end;
|
delalloc_end = page_end;
|
||||||
if (!find_lock_delalloc_range(&inode->vfs_inode, &folio->page,
|
if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
|
||||||
&delalloc_start, &delalloc_end)) {
|
&delalloc_start, &delalloc_end)) {
|
||||||
delalloc_start = delalloc_end + 1;
|
delalloc_start = delalloc_end + 1;
|
||||||
continue;
|
continue;
|
||||||
|
|
|
@ -368,7 +368,7 @@ int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array);
|
||||||
|
|
||||||
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
|
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
|
||||||
bool find_lock_delalloc_range(struct inode *inode,
|
bool find_lock_delalloc_range(struct inode *inode,
|
||||||
struct page *locked_page, u64 *start,
|
struct folio *locked_folio, u64 *start,
|
||||||
u64 *end);
|
u64 *end);
|
||||||
#endif
|
#endif
|
||||||
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
|
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||||
|
|
|
@ -180,7 +180,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
|
||||||
set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
|
set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
|
||||||
start = 0;
|
start = 0;
|
||||||
end = start + PAGE_SIZE - 1;
|
end = start + PAGE_SIZE - 1;
|
||||||
found = find_lock_delalloc_range(inode, locked_page, &start,
|
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
|
||||||
&end);
|
&end);
|
||||||
if (!found) {
|
if (!found) {
|
||||||
test_err("should have found at least one delalloc");
|
test_err("should have found at least one delalloc");
|
||||||
|
@ -211,7 +211,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
|
||||||
set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
|
set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
|
||||||
start = test_start;
|
start = test_start;
|
||||||
end = start + PAGE_SIZE - 1;
|
end = start + PAGE_SIZE - 1;
|
||||||
found = find_lock_delalloc_range(inode, locked_page, &start,
|
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
|
||||||
&end);
|
&end);
|
||||||
if (!found) {
|
if (!found) {
|
||||||
test_err("couldn't find delalloc in our range");
|
test_err("couldn't find delalloc in our range");
|
||||||
|
@ -245,7 +245,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
|
||||||
}
|
}
|
||||||
start = test_start;
|
start = test_start;
|
||||||
end = start + PAGE_SIZE - 1;
|
end = start + PAGE_SIZE - 1;
|
||||||
found = find_lock_delalloc_range(inode, locked_page, &start,
|
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
|
||||||
&end);
|
&end);
|
||||||
if (found) {
|
if (found) {
|
||||||
test_err("found range when we shouldn't have");
|
test_err("found range when we shouldn't have");
|
||||||
|
@ -266,7 +266,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
|
||||||
set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
|
set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
|
||||||
start = test_start;
|
start = test_start;
|
||||||
end = start + PAGE_SIZE - 1;
|
end = start + PAGE_SIZE - 1;
|
||||||
found = find_lock_delalloc_range(inode, locked_page, &start,
|
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
|
||||||
&end);
|
&end);
|
||||||
if (!found) {
|
if (!found) {
|
||||||
test_err("didn't find our range");
|
test_err("didn't find our range");
|
||||||
|
@ -307,7 +307,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
|
||||||
* this changes at any point in the future we will need to fix this
|
* this changes at any point in the future we will need to fix this
|
||||||
* tests expected behavior.
|
* tests expected behavior.
|
||||||
*/
|
*/
|
||||||
found = find_lock_delalloc_range(inode, locked_page, &start,
|
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
|
||||||
&end);
|
&end);
|
||||||
if (!found) {
|
if (!found) {
|
||||||
test_err("didn't find our range");
|
test_err("didn't find our range");
|
||||||
|
|
Loading…
Reference in a new issue