Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
Synced 2025-10-02 06:06:17 +10:00
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: use join_transaction in btrfs_evict_inode()
  Btrfs - use %pU to print fsid
  Btrfs: fix extent state leak on failed nodatasum reads
  btrfs: fix unlocked access of delalloc_inodes
  Btrfs: avoid stack bloat in btrfs_ioctl_fs_info()
  btrfs: remove 64bit alignment padding to allow extent_buffer to fit into one fewer cacheline
  Btrfs: clear current->journal_info on async transaction commit
  Btrfs: make sure to recheck for bitmaps in clusters
  btrfs: remove unneeded includes from scrub.c
  btrfs: reinitialize scrub workers
  btrfs: scrub: errors in tree enumeration
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: unlock the trans lock properly
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: fix duplicate checking logic
  Btrfs: fix the allocator loop logic
  Btrfs: fix bitmap regression
  Btrfs: don't commit the transaction if we dont have enough pinned bytes
  Btrfs: noinline the cluster searching functions
  Btrfs: cache bitmaps when searching for a cluster

This commit is contained in:
commit 3c25fa740e
fs/btrfs/ctree.c
@@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
 	u32 nr;
 	u32 blocksize;
 	u32 nscan = 0;
+	bool map = true;
 
 	if (level != 1)
 		return;
@@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,
 
 	nritems = btrfs_header_nritems(node);
 	nr = slot;
+	if (node->map_token || path->skip_locking)
+		map = false;
+
 	while (1) {
-		if (!node->map_token) {
+		if (map && !node->map_token) {
 			unsigned long offset = btrfs_node_key_ptr_offset(nr);
 			map_private_extent_buffer(node, offset,
 						  sizeof(struct btrfs_key_ptr),
@@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
 			gen = btrfs_node_ptr_generation(node, nr);
-			if (node->map_token) {
+			if (map && node->map_token) {
 				unmap_extent_buffer(node, node->map_token,
 						    KM_USER1);
 				node->map_token = NULL;
@@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((nread > 65536 || nscan > 32))
 			break;
 	}
-	if (node->map_token) {
+	if (map && node->map_token) {
 		unmap_extent_buffer(node, node->map_token, KM_USER1);
 		node->map_token = NULL;
 	}
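The reada_for_search() hunks above compute a single map decision up front and then guard every map/unmap of the extent buffer with it. A minimal userspace sketch of the pattern; the struct and helper names here are hypothetical stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct buffer {
	void *map_token;	/* non-NULL while someone has it mapped */
	bool skip_locking;
};

/* Hypothetical stand-ins for map_private_extent_buffer()/unmap_extent_buffer(). */
static void map_buffer(struct buffer *b)   { b->map_token = b; }
static void unmap_buffer(struct buffer *b) { b->map_token = NULL; }

static void scan(struct buffer *b)
{
	/* Decide once: if the buffer is already mapped by someone else, or
	 * locking is being skipped, never touch the mapping ourselves. */
	bool map = !(b->map_token || b->skip_locking);

	for (int nr = 0; nr < 3; nr++) {
		if (map && !b->map_token)
			map_buffer(b);
		printf("slot %d scanned (mapped=%d)\n", nr, b->map_token != NULL);
		if (map && b->map_token)
			unmap_buffer(b);
	}
}

int main(void)
{
	struct buffer b = { .map_token = NULL, .skip_locking = true };

	scan(&b);	/* with skip_locking set, scan never maps */
	return 0;
}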
fs/btrfs/disk-io.c
@@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->scrub_pause_wait);
 	init_rwsem(&fs_info->scrub_super_lock);
 	fs_info->scrub_workers_refcnt = 0;
-	btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-			   fs_info->thread_pool_size, &fs_info->generic_worker);
 
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
@@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 
 	INIT_LIST_HEAD(&splice);
 
-	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
 	spin_lock(&root->fs_info->delalloc_lock);
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
 
 	while (!list_empty(&splice)) {
 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
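The btrfs_destroy_delalloc_inodes() hunk moves the list splice under delalloc_lock so the shared list is never emptied while another CPU may be modifying it. A sketch of the corrected ordering, with a pthread mutex and a toy list standing in for the kernel spinlock and list_head:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int val; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;	/* models fs_info->delalloc_inodes */

static void destroy_all(void)
{
	struct node *splice;

	/* The old code spliced the list here, before taking the lock,
	 * racing with concurrent writers. */
	pthread_mutex_lock(&lock);
	splice = shared_list;		/* splice the list... */
	shared_list = NULL;		/* ...only while holding the lock */
	pthread_mutex_unlock(&lock);

	for (struct node *n = splice; n; n = n->next)
		printf("draining %d\n", n->val);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	shared_list = &a;
	destroy_all();
	return 0;
}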
fs/btrfs/extent-tree.c
@@ -3089,6 +3089,13 @@ alloc:
 		}
 		goto again;
 	}
+
+	/*
+	 * If we have less pinned bytes than we want to allocate then
+	 * don't bother committing the transaction, it won't help us.
+	 */
+	if (data_sinfo->bytes_pinned < bytes)
+		committed = 1;
 	spin_unlock(&data_sinfo->lock);
 
 	/* commit the current transaction and try again */
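The added check encodes a simple invariant: a commit can reclaim at most bytes_pinned, so when that is already smaller than the request the allocator behaves as if the commit had happened and fails fast. A toy illustration of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* A commit can free at most bytes_pinned, so it is skipped
	 * whenever it provably cannot satisfy the request. */
	unsigned long long bytes_pinned = 1ULL << 20;	/* 1 MiB pinned */
	unsigned long long bytes = 4ULL << 20;		/* 4 MiB wanted */
	int committed = 0;

	if (bytes_pinned < bytes)
		committed = 1;	/* act as if already committed: fail fast */

	printf("%s\n", committed ? "skip commit, return ENOSPC"
				 : "commit and retry");
	return 0;
}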
@@ -5211,9 +5218,7 @@ loop:
 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
 	 * again
 	 */
-	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
-	    (found_uncached_bg || empty_size || empty_cluster ||
-	     allowed_chunk_alloc)) {
+	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
 		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 			found_uncached_bg = false;
@@ -5253,32 +5258,36 @@ loop:
 			goto search;
 		}
 
-		if (loop < LOOP_CACHING_WAIT) {
-			loop++;
-			goto search;
-		}
+		loop++;
 
 		if (loop == LOOP_ALLOC_CHUNK) {
-			empty_size = 0;
-			empty_cluster = 0;
-		}
-
-		if (allowed_chunk_alloc) {
-			ret = do_chunk_alloc(trans, root, num_bytes +
-					     2 * 1024 * 1024, data,
-					     CHUNK_ALLOC_LIMITED);
-			allowed_chunk_alloc = 0;
-			done_chunk_alloc = 1;
-		} else if (!done_chunk_alloc &&
-			   space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
-			space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+			if (allowed_chunk_alloc) {
+				ret = do_chunk_alloc(trans, root, num_bytes +
+						     2 * 1024 * 1024, data,
+						     CHUNK_ALLOC_LIMITED);
+				allowed_chunk_alloc = 0;
+				if (ret == 1)
+					done_chunk_alloc = 1;
+			} else if (!done_chunk_alloc &&
+				   space_info->force_alloc ==
+				   CHUNK_ALLOC_NO_FORCE) {
+				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+			}
+
+			/*
+			 * We didn't allocate a chunk, go ahead and drop the
+			 * empty size and loop again.
+			 */
+			if (!done_chunk_alloc)
+				loop = LOOP_NO_EMPTY_SIZE;
 		}
 
-		if (loop < LOOP_NO_EMPTY_SIZE) {
-			loop++;
-			goto search;
+		if (loop == LOOP_NO_EMPTY_SIZE) {
+			empty_size = 0;
+			empty_cluster = 0;
 		}
-		ret = -ENOSPC;
+
+		goto search;
 	} else if (!ins->objectid) {
 		ret = -ENOSPC;
 	} else if (ins->objectid) {
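For orientation, loop here walks an escalation ladder of allocator passes. A sketch of the LOOP_* stages as they appear in extent-tree.c of this era; treat the exact names and values as an assumption, the comments are paraphrased:

/*
 * The allocator's fallback ladder. The fix makes every failed pass do
 * loop++, performs chunk allocation at LOOP_ALLOC_CHUNK, and only drops
 * empty_size/empty_cluster once LOOP_NO_EMPTY_SIZE is reached, instead
 * of short-circuiting the later stages.
 */
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,		/* try the ideal (cached) block group */
	LOOP_CACHING_NOWAIT = 1,	/* use only already-cached groups */
	LOOP_CACHING_WAIT = 2,		/* wait for caching to progress */
	LOOP_ALLOC_CHUNK = 3,		/* allocate a new chunk, then retry */
	LOOP_NO_EMPTY_SIZE = 4,		/* final pass: no alignment slack */
};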
fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
 	unsigned long map_len;
 	struct page *first_page;
 	unsigned long bflags;
-	atomic_t refs;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
+	atomic_t refs;
 
 	/* the spinlock is used to protect most operations */
 	spinlock_t lock;
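Moving refs (a 4-byte atomic_t) out from between pointer-aligned members removes a 64-bit alignment hole, which is what lets the struct fit one fewer cacheline. A userspace demonstration with toy layouts; this is not the real struct extent_buffer, and the sizes assume an LP64 target:

#include <stdio.h>

/* 'refs' is 4 bytes like atomic_t; the list and rcu heads are two
 * pointers each, like the kernel's. */
struct before {			/* 4-byte hole after refs */
	unsigned long bflags;
	int refs;
	void *leak_list[2];
	void *rcu_head[2];
	int lock;
};

struct after {			/* refs packs next to lock: no hole */
	unsigned long bflags;
	void *leak_list[2];
	void *rcu_head[2];
	int refs;
	int lock;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct before));
	printf("after:  %zu bytes\n", sizeof(struct after));
	return 0;
}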
fs/btrfs/free-space-cache.c
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
 	int num_checksums;
-	int ret = 0, ret2;
+	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);
 
@@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				goto free_cache;
 			}
 			spin_lock(&ctl->tree_lock);
-			ret2 = link_free_space(ctl, e);
+			ret = link_free_space(ctl, e);
 			ctl->total_bitmaps++;
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
-			list_add_tail(&e->list, &bitmaps);
 			if (ret) {
 				printk(KERN_ERR "Duplicate entries in "
 				       "free space cache, dumping\n");
@@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				page_cache_release(page);
 				goto free_cache;
 			}
+			list_add_tail(&e->list, &bitmaps);
 		}
 
 		num_entries--;
@@ -1417,6 +1417,23 @@ again:
 	return 0;
 }
 
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+			       struct btrfs_free_space *info, u64 offset,
+			       u64 bytes)
+{
+	u64 bytes_to_set = 0;
+	u64 end;
+
+	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+	bytes_to_set = min(end - offset, bytes);
+
+	bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+	return bytes_to_set;
+
+}
+
 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		       struct btrfs_free_space *info)
 {
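add_bytes_to_bitmap() clamps a span to the end of one bitmap and returns how many bytes it covered, which lets callers walk a span across bitmap boundaries with a subtract-and-advance loop (the pattern insert_into_bitmap() adopts below). A runnable toy model of that contract; BITMAP_BYTES and the flat offsets are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BITMAP_BYTES 4096ULL	/* toy: each bitmap covers 4 KiB */

/* Clamp [offset, offset + bytes) to the end of the bitmap starting at
 * bm_start, return the number of bytes consumed. */
static uint64_t add_bytes_to_bitmap(uint64_t bm_start, uint64_t offset,
				    uint64_t bytes)
{
	uint64_t end = bm_start + BITMAP_BYTES;
	uint64_t take = (end - offset < bytes) ? end - offset : bytes;

	printf("bitmap@%llu: set [%llu, %llu)\n",
	       (unsigned long long)bm_start,
	       (unsigned long long)offset,
	       (unsigned long long)(offset + take));
	return take;
}

int main(void)
{
	uint64_t offset = 3000, bytes = 6000;

	/* Subtract-and-advance, the same caller loop as in the diff. */
	while (bytes) {
		uint64_t bm = offset / BITMAP_BYTES * BITMAP_BYTES;
		uint64_t added = add_bytes_to_bitmap(bm, offset, bytes);

		bytes -= added;
		offset += added;
	}
	return 0;
}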
@@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
 
+static struct btrfs_free_space_op free_space_op = {
+	.recalc_thresholds = recalculate_thresholds,
+	.use_bitmap = use_bitmap,
+};
+
 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
 	struct btrfs_free_space *bitmap_info;
+	struct btrfs_block_group_cache *block_group = NULL;
 	int added = 0;
-	u64 bytes, offset, end;
+	u64 bytes, offset, bytes_added;
 	int ret;
 
 	bytes = info->bytes;
@@ -1467,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 	if (!ctl->op->use_bitmap(ctl, info))
 		return 0;
 
+	if (ctl->op == &free_space_op)
+		block_group = ctl->private;
 again:
+	/*
+	 * Since we link bitmaps right into the cluster we need to see if we
+	 * have a cluster here, and if so and it has our bitmap we need to add
+	 * the free space to that bitmap.
+	 */
+	if (block_group && !list_empty(&block_group->cluster_list)) {
+		struct btrfs_free_cluster *cluster;
+		struct rb_node *node;
+		struct btrfs_free_space *entry;
+
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+		spin_lock(&cluster->lock);
+		node = rb_first(&cluster->root);
+		if (!node) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		if (!entry->bitmap) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		if (entry->offset == offset_to_bitmap(ctl, offset)) {
+			bytes_added = add_bytes_to_bitmap(ctl, entry,
+							  offset, bytes);
+			bytes -= bytes_added;
+			offset += bytes_added;
+		}
+		spin_unlock(&cluster->lock);
+		if (!bytes) {
+			ret = 1;
+			goto out;
+		}
+	}
+
+no_cluster_bitmap:
 	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
@@ -1475,19 +1540,10 @@ again:
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
-	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
-		bytes -= end - offset;
-		offset = end;
-		added = 0;
-	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
-		bytes = 0;
-	} else {
-		BUG();
-	}
+	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+	bytes -= bytes_added;
+	offset += bytes_added;
+	added = 0;
 
 	if (!bytes) {
 		ret = 1;
@@ -1766,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 		       "\n", count);
 }
 
-static struct btrfs_free_space_op free_space_op = {
-	.recalc_thresholds = recalculate_thresholds,
-	.use_bitmap = use_bitmap,
-};
-
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2142,9 +2193,11 @@ again:
 /*
  * This searches the block group for just extents to fill the cluster with.
  */
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			struct btrfs_free_cluster *cluster,
-			u64 offset, u64 bytes, u64 min_bytes)
+			struct list_head *bitmaps, u64 offset, u64 bytes,
+			u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *first = NULL;
@@ -2166,6 +2219,8 @@ setup_cluster_no_bitmap
 	 * extent entry.
 	 */
 	while (entry->bitmap) {
+		if (list_empty(&entry->list))
+			list_add_tail(&entry->list, bitmaps);
 		node = rb_next(&entry->offset_index);
 		if (!node)
 			return -ENOSPC;
@@ -2185,8 +2240,12 @@ setup_cluster_no_bitmap
 			return -ENOSPC;
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
-		if (entry->bitmap)
+		if (entry->bitmap) {
+			if (list_empty(&entry->list))
+				list_add_tail(&entry->list, bitmaps);
 			continue;
+		}
 
 		/*
 		 * we haven't filled the empty size and the window is
 		 * very large. reset and try again
@@ -2238,9 +2297,11 @@ setup_cluster_no_bitmap
  * This specifically looks for bitmaps that may work in the cluster, we assume
  * that we have already failed to find extents that will work.
  */
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 			struct btrfs_free_cluster *cluster,
-			u64 offset, u64 bytes, u64 min_bytes)
+			struct list_head *bitmaps, u64 offset, u64 bytes,
+			u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
@@ -2250,10 +2311,39 @@ setup_cluster_bitmap
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
+	/*
+	 * First check our cached list of bitmaps and see if there is an entry
+	 * here that will work.
+	 */
+	list_for_each_entry(entry, bitmaps, list) {
+		if (entry->bytes < min_bytes)
+			continue;
+		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+					   bytes, min_bytes);
+		if (!ret)
+			return 0;
+	}
+
+	/*
+	 * If we do have entries on our list and we are here then we didn't find
+	 * anything, so go ahead and get the next entry after the last entry in
+	 * this list and start the search from there.
+	 */
+	if (!list_empty(bitmaps)) {
+		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+				   list);
+		node = rb_next(&entry->offset_index);
+		if (!node)
+			return -ENOSPC;
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		goto search;
+	}
+
 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
 	if (!entry)
 		return -ENOSPC;
 
+search:
 	node = &entry->offset_index;
 	do {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2284,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct list_head bitmaps;
+	struct btrfs_free_space *entry, *tmp;
 	u64 min_bytes;
 	int ret;
 
@@ -2322,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
-				      min_bytes);
+	INIT_LIST_HEAD(&bitmaps);
+	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+				      bytes, min_bytes);
 	if (ret)
-		ret = setup_cluster_bitmap(block_group, cluster, offset,
-					   bytes, min_bytes);
+		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+					   offset, bytes, min_bytes);
+
+	/* Clear our temporary list */
+	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+		list_del_init(&entry->list);
 
 	if (!ret) {
 		atomic_inc(&block_group->count);
fs/btrfs/inode.c
@@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	}
 
 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
-		return 0;
+		goto good;
 
 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -3646,7 +3646,7 @@ void btrfs_evict_inode(struct inode *inode)
 	btrfs_i_size_write(inode, 0);
 
 	while (1) {
-		trans = btrfs_start_transaction(root, 0);
+		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = root->orphan_block_rsv;
 
fs/btrfs/ioctl.c
@@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
 
 static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 {
-	struct btrfs_ioctl_fs_info_args fi_args;
+	struct btrfs_ioctl_fs_info_args *fi_args;
 	struct btrfs_device *device;
 	struct btrfs_device *next;
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	int ret = 0;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	fi_args.num_devices = fs_devices->num_devices;
-	fi_args.max_id = 0;
-	memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+	fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
+	if (!fi_args)
+		return -ENOMEM;
+
+	fi_args->num_devices = fs_devices->num_devices;
+	memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
 
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
-		if (device->devid > fi_args.max_id)
-			fi_args.max_id = device->devid;
+		if (device->devid > fi_args->max_id)
+			fi_args->max_id = device->devid;
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
-	if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
-		return -EFAULT;
+	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
+		ret = -EFAULT;
 
-	return 0;
+	kfree(fi_args);
+	return ret;
 }
 
 static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
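btrfs_ioctl_fs_info_args is around 1 KiB, too large for a kernel stack frame, hence the kzalloc/copy_to_user/kfree shape above (kzalloc also makes the explicit max_id = 0 redundant). A userspace model of that shape; calloc and memcpy stand in for kzalloc and copy_to_user, and the struct layout is illustrative, not the real ABI:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative reply struct, padded to ~1 KiB like the real args. */
struct fs_info_args {
	unsigned long long max_id;
	unsigned long long num_devices;
	unsigned char fsid[16];
	unsigned char reserved[992];
};

/* memcpy standing in for copy_to_user(); 0 means success. */
static int fake_copy_to_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fs_info_ioctl(void *user_arg)
{
	struct fs_info_args *args;
	int ret = 0;

	args = calloc(1, sizeof(*args));	/* kzalloc(): heap, zeroed */
	if (!args)
		return -ENOMEM;

	args->num_devices = 2;
	args->max_id = 7;

	if (fake_copy_to_user(user_arg, args, sizeof(*args)))
		ret = -EFAULT;

	free(args);				/* kfree() on every path */
	return ret;
}

int main(void)
{
	struct fs_info_args reply;

	printf("ioctl -> %d, max_id=%llu\n",
	       fs_info_ioctl(&reply), reply.max_id);
	return 0;
}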
fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
@@ -804,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto out;
+		goto out_noplug;
 
-	l = path->nodes[0];
-	slot = path->slots[0];
-	btrfs_item_key_to_cpu(l, &key, slot);
-	if (key.objectid != logical) {
-		ret = btrfs_previous_item(root, path, 0,
-					  BTRFS_EXTENT_ITEM_KEY);
-		if (ret < 0)
-			goto out;
-	}
-
+	/*
+	 * we might miss half an extent here, but that doesn't matter,
+	 * as it's only the prefetch
+	 */
 	while (1) {
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -824,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 			if (ret == 0)
 				continue;
 			if (ret < 0)
-				goto out;
+				goto out_noplug;
 
 			break;
 		}
@@ -906,15 +894,20 @@ again:
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto out;
-
-		l = path->nodes[0];
-		slot = path->slots[0];
-		btrfs_item_key_to_cpu(l, &key, slot);
-		if (key.objectid != logical) {
+		if (ret > 0) {
 			ret = btrfs_previous_item(root, path, 0,
 						  BTRFS_EXTENT_ITEM_KEY);
 			if (ret < 0)
 				goto out;
+			if (ret > 0) {
+				/* there's no smaller item, so stick with the
+				 * larger one */
+				btrfs_release_path(path);
+				ret = btrfs_search_slot(NULL, root, &key,
+							path, 0, 0);
+				if (ret < 0)
+					goto out;
+			}
 		}
 
 		while (1) {
@@ -989,6 +982,7 @@ next:
 
 out:
 	blk_finish_plug(&plug);
+out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
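The out/out_noplug split exists because blk_finish_plug() may only run if blk_start_plug() did; failures before plugging jump past the unplug. The general stacked-label shape, with stubs standing in for the plug calls and the scrub work:

#include <stdio.h>

static int plugged;
static void start_plug(void)  { plugged = 1; }
static void finish_plug(void) { plugged = 0; puts("  unplugged"); }

/* fail = 0: success, 1: fail before plugging, 2: fail after plugging */
static int do_work(int fail)
{
	int ret = 0;

	if (fail == 1) {
		ret = -1;
		goto out_noplug;	/* nothing plugged yet: skip unplug */
	}

	start_plug();
	if (fail == 2) {
		ret = -1;
		goto out;		/* plugged: must unplug first */
	}
	/* ... per-stripe scrub work would run here ... */
out:
	finish_plug();
out_noplug:
	puts("  freed path");		/* cleanup common to both paths */
	return ret < 0 ? ret : 0;
}

int main(void)
{
	for (int fail = 0; fail <= 2; fail++) {
		printf("fail=%d:\n", fail);
		printf("  -> %d\n", do_work(fail));
	}
	return 0;
}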
@@ -1064,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
-			goto out;
-		ret = 0;
+			break;
+		if (ret > 0) {
+			if (path->slots[0] >=
+			    btrfs_header_nritems(path->nodes[0])) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+			}
+		}
 
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -1075,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		if (found_key.objectid != sdev->dev->devid)
 			break;
 
-		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
 			break;
 
 		if (found_key.offset >= end)
@@ -1104,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 		if (!cache) {
 			ret = -ENOENT;
-			goto out;
+			break;
 		}
 		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
 				  chunk_offset, length);
@@ -1116,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		btrfs_release_path(path);
 	}
 
-out:
 	btrfs_free_path(path);
-	return ret;
+
+	/*
+	 * ret can still be 1 from search_slot or next_leaf,
+	 * that's not an error
+	 */
+	return ret < 0 ? ret : 0;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1155,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	mutex_lock(&fs_info->scrub_lock);
-	if (fs_info->scrub_workers_refcnt == 0)
+	if (fs_info->scrub_workers_refcnt == 0) {
+		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+			   fs_info->thread_pool_size, &fs_info->generic_worker);
+		fs_info->scrub_workers.idle_thresh = 4;
 		btrfs_start_workers(&fs_info->scrub_workers, 1);
+	}
 	++fs_info->scrub_workers_refcnt;
 	mutex_unlock(&fs_info->scrub_lock);
 
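Together with the open_ctree() hunk earlier, this moves worker-pool setup from mount time to the refcount's 0-to-1 edge, so a pool torn down by the last scrub_workers_put() is rebuilt on the next scrub instead of being reused stale. The refcount-gated init pattern in miniature, with puts() standing in for the worker calls:

#include <stdio.h>

static int refcnt;

static void workers_get(void)
{
	if (refcnt == 0)	/* first user: (re)initialise and start */
		puts("btrfs_init_workers + btrfs_start_workers");
	++refcnt;
}

static void workers_put(void)
{
	if (--refcnt == 0)	/* last user: tear the pool down */
		puts("btrfs_stop_workers");
}

int main(void)
{
	workers_get();	/* scrub #1: pool initialised */
	workers_put();	/* pool torn down */
	workers_get();	/* scrub #2: pool re-initialised, not stale */
	workers_put();
	return 0;
}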
fs/btrfs/transaction.c
@@ -349,7 +349,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
 			    list) {
 			if (t->in_commit) {
 				if (t->commit_done)
-					goto out;
+					break;
 				cur_trans = t;
 				atomic_inc(&cur_trans->use_count);
 				break;
@@ -1118,8 +1118,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 		wait_current_trans_commit_start_and_unblock(root, cur_trans);
 	else
 		wait_current_trans_commit_start(root, cur_trans);
-	put_transaction(cur_trans);
 
+	if (current->journal_info == trans)
+		current->journal_info = NULL;
+
+	put_transaction(cur_trans);
 	return 0;
 }
 
fs/btrfs/volumes.c
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 	transid = btrfs_super_generation(disk_super);
 	if (disk_super->label[0])
 		printk(KERN_INFO "device label %s ", disk_super->label);
-	else {
-		/* FIXME, make a readl uuid parser */
-		printk(KERN_INFO "device fsid %llx-%llx ",
-		       *(unsigned long long *)disk_super->fsid,
-		       *(unsigned long long *)(disk_super->fsid + 8));
-	}
+	else
+		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
 	printk(KERN_CONT "devid %llu transid %llu %s\n",
 	       (unsigned long long)devid, (unsigned long long)transid, path);
 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
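%pU is an extension implemented by the kernel's vsnprintf, not by libc, and it prints all 16 fsid bytes as an RFC-4122-style UUID rather than two truncated %llx values. A userspace equivalent of what the default %pU ordering produces (bytes in stored order, hedged as my reading of lib/vsprintf.c of this era):

#include <stdio.h>

/* Format 16 raw bytes as 8-4-4-4-12 lowercase hex groups. */
static void print_uuid(const unsigned char u[16])
{
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
	       u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
}

int main(void)
{
	unsigned char fsid[16] = {
		0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33,
		0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,
	};

	print_uuid(fsid);	/* deadbeef-0011-2233-4455-66778899aabb */
	return 0;
}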