Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git (synced 2025-09-13)
block: fix race between set_blocksize and read paths
[ Upstream commit c0e473a0d2 ]
With the new large sector size support, it's now the case that
set_blocksize can change i_blkbits and the folio order in a manner that
conflicts with a concurrent reader and causes a kernel crash.
Specifically, let's say that udev-worker calls libblkid to detect the
labels on a block device. The read call can create an order-0 folio to
read the first 4096 bytes from the disk. But then udev is preempted.
Next, someone tries to mount an 8k-sectorsize filesystem from the same
block device. The filesystem calls set_blocksize, which sets the block size to
8192 and the minimum folio order to 1.
Now udev resumes, still holding the order-0 folio it allocated. It then
tries to schedule a read bio and do_mpage_readahead tries to create
bufferheads for the folio. Unfortunately, blocks_per_folio == 0 because
the page size is 4096 but the blocksize is 8192 so no bufferheads are
attached and the bh walk never sets bdev. We then submit the bio with a
NULL block device and crash.
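As a back-of-the-envelope illustration of that failure (a standalone
userspace sketch, not kernel code; the names merely mirror the kernel's):

	#include <stdio.h>

	int main(void)
	{
		unsigned int folio_size = 4096;	/* order-0 folio from the reader */
		unsigned int blkbits = 13;	/* blksize_bits(8192) */

		/* The block is larger than the folio, so blocks-per-folio
		 * truncates to zero: no bufferheads get attached, and the
		 * bh walk never sets bdev. */
		printf("blocks per folio = %u\n", folio_size >> blkbits);
		return 0;
	}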
Therefore, truncate the page cache after flushing but before updating
i_blksize. However, that's not enough -- we also need to lock out file
IO and page faults during the update. Take both the i_rwsem and the
invalidate_lock in exclusive mode for invalidations, and in shared mode
for read/write operations.
I don't know if this is the correct fix, but xfs/259 found it.
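For what it's worth, the racing shape described above boils down to one
thread reading the device while another changes its block size. A minimal
userspace sketch of that shape follows (illustrative only: /dev/sdX is a
hypothetical scratch device, the program must run as root, and the
8192-byte step additionally assumes a kernel and device with
large-block-size support):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <pthread.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void *reader(void *arg)
	{
		int fd = *(int *)arg;
		char buf[4096];

		/* Keep populating the page cache with order-0 folios. */
		for (int i = 0; i < 100000; i++)
			pread(fd, buf, sizeof(buf), 0);
		return NULL;
	}

	int main(void)
	{
		int sizes[] = { 4096, 8192 };	/* 8192 needs LBS support */
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		pthread_t t;

		if (fd < 0)
			return 1;
		pthread_create(&t, NULL, reader, &fd);

		/* Each BLKBSZSET lands in set_blocksize, racing the reads. */
		for (int i = 0; i < 100000; i++)
			ioctl(fd, BLKBSZSET, &sizes[i & 1]);

		pthread_join(t, NULL);
		close(fd);
		return 0;
	}

With the patch below, the set_blocksize side takes both i_rwsem and
invalidate_lock exclusively, so a reader observes either the old geometry
or the new one, never a half-updated mix.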
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Link: https://lore.kernel.org/r/174543795699.4139148.2086129139322431423.stgit@frogsfrogsfrogs
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 64f505b08e
parent e9f646f089
block/bdev.c (17 changed lines)
@@ -168,9 +168,26 @@ int set_blocksize(struct file *file, int size)
 
 	/* Don't change the size if it is same as current */
 	if (inode->i_blkbits != blksize_bits(size)) {
+		/*
+		 * Flush and truncate the pagecache before we reconfigure the
+		 * mapping geometry because folio sizes are variable now. If a
+		 * reader has already allocated a folio whose size is smaller
+		 * than the new min_order but invokes readahead after the new
+		 * min_order becomes visible, readahead will think there are
+		 * "zero" blocks per folio and crash. Take the inode and
+		 * invalidation locks to avoid racing with
+		 * read/write/fallocate.
+		 */
+		inode_lock(inode);
+		filemap_invalidate_lock(inode->i_mapping);
+
 		sync_blockdev(bdev);
+		kill_bdev(bdev);
+
 		inode->i_blkbits = blksize_bits(size);
 		kill_bdev(bdev);
+		filemap_invalidate_unlock(inode->i_mapping);
+		inode_unlock(inode);
 	}
 	return 0;
 }
block/blk-zoned.c

@@ -347,6 +347,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 		op = REQ_OP_ZONE_RESET;
 
 		/* Invalidate the page cache, including dirty pages. */
+		inode_lock(bdev->bd_mapping->host);
 		filemap_invalidate_lock(bdev->bd_mapping);
 		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
 		if (ret)
@@ -368,8 +369,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
 
 fail:
-	if (cmd == BLKRESETZONE)
+	if (cmd == BLKRESETZONE) {
 		filemap_invalidate_unlock(bdev->bd_mapping);
+		inode_unlock(bdev->bd_mapping->host);
+	}
 
 	return ret;
 }
block/fops.c (16 changed lines)
@@ -721,7 +721,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		ret = direct_write_fallback(iocb, from, ret,
 				blkdev_buffered_write(iocb, from));
 	} else {
+		/*
+		 * Take i_rwsem and invalidate_lock to avoid racing with
+		 * set_blocksize changing i_blkbits/folio order and punching
+		 * out the pagecache.
+		 */
+		inode_lock_shared(bd_inode);
 		ret = blkdev_buffered_write(iocb, from);
+		inode_unlock_shared(bd_inode);
 	}
 
 	if (ret > 0)
@@ -732,6 +739,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
 	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
 	loff_t size = bdev_nr_bytes(bdev);
 	loff_t pos = iocb->ki_pos;
@@ -768,7 +776,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			goto reexpand;
 	}
 
+	/*
+	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
+	 * changing i_blkbits/folio order and punching out the pagecache.
+	 */
+	inode_lock_shared(bd_inode);
 	ret = filemap_read(iocb, to, ret);
+	inode_unlock_shared(bd_inode);
 
 reexpand:
 	if (unlikely(shorted))
@@ -811,6 +825,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
+	inode_lock(inode);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	/*
@@ -843,6 +858,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 
 fail:
 	filemap_invalidate_unlock(inode->i_mapping);
+	inode_unlock(inode);
 	return error;
 }
block/ioctl.c

@@ -141,6 +141,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 	if (err)
 		return err;
 
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 	if (err)
@@ -173,6 +174,7 @@ out_unplug:
 	blk_finish_plug(&plug);
 fail:
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }
 
@@ -198,12 +200,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
 	    end > bdev_nr_bytes(bdev))
 		return -EINVAL;
 
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, end - 1);
 	if (!err)
 		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
 						GFP_KERNEL);
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }
 
@@ -235,6 +239,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 		return -EINVAL;
 
 	/* Invalidate the page cache, including dirty pages */
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, end);
 	if (err)
@@ -245,6 +250,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 
 fail:
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }