mirror of
https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
synced 2025-10-05 04:11:18 +10:00
Changes since last update:
 - Address cache aliasing for mappable page cache folios;
 - Allow readdir() to be interrupted;
 - Fix large fragment handling which was errored out by mistake;
 - Add missing tracepoints;
 - Use memcpy_to_folio() to replace copy_to_iter() for inline data.

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEQ0A6bDUS9Y+83NPFUXZn5Zlu5qoFAmhyeJQRHHhpYW5nQGtl
cm5lbC5vcmcACgkQUXZn5Zlu5qqlBBAAgPWmt8AqJBa+8BRI2VzM7dAygHODp14d
2m0NojMiONLh+vveCt/BTLnapqiOrnfUP9HXCzFjULClzLAjm7zUe3y1m304WGT+
WIgQpu6ZKEvoMLKAPWEjGmevixX6W3eeGSjoKJv8XUHBhLrH2QdLGu7GoM1j1Qk4
mf40VvzAyA7HkCf3jFOo7BOhMhzuAWfCGy+lMN4taDK+eQ3kpcola60Sjy0pUrew
HHH4qFDO/wJ1Mh5DVFFcH82QBVFNuNlbqY/0twyENrPuDUSrnbTgXTIHjNYsdO5p
kWSHQMBEPS9R4vJBYUG8yKWGR1nVT3MCfm8e0eebawazLiKBbTTRa9PHTdzC2w9F
gVyMcJBSPtZTera4z+KoZVSBXU7Om0YS7TZdFAbocrMv06/l/F88mlbsy0b+uHRU
k0WcyMmR+TbdJicsQ57jJ1xoNBpe12NDtoLjeCZLhC0Sd9bNS2LkxzthqQk33v/I
8SqzGoTyISyxALGZm07HI+e4GBTmGAgKjJEAEjcFRl5pFQivExJq59lg2Gp4vUo5
DD2ZN3uENERpPBrXFmXpDLwDYCBoZYUJCOfByr5zwBhy8/JjtKwXT0Bkcr6QQ+pT
8rraONl56ijBv4n6AjnjVM4ZScvoBEynAgYZnYAJ8tprix81+MQv8yx+iTKXQT5q
AujV/p1p+lQ=
=7VXc
-----END PGP SIGNATURE-----

Merge tag 'erofs-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "Fix for a cache aliasing issue by adding missing flush_dcache_folio(),
  which causes execution failures on some arm32 setups.

  Fix for large compressed fragments, which could be generated by
  -Eall-fragments option (but should be rare) and was rejected by
  mistake due to an on-disk hardening commit.

  The remaining ones are small fixes. Summary:

  - Address cache aliasing for mappable page cache folios

  - Allow readdir() to be interrupted

  - Fix large fragment handling which was errored out by mistake

  - Add missing tracepoints

  - Use memcpy_to_folio() to replace copy_to_iter() for inline data"

* tag 'erofs-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix large fragment handling
  erofs: allow readdir() to be interrupted
  erofs: address D-cache aliasing
  erofs: use memcpy_to_folio() to replace copy_to_iter()
  erofs: fix to add missing tracepoint in erofs_read_folio()
  erofs: fix to add missing tracepoint in erofs_readahead()

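The headline fix here is the D-cache aliasing one: on arm32 and other machines with aliasing data caches, bytes the kernel writes into a page cache folio through a kernel mapping must be flushed before the folio is published as uptodate, or a user mapping of the same folio can see stale cache lines (which shows up as the execution failures mentioned above). Below is a minimal, hedged sketch of that ordering; fill_and_publish() is an invented helper for illustration, not the erofs code, which appears in the diff further down.

/* Illustrative sketch only, not part of the erofs diff below. */
#include <linux/cacheflush.h>   /* flush_dcache_folio() */
#include <linux/highmem.h>      /* kmap_local_folio() */
#include <linux/pagemap.h>      /* folio_end_read() */
#include <linux/string.h>

/* Assumes len fits within the first page of the folio. */
static void fill_and_publish(struct folio *folio, const void *src, size_t len)
{
        void *dst = kmap_local_folio(folio, 0);

        memcpy(dst, src, len);          /* write through the kernel alias */
        kunmap_local(dst);

        flush_dcache_folio(folio);      /* make the data visible to user mappings */
        folio_end_read(folio, true);    /* only now mark uptodate and unlock */
}
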
commit 3b428e1cfc
@@ -214,9 +214,11 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
 
 /*
  * bit 30: I/O error occurred on this folio
+ * bit 29: CPU has dirty data in D-cache (needs aliasing handling);
  * bit 0 - 29: remaining parts to complete this folio
  */
-#define EROFS_ONLINEFOLIO_EIO		(1 << 30)
+#define EROFS_ONLINEFOLIO_EIO		30
+#define EROFS_ONLINEFOLIO_DIRTY		29
 
 void erofs_onlinefolio_init(struct folio *folio)
 {
@@ -233,19 +235,23 @@ void erofs_onlinefolio_split(struct folio *folio)
         atomic_inc((atomic_t *)&folio->private);
 }
 
-void erofs_onlinefolio_end(struct folio *folio, int err)
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
 {
         int orig, v;
 
         do {
                 orig = atomic_read((atomic_t *)&folio->private);
-                v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
+                DBG_BUGON(orig <= 0);
+                v = dirty << EROFS_ONLINEFOLIO_DIRTY;
+                v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
         } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
 
-        if (v & ~EROFS_ONLINEFOLIO_EIO)
+        if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
                 return;
         folio->private = 0;
-        folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
+        if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
+                flush_dcache_folio(folio);
+        folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
 }
 
 static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
@@ -351,11 +357,16 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
  */
 static int erofs_read_folio(struct file *file, struct folio *folio)
 {
+        trace_erofs_read_folio(folio, true);
+
         return iomap_read_folio(folio, &erofs_iomap_ops);
 }
 
 static void erofs_readahead(struct readahead_control *rac)
 {
+        trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
+                              readahead_count(rac), true);
+
         return iomap_readahead(rac, &erofs_iomap_ops);
 }
 

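In the reworked completion path above, folio->private packs three things: the count of outstanding parts in the low bits, a sticky dirty flag in bit 29 (data was written through a kernel mapping), and a sticky I/O error flag in bit 30; whichever caller drops the last part flushes the D-cache if needed and ends the read. The standalone user-space demo below only mirrors that bit arithmetic (no atomics, no folios) to make the layout concrete; the names and the initial count are made up for the demo.

/* Standalone demo of the bit layout; compiles with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

#define ONLINEFOLIO_EIO		30	/* sticky I/O error flag */
#define ONLINEFOLIO_DIRTY	29	/* sticky "needs D-cache flush" flag */
#define BIT(n)			(1u << (n))

static unsigned int state = 3;		/* pretend three parts are outstanding */

static void part_done(int err, bool dirty)
{
        unsigned int orig = state, v;

        v = (unsigned int)dirty << ONLINEFOLIO_DIRTY;
        v |= (orig - 1) | ((unsigned int)!!err << ONLINEFOLIO_EIO);
        state = v;

        if (v & (BIT(ONLINEFOLIO_DIRTY) - 1))
                return;			/* parts still pending */
        printf("complete: dirty=%d eio=%d\n",
               !!(v & BIT(ONLINEFOLIO_DIRTY)), !!(v & BIT(ONLINEFOLIO_EIO)));
}

int main(void)
{
        part_done(0, true);
        part_done(0, false);
        part_done(-5, true);		/* last part: prints "complete: dirty=1 eio=1" */
        return 0;
}
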
@@ -301,13 +301,11 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
         cur = min(cur, rq->outputsize);
         if (cur && rq->out[0]) {
                 kin = kmap_local_page(rq->in[nrpages_in - 1]);
-                if (rq->out[0] == rq->in[nrpages_in - 1]) {
+                if (rq->out[0] == rq->in[nrpages_in - 1])
                         memmove(kin + rq->pageofs_out, kin + pi, cur);
-                        flush_dcache_page(rq->out[0]);
-                } else {
+                else
                         memcpy_to_page(rq->out[0], rq->pageofs_out,
                                        kin + pi, cur);
-                }
                 kunmap_local(kin);
         }
         rq->outputsize -= cur;
@@ -325,14 +323,12 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
                 po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
                 DBG_BUGON(no >= nrpages_out);
                 cnt = min(insz - pi, PAGE_SIZE - po);
-                if (rq->out[no] == rq->in[ni]) {
+                if (rq->out[no] == rq->in[ni])
                         memmove(kin + po,
                                 kin + rq->pageofs_in + pi, cnt);
-                        flush_dcache_page(rq->out[no]);
-                } else if (rq->out[no]) {
+                else if (rq->out[no])
                         memcpy_to_page(rq->out[no], po,
                                        kin + rq->pageofs_in + pi, cnt);
-                }
                 pi += cnt;
         } while (pi < insz);
         kunmap_local(kin);

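The flush_dcache_page() calls disappear from this path because the completion code above now performs one flush_dcache_folio() when the last part of a folio finishes (its callers pass dirty = true, as later hunks show). The in-place branch keeps memmove() because, when the output page is the input page, the source and destination byte ranges may overlap; the tiny standalone demo below only illustrates that overlap point with an ordinary buffer.

/* Standalone demo: overlapping ranges need memmove(), not memcpy(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char page[] = "....ABCDEFGH";	/* stand-in for a shared in/out page */

        /* move the 8-byte payload from offset 4 to offset 0; the ranges overlap */
        memmove(page, page + 4, 8);
        printf("%.8s\n", page);		/* prints ABCDEFGH */
        return 0;
}
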
@@ -58,6 +58,11 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
                 struct erofs_dirent *de;
                 unsigned int nameoff, maxsize;
 
+                if (fatal_signal_pending(current)) {
+                        err = -ERESTARTSYS;
+                        break;
+                }
+
                 de = erofs_bread(&buf, dbstart, true);
                 if (IS_ERR(de)) {
                         erofs_err(sb, "failed to readdir of logical block %llu of nid %llu",
@@ -88,6 +93,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
                         break;
                 ctx->pos = dbstart + maxsize;
                 ofs = 0;
+                cond_resched();
         }
         erofs_put_metabuf(&buf);
         if (EROFS_I(dir)->dot_omitted && ctx->pos == dir->i_size) {

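The readdir() change above follows the usual kernel pattern for long metadata loops: give up with -ERESTARTSYS when a fatal signal is pending and yield with cond_resched() once per block. Here is a minimal sketch of that shape; more_blocks() and emit_one_block() are made-up placeholders, and only the fatal_signal_pending()/cond_resched() usage and the error convention come from the diff above.

/* Sketch only; the directory-walking helpers are hypothetical. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>

bool more_blocks(struct dir_context *ctx);	/* hypothetical */
int emit_one_block(struct dir_context *ctx);	/* hypothetical */

static int walk_many_blocks(struct dir_context *ctx)
{
        int err = 0;

        while (more_blocks(ctx)) {
                if (fatal_signal_pending(current)) {
                        err = -ERESTARTSYS;	/* let the VFS restart the call */
                        break;
                }
                err = emit_one_block(ctx);
                if (err)
                        break;
                cond_resched();			/* yield between blocks */
        }
        return err;
}
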
@@ -38,7 +38,7 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
         } else {
                 bio_for_each_folio_all(fi, &rq->bio) {
                         DBG_BUGON(folio_test_uptodate(fi.folio));
-                        erofs_onlinefolio_end(fi.folio, ret);
+                        erofs_onlinefolio_end(fi.folio, ret, false);
                 }
         }
         bio_uninit(&rq->bio);
@@ -96,8 +96,6 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
         struct erofs_map_blocks *map = &io->map;
         unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
         loff_t pos = folio_pos(folio), ofs;
-        struct iov_iter iter;
-        struct bio_vec bv;
         int err = 0;
 
         erofs_onlinefolio_init(folio);
@@ -122,13 +120,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
                                 err = PTR_ERR(src);
                                 break;
                         }
-                        bvec_set_folio(&bv, folio, len, cur);
-                        iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
-                        if (copy_to_iter(src, len, &iter) != len) {
-                                erofs_put_metabuf(&buf);
-                                err = -EIO;
-                                break;
-                        }
+                        memcpy_to_folio(folio, cur, src, len);
                         erofs_put_metabuf(&buf);
                 } else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                         folio_zero_segment(folio, cur, cur + len);
@@ -162,7 +154,7 @@ io_retry:
                 }
                 cur += len;
         }
-        erofs_onlinefolio_end(folio, err);
+        erofs_onlinefolio_end(folio, err, false);
         return err;
 }
 

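For inline (tail-packed) data, the old code built a one-entry bio_vec and an iov_iter just to copy a kernel buffer into the target folio; memcpy_to_folio() does that copy directly, mapping the folio piecewise and flushing the D-cache itself, which is why the iter and bvec locals are dropped above. A hedged sketch of the resulting shape follows; fill_inline() and its block-size handling are invented for illustration.

/* Sketch only; helper name and block-size handling are made up. */
#include <linux/highmem.h>	/* memcpy_to_folio(), folio_zero_segment() */

static void fill_inline(struct folio *folio, size_t offset,
                        const char *src, size_t len, size_t blksz)
{
        /* copy the tail-packed bytes straight into the page cache folio */
        memcpy_to_folio(folio, offset, src, len);
        /* zero out the remainder of the block (illustrative) */
        if (len < blksz)
                folio_zero_segment(folio, offset + len, offset + blksz);
}
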
@@ -315,10 +315,12 @@ static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
 /* The length of extent is full */
 #define EROFS_MAP_FULL_MAPPED		0x0008
 /* Located in the special packed inode */
-#define EROFS_MAP_FRAGMENT		0x0010
+#define __EROFS_MAP_FRAGMENT		0x0010
 /* The extent refers to partial decompressed data */
 #define EROFS_MAP_PARTIAL_REF		0x0020
 
+#define EROFS_MAP_FRAGMENT	(EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
+
 struct erofs_map_blocks {
         struct erofs_buf buf;
 
@@ -390,7 +392,7 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
 void erofs_onlinefolio_init(struct folio *folio);
 void erofs_onlinefolio_split(struct folio *folio);
-void erofs_onlinefolio_end(struct folio *folio, int err);
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty);
 struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
 int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
                   struct kstat *stat, u32 request_mask,

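With the header change above, EROFS_MAP_FRAGMENT is no longer a single flag bit: it is a composite that always includes EROFS_MAP_MAPPED, so mapping code can assign it in one go, while code that only asks 'is this a fragment?' tests the raw __EROFS_MAP_FRAGMENT bit, as the later hunks do. The standalone demo below just spells out that flag algebra; the 0x0001 value used for EROFS_MAP_MAPPED is an assumption for the demo, not taken from this diff.

/* Standalone demo of the flag algebra; EROFS_MAP_MAPPED's value is assumed. */
#include <stdio.h>

#define EROFS_MAP_MAPPED	0x0001	/* assumed value, for the demo only */
#define __EROFS_MAP_FRAGMENT	0x0010
#define EROFS_MAP_FRAGMENT	(EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)

int main(void)
{
        unsigned int m_flags = EROFS_MAP_FRAGMENT;	/* what the mapping code assigns */

        printf("mapped=%d fragment=%d\n",
               !!(m_flags & EROFS_MAP_MAPPED),		/* 1: implied automatically */
               !!(m_flags & __EROFS_MAP_FRAGMENT));	/* 1: the raw bit callers test */
        return 0;
}
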
@@ -1034,7 +1034,7 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f,
                 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                         folio_zero_segment(folio, cur, end);
                         tight = false;
-                } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
+                } else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
                         erofs_off_t fpos = offset + cur - map->m_la;
 
                         err = z_erofs_read_fragment(inode->i_sb, folio, cur,
@@ -1091,7 +1091,7 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f,
                         tight = (bs == PAGE_SIZE);
                 }
         } while ((end = cur) > 0);
-        erofs_onlinefolio_end(folio, err);
+        erofs_onlinefolio_end(folio, err, false);
         return err;
 }
 
@@ -1196,7 +1196,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
                         cur += len;
                 }
                 kunmap_local(dst);
-                erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
+                erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true);
                 list_del(p);
                 kfree(bvi);
         }
@@ -1355,7 +1355,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
 
                 DBG_BUGON(z_erofs_page_is_invalidated(page));
                 if (!z_erofs_is_shortlived_page(page)) {
-                        erofs_onlinefolio_end(page_folio(page), err);
+                        erofs_onlinefolio_end(page_folio(page), err, true);
                         continue;
                 }
                 if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {

@@ -413,8 +413,7 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
             !vi->z_tailextent_headlcn) {
                 map->m_la = 0;
                 map->m_llen = inode->i_size;
-                map->m_flags = EROFS_MAP_MAPPED |
-                        EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
+                map->m_flags = EROFS_MAP_FRAGMENT;
                 return 0;
         }
         initial_lcn = ofs >> lclusterbits;
@@ -489,7 +488,7 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
                         goto unmap_out;
                 }
         } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
-                map->m_flags |= EROFS_MAP_FRAGMENT;
+                map->m_flags = EROFS_MAP_FRAGMENT;
         } else {
                 map->m_pa = erofs_pos(sb, m.pblk);
                 err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
@@ -617,7 +616,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
         if (lstart < lend) {
                 map->m_la = lstart;
                 if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
-                        map->m_flags |= EROFS_MAP_MAPPED | EROFS_MAP_FRAGMENT;
+                        map->m_flags = EROFS_MAP_FRAGMENT;
                         vi->z_fragmentoff = map->m_plen;
                         if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
                                 vi->z_fragmentoff |= map->m_pa << 32;
@@ -797,7 +796,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
         iomap->length = map.m_llen;
         if (map.m_flags & EROFS_MAP_MAPPED) {
                 iomap->type = IOMAP_MAPPED;
-                iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
+                iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
                               IOMAP_NULL_ADDR : map.m_pa;
         } else {
                 iomap->type = IOMAP_HOLE;