Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git (synced 2025-11-04 07:44:51 +10:00)

mm/fs: add hooks to support cleancache

This fourth patch of eight in this cleancache series provides the core hooks
in VFS for: initializing cleancache per filesystem; capturing clean pages
reclaimed by page cache; attempting to get pages from cleancache before
filesystem read; and ensuring coherency between pagecache, disk, and
cleancache.  Note that the placement of these hooks was stable from 2.6.18
to 2.6.38; a minor semantic change was required due to a patchset in 2.6.39.

All hooks become no-ops if CONFIG_CLEANCACHE is unset, or become a check of
a boolean global if CONFIG_CLEANCACHE is set but no cleancache "backend" has
claimed cleancache_ops.

Details and a FAQ can be found in Documentation/vm/cleancache.txt

[v8: minchan.kim@gmail.com: adapt to new remove_from_page_cache function]
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Reviewed-by: Jeremy Fitzhardinge <jeremy@goop.org>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik Van Riel <riel@redhat.com>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: Andreas Dilger <adilger@sun.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <joel.becker@oracle.com>
Cc: Nitin Gupta <ngupta@vflare.org>
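
As described in the commit message, every hook compiles to a no-op when
CONFIG_CLEANCACHE is unset, and to a cheap test of a boolean global when it
is set but no backend has claimed cleancache_ops.  The following is a
minimal sketch of that wrapper pattern, assuming a cleancache_enabled flag
and __cleancache_*() implementations supplied by other patches in the
series; the names and signatures are illustrative, not the authoritative
include/linux/cleancache.h interface.

/*
 * Illustrative sketch of the hook-wrapper pattern described in the
 * commit message.  cleancache_enabled and the __cleancache_*() helpers
 * are assumed to be provided elsewhere in the series; treat the exact
 * names and signatures as assumptions, not the final interface.
 */
struct page;				/* defined in <linux/mm_types.h> */

#ifdef CONFIG_CLEANCACHE
extern int cleancache_enabled;		/* false until a backend claims cleancache_ops */
extern int __cleancache_get_page(struct page *page);
extern void __cleancache_put_page(struct page *page);

static inline int cleancache_get_page(struct page *page)
{
	int ret = -1;

	/* cheap boolean check; the backend is called only if one registered */
	if (cleancache_enabled)
		ret = __cleancache_get_page(page);
	return ret;			/* 0 means the page was filled from cleancache */
}

static inline void cleancache_put_page(struct page *page)
{
	if (cleancache_enabled)
		__cleancache_put_page(page);
}
#else	/* !CONFIG_CLEANCACHE: the hooks compile away to no-ops */
static inline int cleancache_get_page(struct page *page)
{
	return -1;
}

static inline void cleancache_put_page(struct page *page)
{
}
#endif

The VFS callers added in the diff below (cleancache_get_page() in
do_mpage_readpage(), cleancache_put_page()/cleancache_flush_page() in
__delete_from_page_cache(), and the flush calls in the truncate and
invalidate paths) rely only on this contract, so a kernel built without
CONFIG_CLEANCACHE pays no cost for the hooks.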
commit c515e1fd36
parent 077b1f83a6
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/cleancache.h>
 
 /*
  * I/O completion handler for multipage BIOs.
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		SetPageMappedToDisk(page);
 	}
 
+	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
+	    cleancache_get_page(page) == 0) {
+		SetPageUptodate(page);
+		goto confused;
+	}
+
 	/*
 	 * This page will go to BIO.  Do we need to send this BIO off first?
 	 */
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/rculist_bl.h>
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		s->s_maxbytes = MAX_NON_LFS;
 		s->s_op = &default_op;
 		s->s_time_gran = 1000000000;
+		s->cleancache_poolid = -1;
 	}
 out:
 	return s;
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
+		cleancache_flush_fs(s);
 		fs->kill_sb(s);
 		/*
 		 * We need to call rcu_barrier so all the delayed rcu free
mm/filemap.c: 11 lines changed
@@ -34,6 +34,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 /*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
+	/*
+	 * if we're uptodate, flush out into the cleancache, otherwise
+	 * invalidate any existing cleancache entries.  We can't leave
+	 * stale data around in the cleancache once our page is gone
+	 */
+	if (PageUptodate(page) && PageMappedToDisk(page))
+		cleancache_put_page(page);
+	else
+		cleancache_flush_page(mapping, page);
+
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
@@ -19,6 +19,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
 				   do_invalidatepage */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+	cleancache_flush_page(page->mapping, page);
 	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t next;
 	int i;
 
+	cleancache_flush_inode(mapping);
 	if (mapping->nrpages == 0)
 		return;
 
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 	}
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int did_range_unmap = 0;
 	int wrapped = 0;
 
+	cleancache_flush_inode(mapping);
 	pagevec_init(&pvec, 0);
 	next = start;
 	while (next <= end && !wrapped &&
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
+	cleancache_flush_inode(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
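
The commit message notes that the hooks stay cheap until a cleancache
"backend" claims cleancache_ops.  That registration is not part of this
patch; the skeleton below is a hypothetical illustration of how a backend
might plug in, with the ops-table layout and the cleancache_register_ops()
call assumed from the series' Documentation/vm/cleancache.txt rather than
from the hooks shown above.

/*
 * Hypothetical backend skeleton, for illustration only.  The layout of
 * struct cleancache_ops, struct cleancache_filekey and the
 * cleancache_register_ops() call are provided by other patches in this
 * series, so the names and signatures here are assumptions.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cleancache.h>

static int stub_init_fs(size_t pagesize)
{
	return 0;			/* would allocate and return a per-fs pool id */
}

static int stub_get_page(int pool_id, struct cleancache_filekey key,
			 pgoff_t index, struct page *page)
{
	return -1;			/* miss; a real backend copies data into @page on a hit */
}

static void stub_put_page(int pool_id, struct cleancache_filekey key,
			  pgoff_t index, struct page *page)
{
	/* a real backend would copy (or compress) the clean page here */
}

static void stub_flush_page(int pool_id, struct cleancache_filekey key,
			    pgoff_t index)
{
}

static void stub_flush_inode(int pool_id, struct cleancache_filekey key)
{
}

static void stub_flush_fs(int pool_id)
{
}

static struct cleancache_ops stub_cleancache_ops = {
	.init_fs	= stub_init_fs,
	.get_page	= stub_get_page,
	.put_page	= stub_put_page,
	.flush_page	= stub_flush_page,
	.flush_inode	= stub_flush_inode,
	.flush_fs	= stub_flush_fs,
};

static int __init stub_cleancache_init(void)
{
	/* claiming cleancache_ops is what turns the VFS hooks on */
	cleancache_register_ops(&stub_cleancache_ops);
	return 0;
}
module_init(stub_cleancache_init);
MODULE_LICENSE("GPL");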