Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux (synced 2025-11-04 08:34:47 +10:00)
dma-mapping: Add a new dma_need_sync API

Add a new API to check if calls to dma_sync_single_for_{device,cpu} are
required for a given DMA streaming mapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-2-hch@lst.de
			
			
This commit is contained in:

parent 2bdeb3ed54
commit 3aa9162500
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -204,6 +204,14 @@ Returns the maximum size of a mapping for the device. The size parameter
 of the mapping functions like dma_map_single(), dma_map_page() and
 others should not be larger than the returned value.
 
+::
+
+	bool
+	dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership.  Returns %false if those calls can be skipped.
+
 ::
 
 	unsigned long
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -87,4 +87,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
 }
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -530,3 +530,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
 		return swiotlb_max_mapping_size(dev);
 	return SIZE_MAX;
 }
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return !dev_is_dma_coherent(dev) ||
+		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
 
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_is_direct(ops))
+		return dma_direct_need_sync(dev, dma_addr);
+	return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
 unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
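A note on the implementation, as a reading of the patch: on the direct-mapping
path the answer really is per-mapping, since a non-coherent device always
needs the syncs, and even a coherent one does when dma_addr falls inside the
swiotlb bounce buffer. That is why the API takes a dma_addr_t rather than
just the device. For devices behind custom dma_map_ops, dma_need_sync() errs
on the safe side and reports true whenever the ops implement either sync
callback, because whether a particular mapping needs them is not visible at
this layer.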