Diffstat (limited to 'kernel/dma/mapping.c')
-rw-r--r-- | kernel/dma/mapping.c | 28
1 files changed, 22 insertions, 6 deletions
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 9a4db5cce600..58db8fd70471 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -760,12 +760,6 @@ bool dma_pci_p2pdma_supported(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
 
-#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
-void arch_dma_set_mask(struct device *dev, u64 mask);
-#else
-#define arch_dma_set_mask(dev, mask) do { } while (0)
-#endif
-
 int dma_set_mask(struct device *dev, u64 mask)
 {
 	/*
@@ -799,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+			 dma_get_required_mask(dev))
+		return true;
+
+	if (unlikely(ops))
+		return false;
+	return !dma_direct_all_ram_mapped(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
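
For context, the sketch below shows how a caller might consume the exported dma_addressing_limited() helper from a driver's DMA setup path. It is not part of the patch: the function name foo_dma_setup() and the FOO_DMA_BITS constant are hypothetical, and the fallback behaviour is only illustrative; the helper itself merely reports whether the device's mask (and, for dma-direct, the installed RAM) covers all system memory.

	/* Hypothetical driver fragment, not from the patch above. */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	#define FOO_DMA_BITS	32	/* assumed hardware addressing capability */

	static int foo_dma_setup(struct device *dev)
	{
		int ret;

		/* Advertise what the hardware can address. */
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(FOO_DMA_BITS));
		if (ret)
			return ret;

		/*
		 * dma_addressing_limited() returns true when the effective mask
		 * is smaller than the required mask, or when dma-direct finds
		 * RAM that the mask cannot reach.  A driver could use this to
		 * pick a more conservative mode instead of relying on SWIOTLB
		 * bounce buffering for every transfer.
		 */
		if (dma_addressing_limited(dev))
			dev_info(dev, "DMA addressing limited, expect bounce buffering\n");

		return 0;
	}

Because the helper now lives out of line in kernel/dma/mapping.c, it can call dma_direct_all_ram_mapped() without leaking dma-direct internals into the dma-mapping header, which is the point of the EXPORT_SYMBOL_GPL() added by this hunk.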