Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
 drivers/iommu/io-pgtable-arm.c | 141 ++++++++---------
 1 file changed, 54 insertions(+), 87 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0e67f1721a3d..6b9bb58a414f 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -106,6 +106,18 @@
#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
+/*
+ * For !FWB these encode as:
+ * 1111 = Normal Outer Write-Back Cacheable / Inner Write-Back Cacheable
+ *        Permit S1 to override
+ * 0101 = Normal Non-cacheable / Inner Non-cacheable
+ * 0001 = Device / Device-nGnRE
+ * For S2FWB these encode as:
+ * 0110 = Force Normal Write-Back
+ * 0101 = Normal* is forced Normal-NC, Device unchanged
+ * 0001 = Force Device-nGnRE
+ */
+#define ARM_LPAE_PTE_MEMATTR_FWB_WB (((arm_lpae_iopte)0x6) << 2)
#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
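
For reference, a minimal standalone sketch of how these encodings get selected (an assumed simplification of arm_lpae_prot_to_pte() further down; the MEMATTR_* names and boolean flags here are illustrative, not kernel API):

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t arm_lpae_iopte;

#define MEMATTR_FWB_WB (((arm_lpae_iopte)0x6) << 2) /* S2FWB: force Normal-WB */
#define MEMATTR_OIWB   (((arm_lpae_iopte)0xf) << 2) /* !FWB: Normal OWB/IWB   */
#define MEMATTR_NC     (((arm_lpae_iopte)0x5) << 2) /* Normal Non-cacheable   */
#define MEMATTR_DEV    (((arm_lpae_iopte)0x1) << 2) /* Device-nGnRE           */

/* Pick the stage-2 MemAttr bits per the encoding table above. */
static arm_lpae_iopte s2_memattr(bool mmio, bool cacheable, bool s2fwb)
{
	if (mmio)
		return MEMATTR_DEV;
	if (cacheable)
		return s2fwb ? MEMATTR_FWB_WB : MEMATTR_OIWB;
	return MEMATTR_NC;
}
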
@@ -199,6 +211,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
+/*
+ * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
+ * a concatenated PGD, into the maximum number of entries that can be
+ * mapped in the same table page.
+ */
+static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
+{
+ int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
+
+ return ptes_per_table - (i & (ptes_per_table - 1));
+}
+
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
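
A quick worked example of the new helper (standalone sketch assuming 512 PTEs per table page, i.e. a 4K granule): an index pointing into a later page of a concatenated PGD must be masked down before computing the remaining room, otherwise the count goes negative.

#include <assert.h>

static int max_entries(int i, int ptes_per_table)
{
	return ptes_per_table - (i & (ptes_per_table - 1));
}

int main(void)
{
	assert(max_entries(10, 512) == 502);  /* plain index: as before      */
	assert(max_entries(1000, 512) == 24); /* 1000 & 511 = 488 -> 24 left */
	/* The old "ptes_per_table - i" arithmetic would yield -488 here */
	return 0;
}
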
@@ -390,7 +414,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* If we can install a leaf entry at this level, then do so */
if (size == block_size) {
- max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
+ max_entries = arm_lpae_max_entries(map_idx_start, data);
num_entries = min_t(int, pgcount, max_entries);
ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
if (!ret)
@@ -458,12 +482,16 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
*/
if (data->iop.fmt == ARM_64_LPAE_S2 ||
data->iop.fmt == ARM_32_LPAE_S2) {
- if (prot & IOMMU_MMIO)
+ if (prot & IOMMU_MMIO) {
pte |= ARM_LPAE_PTE_MEMATTR_DEV;
- else if (prot & IOMMU_CACHE)
- pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
- else
+ } else if (prot & IOMMU_CACHE) {
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
+ pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
+ else
+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
+ } else {
pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ }
} else {
if (prot & IOMMU_MMIO)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
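
For context, a stage-2 user would opt in roughly like this (a sketch, not from this patch; smmu_supports_fwb is a hypothetical capability check for FEAT_S2FWB, while the quirk flag and alloc_io_pgtable_ops() are real):

struct io_pgtable_cfg cfg = { /* pgsize_bitmap, ias, oas, ... */ };
struct io_pgtable_ops *ops;

if (smmu_supports_fwb)	/* hypothetical: HW implements FEAT_S2FWB */
	cfg.quirks |= IO_PGTABLE_QUIRK_ARM_S2FWB;

ops = alloc_io_pgtable_ops(ARM_64_LPAE_S2, &cfg, cookie);
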
@@ -569,66 +597,6 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
kfree(data);
}
-static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
- struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size,
- arm_lpae_iopte blk_pte, int lvl,
- arm_lpae_iopte *ptep, size_t pgcount)
-{
- struct io_pgtable_cfg *cfg = &data->iop.cfg;
- arm_lpae_iopte pte, *tablep;
- phys_addr_t blk_paddr;
- size_t tablesz = ARM_LPAE_GRANULE(data);
- size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
- int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
- int i, unmap_idx_start = -1, num_entries = 0, max_entries;
-
- if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
- return 0;
-
- tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
- if (!tablep)
- return 0; /* Bytes unmapped */
-
- if (size == split_sz) {
- unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
- max_entries = ptes_per_table - unmap_idx_start;
- num_entries = min_t(int, pgcount, max_entries);
- }
-
- blk_paddr = iopte_to_paddr(blk_pte, data);
- pte = iopte_prot(blk_pte);
-
- for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
- /* Unmap! */
- if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
- continue;
-
- __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
- }
-
- pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
- if (pte != blk_pte) {
- __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
- /*
- * We may race against someone unmapping another part of this
- * block, but anything else is invalid. We can't misinterpret
- * a page entry here since we're never at the last level.
- */
- if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
- return 0;
-
- tablep = iopte_deref(pte, data);
- } else if (unmap_idx_start >= 0) {
- for (i = 0; i < num_entries; i++)
- io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
-
- return num_entries * size;
- }
-
- return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
-}
-
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size, size_t pgcount,
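
In outline, the deleted helper did the following (a condensed, non-buildable paraphrase of the removed code above, kept here for readers of this diff):

/* Replace one block PTE with a next-level table covering the same
 * range minus the part being unmapped, then swap it in atomically. */
tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, cookie);
for (i = 0; i < ptes_per_table; i++)
	if (i < unmap_idx_start || i >= unmap_idx_start + num_entries)
		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); /* cmpxchg */
if (pte != blk_pte)
	/* lost a race: free tablep, recurse into whichever table won */

The GFP_ATOMIC allocation and the cmpxchg retry are what the new code avoids by refusing partial unmaps outright.
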
@@ -650,7 +618,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
- max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+ max_entries = arm_lpae_max_entries(unmap_idx_start, data);
num_entries = min_t(int, pgcount, max_entries);
/* Find and handle non-leaf entries */
@@ -678,12 +646,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
return i * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
- /*
- * Insert a table at the next level to map the old region,
- * minus the part we want to unmap
- */
- return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
- lvl + 1, ptep, pgcount);
+ WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
+ return 0;
}
/* Keep on walkin' */
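
With split-on-unmap gone, callers must unmap large PTEs at the size they were mapped and re-map whatever should remain. A rough sketch of the new contract (SZ_2M/SZ_4K and the surrounding variables are illustrative):

/* Shrink a 2M block so only its first 4K stays mapped */
if (ops->unmap_pages(ops, iova, SZ_2M, 1, gather) != SZ_2M)
	goto err;	/* must cover the whole block */
if (ops->map_pages(ops, iova, paddr, SZ_4K, 1, prot, GFP_KERNEL, &mapped))
	goto err;	/* re-establish the part to keep */
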
@@ -1035,8 +999,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- /* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks)
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1347,19 +1310,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
iova += SZ_1G;
}
- /* Partial unmap */
- size = 1UL << __ffs(cfg->pgsize_bitmap);
- if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
- return __FAIL(ops, i);
-
- /* Remap of partial unmap */
- if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
- IOMMU_READ, GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
- return __FAIL(ops, i);
-
/* Full unmap */
iova = 0;
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
@@ -1382,6 +1332,23 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
iova += SZ_1G;
}
+ /*
+ * Map/unmap the last largest supported page of the IAS; this can
+ * trigger corner cases in the concatenated page tables.
+ */
+ mapped = 0;
+ size = 1UL << __fls(cfg->pgsize_bitmap);
+ iova = (1UL << cfg->ias) - size;
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
+ return __FAIL(ops, i);
+ if (mapped != size)
+ return __FAIL(ops, i);
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
+ return __FAIL(ops, i);
+
free_io_pgtable_ops(ops);
}
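
To see the corner this exercises, assume a stage-2 config with ias = 40 and a 4K granule, where the level-1 PGD is two concatenated pages and the largest supported page is 1GiB:

/* iova = 2^40 - 2^30 -> level-1 index 1023, in the PGD's 2nd page */
int idx = (int)(((1ULL << 40) - (1ULL << 30)) >> 30);	/* 1023 */
assert(512 - idx == -511);		/* old arithmetic underflows */
assert(512 - (idx & 511) == 1);		/* arm_lpae_max_entries()    */
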