author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-16 14:14:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-16 14:14:40 -0700
commit		52a5525214d0d612160154d902956eca0558b7c0 (patch)
tree		a459e3fc3977531d9f1c389eda7b49aaa0f416d0 /include/linux
parent		bbfe0d6b8b730af4954a0e0e741217eb3e1c58bc (diff)
parent		e95adb9add75affb98570a518c902f50e5fcce1b (diff)
Merge tag 'iommu-updates-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 - batched unmap support for the IOMMU-API

 - support for unlocked command queueing in the ARM-SMMU driver

 - rework the ATS support in the ARM-SMMU driver

 - more refactoring in the ARM-SMMU driver to support hardware
   implementation-specific quirks and errata

 - bounce buffering DMA-API implementation in the Intel VT-d driver for
   untrusted devices (like Thunderbolt devices)

 - fixes for runtime PM support in the OMAP iommu driver

 - MT8183 IOMMU support in the Mediatek IOMMU driver

 - rework of the way the IOMMU core sets the default domain type for
   groups. Changing the default domain type on x86 does not require two
   kernel parameters anymore.

 - more smaller fixes and cleanups

* tag 'iommu-updates-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (113 commits)
  iommu/vt-d: Declare Broadwell igfx dmar support snafu
  iommu/vt-d: Add Scalable Mode fault information
  iommu/vt-d: Use bounce buffer for untrusted devices
  iommu/vt-d: Add trace events for device dma map/unmap
  iommu/vt-d: Don't switch off swiotlb if bounce page is used
  iommu/vt-d: Check whether device requires bounce buffer
  swiotlb: Split size parameter to map/unmap APIs
  iommu/omap: Mark pm functions __maybe_unused
  iommu/ipmmu-vmsa: Disable cache snoop transactions on R-Car Gen3
  iommu/ipmmu-vmsa: Move IMTTBCR_SL0_TWOBIT_* to restore sort order
  iommu: Don't use sme_active() in generic code
  iommu/arm-smmu-v3: Fix build error without CONFIG_PCI_ATS
  iommu/qcom: Use struct_size() helper
  iommu: Remove wrong default domain comments
  iommu/dma: Fix for dereferencing before null checking
  iommu/mediatek: Clean up struct mtk_smi_iommu
  memory: mtk-smi: Get rid of need_larbid
  iommu/mediatek: Fix VLD_PA_RNG register backup when suspend
  memory: mtk-smi: Add bus_sel for mt8183
  memory: mtk-smi: Invoke pm runtime_callback to enable clocks
  ...
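The headline IOMMU-API change is the batched unmap flow visible in the iommu.h hunks below. A minimal caller-side sketch of that flow, using only the APIs added in this merge ('domain', 'iova' and 'size' are assumed to come from the caller's context):

static size_t unmap_batched(struct iommu_domain *domain,
			    unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);

	/* Accumulate TLB invalidations in 'gather' instead of flushing
	 * once per page...
	 */
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);

	/* ...and issue them in one sync at the end. */
	iommu_tlb_sync(domain, &gather);

	return unmapped;
}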
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/amd-iommu.h                  12
-rw-r--r--   include/linux/blk_types.h                   5
-rw-r--r--   include/linux/intel-iommu.h                 2
-rw-r--r--   include/linux/io-pgtable.h                 66
-rw-r--r--   include/linux/iommu.h                     108
-rw-r--r--   include/linux/omap-iommu.h                 15
-rw-r--r--   include/linux/platform_data/iommu-omap.h    4
-rw-r--r--   include/linux/swiotlb.h                     8
8 files changed, 175 insertions, 45 deletions
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 4a4d00646040..21e950e4ab62 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
extern int
amd_iommu_update_ga(int cpu, bool is_run, void *data);
+extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_deactivate_guest_mode(void *data);
+
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
static inline int
@@ -198,6 +201,15 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data)
return 0;
}
+static inline int amd_iommu_activate_guest_mode(void *data)
+{
+ return 0;
+}
+
+static inline int amd_iommu_deactivate_guest_mode(void *data)
+{
+ return 0;
+}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
#endif /* _ASM_X86_AMD_IOMMU_H */
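A hedged sketch of how a consumer might drive the new pair; only the two declarations above come from this patch, while the call site and the 'ir_data' cookie (the same opaque pointer the existing amd_iommu_update_ga() takes) are assumptions:

/* Hypothetical caller toggling guest mode around vCPU run state. */
static int toggle_guest_mode(void *ir_data, bool entering_guest)
{
	if (entering_guest)
		return amd_iommu_activate_guest_mode(ir_data);

	return amd_iommu_deactivate_guest_mode(ir_data);
}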
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index feff3fe4467e..1b1fa1557e68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,6 +311,7 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
- return cookie != BLK_QC_T_NONE;
+ return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
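With the new cookie, a submitter that sets REQ_NOWAIT_INLINE can detect a would-block condition from the return value alone; a sketch under the assumption that the bio has already been prepared by the caller:

static int submit_nowait_inline(struct bio *bio)
{
	blk_qc_t cookie;

	bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_INLINE;
	cookie = submit_bio(bio);

	/* The would-block error now comes back inline as a cookie
	 * rather than via bi_end_io.
	 */
	if (cookie == BLK_QC_T_EAGAIN)
		return -EAGAIN;

	/* blk_qc_t_valid() rejects both sentinels, so polling code
	 * that checks it stays safe.
	 */
	return 0;
}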
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4fc6454f7ebb..ed11ef594378 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -272,6 +272,8 @@
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
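A sketch of how a fault handler might consume the new macros; the bit positions come from the definitions above, but the variable name and how the fault-record dword is read are assumptions:

/* 'hi' is the fault-record dword carrying the PASID fields
 * (hypothetical; register access elided).
 */
static void report_fault_pasid(u32 hi)
{
	if (dma_frcd_pasid_present(hi))
		pr_err("DMAR fault for PASID %#x\n",
		       dma_frcd_pasid_value(hi));
	else
		pr_err("DMAR fault without PASID\n");
}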
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index b5a450a3bb47..ec7a13405f10 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
+
#include <linux/bitops.h>
+#include <linux/iommu.h>
/*
* Public API for use by IOMMU drivers
@@ -17,22 +19,31 @@ enum io_pgtable_fmt {
};
/**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
*
- * @tlb_flush_all: Synchronously invalidate the entire TLB context.
- * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
- * any corresponding page table updates are visible to the
- * IOMMU.
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ * (sometimes referred to as the "walk cache") for a virtual
+ * address range.
+ * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
+ * address range.
+ * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
+ * single page. IOMMUs that cannot batch TLB invalidation
+ * operations efficiently will typically issue them here, but
+ * others may decide to update the iommu_iotlb_gather structure
+ * and defer the invalidation until iommu_tlb_sync() instead.
*
* Note that these can all be called in atomic context and must therefore
* not block.
*/
-struct iommu_gather_ops {
+struct iommu_flush_ops {
void (*tlb_flush_all)(void *cookie);
- void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
- void (*tlb_sync)(void *cookie);
+ void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+ void *cookie);
+ void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
+ void *cookie);
+ void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie);
};
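For drivers the rename is largely mechanical; a minimal sketch of a converted ops table (every my_* helper is a hypothetical stand-in for a real driver's invalidation routines, and the signatures mirror the struct above):

static void my_tlb_flush_all(void *cookie)
{
	/* issue a full-context invalidation for this domain */
}

static void my_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
	/* invalidate walk-cache entries covering [iova, iova + size) */
}

static void my_tlb_flush_leaf(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
	/* invalidate leaf entries covering [iova, iova + size) */
}

static void my_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule, void *cookie)
{
	/* either invalidate the page now, or stash it in 'gather' */
}

static const struct iommu_flush_ops my_flush_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_flush_walk	= my_tlb_flush_walk,
	.tlb_flush_leaf	= my_tlb_flush_leaf,
	.tlb_add_page	= my_tlb_add_page,	/* optional */
};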
/**
@@ -65,10 +76,9 @@ struct io_pgtable_cfg {
* (unmapped) entries but the hardware might do so anyway, perform
* TLB maintenance when mapping as well as when unmapping.
*
- * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
- * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
- * when the SoC is in "4GB mode" and they can only access the high
- * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+ * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
+ * the v7s format to support up to 34-bit PAs, with PA bits 32 and 33
+ * encoded in bits 9 and 4 of the PTE respectively.
*
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for
@@ -77,14 +87,14 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
- #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
unsigned int oas;
bool coherent_walk;
- const struct iommu_gather_ops *tlb;
+ const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
/* Low-level data specific to the table format */
@@ -128,7 +138,7 @@ struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size);
+ size_t size, struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
};
@@ -184,15 +194,27 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
iop->cfg.tlb->tlb_flush_all(iop->cookie);
}
-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
- unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+ size_t size, size_t granule)
+{
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
+ size_t size, size_t granule)
{
- iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+ iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
}
-static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+ struct iommu_iotlb_gather *gather, unsigned long iova,
+ size_t granule)
{
- iop->cfg.tlb->tlb_sync(iop->cookie);
+ if (iop->cfg.tlb->tlb_add_page)
+ iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
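On the page-table side, the gather pointer now rides through ->unmap() into these helpers; a sketch of a walker's leaf-unmap step (my_clear_pte and the surrounding walk are hypothetical; io_pgtable_ops_to_pgtable() is the existing container_of helper from this header):

static size_t my_unmap_leaf(struct io_pgtable_ops *ops, unsigned long iova,
			    size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);

	my_clear_pte(iop, iova);	/* hypothetical PTE clear */

	/* Queue (or directly issue) the leaf invalidation for this page. */
	io_pgtable_tlb_add_page(iop, gather, iova, size);

	return size;
}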
/**
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index fdc355ccc570..29bac5345563 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -192,6 +192,23 @@ struct iommu_sva_ops {
#ifdef CONFIG_IOMMU_API
/**
+ * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+ *
+ * @start: IOVA representing the start of the range to be flushed
+ * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @pgsize: The interval at which to perform the flush
+ *
+ * This structure is intended to be updated by multiple calls to the
+ * ->unmap() function in struct iommu_ops before eventually being passed
+ * into ->iotlb_sync().
+ */
+struct iommu_iotlb_gather {
+ unsigned long start;
+ unsigned long end;
+ size_t pgsize;
+};
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @domain_alloc: allocate iommu domain
@@ -201,7 +218,6 @@ struct iommu_sva_ops {
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_range_add: Add a given iova range to the flush queue for this domain
* @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
@@ -242,12 +258,11 @@ struct iommu_ops {
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_range_add)(struct iommu_domain *domain,
- unsigned long iova, size_t size);
void (*iotlb_sync_map)(struct iommu_domain *domain);
- void (*iotlb_sync)(struct iommu_domain *domain);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
@@ -378,6 +393,13 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
return (struct iommu_device *)dev_get_drvdata(dev);
}
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
+{
+ *gather = (struct iommu_iotlb_gather) {
+ .start = ULONG_MAX,
+ };
+}
+
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -402,7 +424,8 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
- unsigned long iova, size_t size);
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
@@ -413,6 +436,9 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
+extern void iommu_set_default_passthrough(bool cmd_line);
+extern void iommu_set_default_translated(bool cmd_line);
+extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
enum iommu_resv_type type);
@@ -476,17 +502,38 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
domain->ops->flush_iotlb_all(domain);
}
-static inline void iommu_tlb_range_add(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static inline void iommu_tlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- if (domain->ops->iotlb_range_add)
- domain->ops->iotlb_range_add(domain, iova, size);
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain, iotlb_gather);
+
+ iommu_iotlb_gather_init(iotlb_gather);
}
-static inline void iommu_tlb_sync(struct iommu_domain *domain)
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
{
- if (domain->ops->iotlb_sync)
- domain->ops->iotlb_sync(domain);
+ unsigned long start = iova, end = start + size;
+
+ /*
+ * If the new page is disjoint from the current range or is mapped at
+ * a different granularity, then sync the TLB so that the gather
+ * structure can be rewritten.
+ */
+ if (gather->pgsize != size ||
+ end < gather->start || start > gather->end) {
+ if (gather->pgsize)
+ iommu_tlb_sync(domain, gather);
+ gather->pgsize = size;
+ }
+
+ if (gather->end < end)
+ gather->end = end;
+
+ if (gather->start > start)
+ gather->start = start;
}
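From a driver's ->unmap() the expected pattern is to report each unmapped page here and let the structure coalesce; a sketch under the assumption of a hypothetical my_clear_mapping() helper (the core already splits requests per supported page size):

static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
		       size_t pgsize, struct iommu_iotlb_gather *gather)
{
	if (!my_clear_mapping(domain, iova, pgsize))	/* hypothetical */
		return 0;

	/* Contiguous, same-granule pages are merged into one range;
	 * a disjoint or differently-sized page forces a sync first.
	 */
	iommu_iotlb_gather_add_page(domain, gather, iova, pgsize);

	return pgsize;
}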
/* PCI device grouping function */
@@ -567,6 +614,7 @@ struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
+struct iommu_iotlb_gather {};
static inline bool iommu_present(struct bus_type *bus)
{
@@ -621,7 +669,8 @@ static inline size_t iommu_unmap(struct iommu_domain *domain,
}
static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather)
{
return 0;
}
@@ -637,12 +686,8 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}
-static inline void iommu_tlb_range_add(struct iommu_domain *domain,
- unsigned long iova, size_t size)
-{
-}
-
-static inline void iommu_tlb_sync(struct iommu_domain *domain)
+static inline void iommu_tlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
{
}
@@ -694,6 +739,19 @@ static inline int iommu_request_dma_domain_for_dev(struct device *dev)
return -ENODEV;
}
+static inline void iommu_set_default_passthrough(bool cmd_line)
+{
+}
+
+static inline void iommu_set_default_translated(bool cmd_line)
+{
+}
+
+static inline bool iommu_default_passthrough(void)
+{
+ return true;
+}
+
static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -827,6 +885,16 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
return NULL;
}
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
index 153bf25b4df3..2c32ca09df02 100644
--- a/include/linux/omap-iommu.h
+++ b/include/linux/omap-iommu.h
@@ -10,12 +10,27 @@
#ifndef _OMAP_IOMMU_H_
#define _OMAP_IOMMU_H_
+struct iommu_domain;
+
#ifdef CONFIG_OMAP_IOMMU
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
+
+int omap_iommu_domain_deactivate(struct iommu_domain *domain);
+int omap_iommu_domain_activate(struct iommu_domain *domain);
#else
static inline void omap_iommu_save_ctx(struct device *dev) {}
static inline void omap_iommu_restore_ctx(struct device *dev) {}
+
+static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int omap_iommu_domain_activate(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
#endif
#endif
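A hedged sketch of the intended consumer (per the pull summary, the OMAP runtime-PM rework); the suspend/resume hooks and their surrounding context are assumptions, only the two calls come from this patch:

/* Hypothetical client hooks for a processor behind the OMAP IOMMU. */
static int my_client_suspend(struct iommu_domain *domain)
{
	/* Idle the MMU so its context can be saved and clocks dropped. */
	return omap_iommu_domain_deactivate(domain);
}

static int my_client_resume(struct iommu_domain *domain)
{
	/* Re-enable the MMU and restore its context before DMA restarts. */
	return omap_iommu_domain_activate(domain);
}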
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 44d913a7580c..8474a0208b34 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -13,4 +13,8 @@ struct iommu_platform_data {
const char *reset_name;
int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
+ int (*device_enable)(struct platform_device *pdev);
+ int (*device_idle)(struct platform_device *pdev);
+ int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request,
+ u8 *pwrst);
};
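Board or pdata-quirk code would wire these up roughly as follows; every callback body is a hypothetical stand-in for the platform's runtime-PM plumbing:

static int my_iommu_enable(struct platform_device *pdev)
{
	return 0;	/* e.g. forward to the omap_device enable path */
}

static int my_iommu_idle(struct platform_device *pdev)
{
	return 0;	/* e.g. forward to the omap_device idle path */
}

static struct iommu_platform_data my_iommu_pdata = {
	.reset_name	= "mmu",	/* hypothetical reset line name */
	.device_enable	= my_iommu_enable,
	.device_idle	= my_iommu_idle,
};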
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 361f62bb4a8e..cde3dc18e21a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -46,13 +46,17 @@ enum dma_sync_target {
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
- phys_addr_t phys, size_t size,
+ phys_addr_t phys,
+ size_t mapping_size,
+ size_t alloc_size,
enum dma_data_direction dir,
unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
+ size_t mapping_size,
+ size_t alloc_size,
+ enum dma_data_direction dir,
unsigned long attrs);
extern void swiotlb_tbl_sync_single(struct device *hwdev,
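The split lets the bounce-buffer code reserve more slab space than the bytes actually bounced, so an untrusted device cannot peek past the mapping into a slot shared with another mapping. A sketch of a caller honoring the new signature; the page-granular padding policy shown is an assumption, the parameter order comes from the declarations above:

/* Hypothetical bounce mapping that pads the allocation to a page so the
 * unused tail is never shared with another mapping.
 */
static phys_addr_t bounce_map(struct device *dev, dma_addr_t tbl_dma_addr,
			      phys_addr_t phys, size_t mapping_size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	size_t alloc_size = ALIGN(mapping_size, PAGE_SIZE);

	return swiotlb_tbl_map_single(dev, tbl_dma_addr, phys,
				      mapping_size, alloc_size, dir, attrs);
}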