Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c           | 51
-rw-r--r--  drivers/iommu/amd_iommu_types.h     |  2
-rw-r--r--  drivers/iommu/arm-smmu-v3.c         |  7
-rw-r--r--  drivers/iommu/arm-smmu.c            | 34
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c  |  4
-rw-r--r--  drivers/iommu/s390-iommu.c          |  3
6 files changed, 53 insertions(+), 48 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a7aa0e76eebd..58fa8cc0262b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1008,15 +1008,13 @@ static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
* Writes the command to the IOMMU's command buffer and informs the
* hardware about the new command.
*/
-static int iommu_queue_command_sync(struct amd_iommu *iommu,
- struct iommu_cmd *cmd,
- bool sync)
+static int __iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
{
u32 left, tail, head, next_tail;
- unsigned long flags;
again:
- spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
@@ -1025,15 +1023,14 @@ again:
if (left <= 2) {
struct iommu_cmd sync_cmd;
- volatile u64 sem = 0;
int ret;
- build_completion_wait(&sync_cmd, (u64)&sem);
- copy_cmd_to_buffer(iommu, &sync_cmd, tail);
+ iommu->cmd_sem = 0;
- spin_unlock_irqrestore(&iommu->lock, flags);
+ build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem);
+ copy_cmd_to_buffer(iommu, &sync_cmd, tail);
- if ((ret = wait_on_sem(&sem)) != 0)
+ if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0)
return ret;
goto again;
@@ -1044,9 +1041,21 @@ again:
/* We need to sync now to make sure all commands are processed */
iommu->need_sync = sync;
+ return 0;
+}
+
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ ret = __iommu_queue_command_sync(iommu, cmd, sync);
spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
+ return ret;
}
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
@@ -1061,19 +1070,29 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
static int iommu_completion_wait(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;
- volatile u64 sem = 0;
+ unsigned long flags;
int ret;
if (!iommu->need_sync)
return 0;
- build_completion_wait(&cmd, (u64)&sem);
- ret = iommu_queue_command_sync(iommu, &cmd, false);
+ build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ iommu->cmd_sem = 0;
+
+ ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
- return ret;
+ goto out_unlock;
+
+ ret = wait_on_sem(&iommu->cmd_sem);
- return wait_on_sem(&sem);
+out_unlock:
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
}
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
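
The amd_iommu.c hunks above do two things: the completion-wait semaphore moves off the stack into the per-IOMMU structure (iommu->cmd_sem), and the queueing path is split into __iommu_queue_command_sync(), which expects iommu->lock to be held, and iommu_queue_command_sync(), a wrapper that takes the lock. That lets iommu_completion_wait() hold the lock across both queueing the COMPLETION_WAIT command and polling the semaphore. The snippet below is a minimal, self-contained sketch of the "lock-held helper plus locking wrapper" split using a toy ring buffer and a pthread mutex; all names (struct ring, ring_submit_locked, ring_submit) are illustrative, not kernel API.

/* Sketch: lock-held helper plus locking wrapper, modelled loosely on
 * __iommu_queue_command_sync()/iommu_queue_command_sync() above. */
#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	unsigned int head, tail;
	int slots[8];
};

/* Caller must already hold ring->lock. */
static int ring_submit_locked(struct ring *r, int cmd)
{
	unsigned int next = (r->tail + 1) % 8;

	if (next == r->head)
		return -1;		/* queue full */
	r->slots[r->tail] = cmd;
	r->tail = next;
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock. */
static int ring_submit(struct ring *r, int cmd)
{
	int ret;

	pthread_mutex_lock(&r->lock);
	ret = ring_submit_locked(r, cmd);
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("submit: %d\n", ring_submit(&r, 42));
	return 0;
}
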
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index fa766eefd590..0d91785ebdc3 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -565,6 +565,8 @@ struct amd_iommu {
struct amd_irte_ops *irte_ops;
#endif
+
+ volatile u64 __aligned(8) cmd_sem;
};
#define ACPIHID_UID_LEN 256
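
The new cmd_sem field gives the completion-wait semaphore a stable, 8-byte-aligned home in per-IOMMU memory; since the hardware writes the semaphore by address, keeping it on the kernel stack (as the removed "volatile u64 sem = 0" did) is fragile. A rough sketch of how such a per-device semaphore might be polled is shown below; the names (struct fake_iommu, wait_on_sem_sketch) are placeholders and the loop only approximates the driver's wait_on_sem() behaviour.

/* Sketch: bounded polling of a per-device completion semaphore.
 * Placeholder types and names; not the driver's implementation. */
#include <stdint.h>

struct fake_iommu {
	volatile uint64_t cmd_sem __attribute__((aligned(8)));
};

static int wait_on_sem_sketch(struct fake_iommu *iommu)
{
	unsigned long timeout = 1000000;

	/* Spin until another agent writes a non-zero value, or give up. */
	while (!iommu->cmd_sem && --timeout)
		;

	return timeout ? 0 : -1;	/* -1: completion never arrived */
}

int main(void)
{
	struct fake_iommu iommu = { .cmd_sem = 0 };

	iommu.cmd_sem = 1;	/* stand-in for the hardware's write */
	return wait_on_sem_sketch(&iommu);
}
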
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ce801170d5f2..641e88761319 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
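
Two fixes are visible in the arm-smmu-v3.c hunks: the command stuck at the head of the queue is now read and rewritten via the consumer index (Q_ENT(q, cons)), with queue_write() taking its destination first, and an aborting STE is accepted as a legal live configuration when bypass is disabled instead of triggering BUG(). The sketch below illustrates only the first point, addressing a faulting entry through the consumer index of a power-of-two circular queue; the macro and field names are re-created for illustration and do not match the driver exactly.

/* Sketch: find the entry at the consumer index of a circular queue. */
#include <stdint.h>
#include <stdio.h>

#define Q_ENTRIES	8			/* must be a power of two */
#define Q_IDX(p)	((p) & (Q_ENTRIES - 1))	/* strip wrap bits */

struct cmdq {
	uint64_t ent[Q_ENTRIES][2];	/* two 64-bit words per command */
	uint32_t prod, cons;
};

/* The command the hardware stopped on always lives at the cons index. */
static uint64_t *q_ent_cons(struct cmdq *q)
{
	return q->ent[Q_IDX(q->cons)];
}

int main(void)
{
	struct cmdq q = { .prod = 1, .cons = 0 };

	q.ent[0][0] = 0xdeadbeefULL;
	printf("faulting command word0: 0x%llx\n",
	       (unsigned long long)q_ent_cons(&q)[0]);
	return 0;
}
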
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f49fe29f202..2db74ebc3240 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- int flags, ret;
- u32 fsr, fsynr, resume;
+ u32 fsr, fsynr;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
if (!(fsr & FSR_FAULT))
return IRQ_NONE;
- if (fsr & FSR_IGN)
- dev_err_ratelimited(smmu->dev,
- "Unexpected context fault (fsr 0x%x)\n",
- fsr);
-
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
- if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
- ret = IRQ_HANDLED;
- resume = RESUME_RETRY;
- } else {
- dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- iova, fsynr, cfg->cbndx);
- ret = IRQ_NONE;
- resume = RESUME_TERMINATE;
- }
-
- /* Clear the faulting FSR */
- writel(fsr, cb_base + ARM_SMMU_CB_FSR);
- /* Retry or terminate any stalled transactions */
- if (fsr & FSR_SS)
- writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cfg->cbndx);
- return ret;
+ writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
/* SCTLR */
- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
if (stage1)
reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
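
The arm-smmu.c changes drop the stall/resume fault model: SCTLR_CFCFG (stall on context fault) is no longer set, report_iommu_fault() is no longer consulted, and the handler simply logs the fault, clears FSR and returns IRQ_HANDLED so that faulting transactions terminate. A minimal sketch of that "log, write-one-to-clear, return handled" shape is given below, with stub register accessors standing in for MMIO; every name and bit position here is a placeholder.

/* Sketch: terminate-model context fault handler. Placeholder register
 * model (write-one-to-clear status); not the driver's MMIO interface. */
#include <stdint.h>
#include <stdio.h>

#define FSR_FAULT	(1u << 1)	/* illustrative bit position */

static uint32_t fake_fsr = FSR_FAULT;	/* pretend a fault is pending */
static uint32_t fake_far = 0x1000;	/* pretend faulting address */

static uint32_t reg_read_fsr(void)  { return fake_fsr; }
static uint32_t reg_read_far(void)  { return fake_far; }
static void     reg_write_fsr(uint32_t v) { fake_fsr &= ~v; }	/* W1C */

static int context_fault_sketch(void)
{
	uint32_t fsr = reg_read_fsr();

	if (!(fsr & FSR_FAULT))
		return 0;			/* IRQ_NONE */

	fprintf(stderr, "unhandled context fault: fsr=0x%x iova=0x%x\n",
		(unsigned)fsr, (unsigned)reg_read_far());
	reg_write_fsr(fsr);			/* ack: clear the status */
	return 1;				/* IRQ_HANDLED */
}

int main(void)
{
	printf("handler returned %d, fsr now 0x%x\n",
	       context_fault_sketch(), (unsigned)fake_fsr);
	return 0;
}
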
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 8c6139986d7d..def8ca1c982d 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
int prot = IOMMU_READ;
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
- if (attr & ARM_V7S_PTE_AP_RDONLY)
+ if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
+ if (pte & ARM_V7S_ATTR_XN(lvl))
+ prot |= IOMMU_NOEXEC;
return prot;
}
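
The io-pgtable-arm-v7s.c fix corrects the sense of the access-permission test (ARM_V7S_PTE_AP_RDONLY set means read-only, so write permission follows from its absence) and reports the execute-never bit as IOMMU_NOEXEC. Below is a small stand-alone decoder in the same spirit; the bit positions and flag names are illustrative, not the driver's real field layout.

/* Sketch: decode prot flags from a PTE where one bit has inverted
 * sense (RDONLY set => not writable). Illustrative bit positions. */
#include <stdint.h>
#include <stdio.h>

#define PROT_READ	0x1
#define PROT_WRITE	0x2
#define PROT_NOEXEC	0x4

#define PTE_AP_RDONLY	(1u << 5)	/* set => mapping is read-only */
#define PTE_XN		(1u << 0)	/* set => execute-never */

static int pte_to_prot_sketch(uint32_t pte)
{
	int prot = PROT_READ;

	if (!(pte & PTE_AP_RDONLY))	/* writable unless RDONLY is set */
		prot |= PROT_WRITE;
	if (pte & PTE_XN)
		prot |= PROT_NOEXEC;
	return prot;
}

int main(void)
{
	printf("rw pte -> prot 0x%x\n", pte_to_prot_sketch(0));
	printf("ro+xn  -> prot 0x%x\n",
	       pte_to_prot_sketch(PTE_AP_RDONLY | PTE_XN));
	return 0;
}
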
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index a04d491cf431..3b44b1d82f3b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -101,8 +101,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
zpci_dma_exit_device(zdev);
zdev->dma_table = s390_domain->dma_table;
- rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
goto out_restore;
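
The s390-iommu.c change registers the DMA translation table over the device's reported aperture, [zdev->start_dma, zdev->end_dma], rather than a range skewed by PAGE_OFFSET and recomputed from iommu_size. The tiny sketch below only contrasts the two end-address calculations; the structure is a stand-in for zpci_dev, with made-up example values.

/* Sketch: aperture end taken from the device-reported end_dma rather
 * than recomputed as start_dma + iommu_size - 1. Stand-in struct. */
#include <stdint.h>
#include <stdio.h>

struct fake_zdev {
	uint64_t start_dma;	/* first usable DMA address */
	uint64_t end_dma;	/* last usable DMA address (inclusive) */
	uint64_t iommu_size;	/* may cover less than the full aperture */
};

int main(void)
{
	struct fake_zdev zdev = {
		.start_dma  = 0x100000000ULL,
		.end_dma    = 0x1ffffffffULL,
		.iommu_size = 0x040000000ULL,
	};

	printf("derived end:  0x%llx\n",
	       (unsigned long long)(zdev.start_dma + zdev.iommu_size - 1));
	printf("reported end: 0x%llx\n",
	       (unsigned long long)zdev.end_dma);
	return 0;
}
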