author     Arnd Bergmann <arnd@arndb.de>   2018-10-08 14:44:40 +0200
committer  Arnd Bergmann <arnd@arndb.de>   2018-10-08 14:44:40 +0200
commit     75bda3609f94a24a213fa56235bf369056565299 (patch)
tree       4d8d39200aef7a552bad1ddf8291119607b53e1c /drivers/soc
parent     e51e8d5de999cafec922bdd11235812161e12a69 (diff)
parent     6d06009cb216d071e955b3814086595851627910 (diff)
Merge tag 'soc-fsl-next-v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into next/drivers
NXP/FSL SoC drivers updates for v4.20 take 2
- Update the qbman driver to work better with CPU hotplug
- Add a Kconfig dependency on 64-bit DMA addressing for the qbman driver
- Use the last response to determine the valid bit in the qbman driver
- Defer the bman_portals probe until bman itself has probed (see the sketch after this list)
- Add interrupt coalescing APIs to qbman driver
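The probe-ordering items above are built on two new exported helpers, bman_is_probed() and qman_is_probed(), which return 0 while the corresponding controller has not probed yet, a negative value if its probe failed, and 1 once it has probed successfully. A minimal sketch of how a dependent driver would consume them, mirroring the pattern the portal drivers use in the diff below; the consumer probe function is hypothetical, and the prototypes are assumed to be exposed through <soc/fsl/bman.h> and <soc/fsl/qman.h> (those header changes sit outside the drivers/soc diffstat shown here):

/* Hypothetical consumer: defer until both QMan and BMan have probed. */
#include <linux/platform_device.h>
#include <soc/fsl/bman.h>	/* assumed home of bman_is_probed() */
#include <soc/fsl/qman.h>	/* assumed home of qman_is_probed() */

static int example_dpaa_consumer_probe(struct platform_device *pdev)
{
	int err;

	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;	/* qman not probed yet, retry later */
	if (err < 0)
		return -ENODEV;		/* qman probe failed, give up */

	err = bman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0)
		return -ENODEV;

	/* safe to set up frame queues and buffer pools from here on */
	return 0;
}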
* tag 'soc-fsl-next-v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
soc: fsl: qbman: add interrupt coalesce changing APIs
soc: fsl: bman_portals: defer probe after bman's probe
soc: fsl: qbman: Use last response to determine valid bit
soc: fsl: qbman: Add 64 bit DMA addressing requirement to QBMan
soc: fsl: qbman: replace CPU 0 with any online CPU in hotplug handlers
soc: fsl: qbman: Check if CPU is offline when initializing portals
soc: fsl: qman_portals: defer probe after qman's probe
soc: fsl: qbman: add APIs to retrieve the probing status
soc: fsl: qe: Fix copy/paste bug in ucc_get_tdm_sync_shift()
soc: fsl: qbman: qman: avoid allocating from non existing gen_pool
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/soc')
 drivers/soc/fsl/qbman/Kconfig       |  2
 drivers/soc/fsl/qbman/bman.c        |  6
 drivers/soc/fsl/qbman/bman_ccsr.c   | 11
 drivers/soc/fsl/qbman/bman_portal.c | 14
 drivers/soc/fsl/qbman/dpaa_sys.h    | 20
 drivers/soc/fsl/qbman/qman.c        | 56
 drivers/soc/fsl/qbman/qman_ccsr.c   | 11
 drivers/soc/fsl/qbman/qman_portal.c | 14
 drivers/soc/fsl/qe/ucc.c            |  2
9 files changed, 120 insertions, 16 deletions
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index d570cb5fd381..b0943e541796 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,6 +1,6 @@
 menuconfig FSL_DPAA
 	bool "QorIQ DPAA1 framework support"
-	depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
+	depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
 	select GENERIC_ALLOCATOR
 	help
 	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index f9485cedc648..f84ab596bde8 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -562,11 +562,9 @@ static int bman_create_portal(struct bman_portal *portal,
 		dev_err(c->dev, "request_irq() failed\n");
 		goto fail_irq;
 	}
-	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
-	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
-		dev_err(c->dev, "irq_set_affinity() failed\n");
+
+	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
 		goto fail_affinity;
-	}
 
 	/* Need RCR to be empty before continuing */
 	ret = bm_rcr_get_fill(p);
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 05c42235dd41..7c3cc968053c 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
  */
 static dma_addr_t fbpr_a;
 static size_t fbpr_sz;
+static int __bman_probed;
 
 static int bman_fbpr(struct reserved_mem *rmem)
 {
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
 	return IRQ_HANDLED;
 }
 
+int bman_is_probed(void)
+{
+	return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
 static int fsl_bman_probe(struct platform_device *pdev)
 {
 	int ret, err_irq;
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
 	u16 id, bm_pool_cnt;
 	u8 major, minor;
 
+	__bman_probed = -1;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	__bman_probed = 1;
+
 	return 0;
 };
 
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 2f71f7df3465..2c95cf59f3e7 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -65,7 +65,9 @@ static int bman_offline_cpu(unsigned int cpu)
 	if (!pcfg)
 		return 0;
 
-	irq_set_affinity(pcfg->irq, cpumask_of(0));
+	/* use any other online CPU */
+	cpu = cpumask_any_but(cpu_online_mask, cpu);
+	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
 
 	return 0;
 }
@@ -91,7 +93,15 @@ static int bman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct bm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	int irq, cpu;
+	int irq, cpu, err;
+
+	err = bman_is_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(&pdev->dev, "failing probe due to bman probe error\n");
+		return -ENODEV;
+	}
 
 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
 	if (!pcfg)
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index 9f379000da85..ae8afa552b1e 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -111,4 +111,24 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
 #define QBMAN_MEMREMAP_ATTR	MEMREMAP_WC
 #endif
 
+static inline int dpaa_set_portal_irq_affinity(struct device *dev,
+					       int irq, int cpu)
+{
+	int ret = 0;
+
+	if (!irq_can_set_affinity(irq)) {
+		dev_err(dev, "unable to set IRQ affinity\n");
+		return -EINVAL;
+	}
+
+	if (cpu == -1 || !cpu_online(cpu))
+		cpu = cpumask_any(cpu_online_mask);
+
+	ret = irq_set_affinity(irq, cpumask_of(cpu));
+	if (ret)
+		dev_err(dev, "irq_set_affinity() on CPU %d failed\n", cpu);
+
+	return ret;
+}
+
 #endif	/* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..5ce24718c2fd 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -850,12 +850,24 @@ static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
 
 static inline int qm_mc_init(struct qm_portal *portal)
 {
+	u8 rr0, rr1;
 	struct qm_mc *mc = &portal->mc;
 
 	mc->cr = portal->addr.ce + QM_CL_CR;
 	mc->rr = portal->addr.ce + QM_CL_RR0;
-	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
-		    ? 0 : 1;
+	/*
+	 * The expected valid bit polarity for the next CR command is 0
+	 * if RR1 contains a valid response, and is 1 if RR0 contains a
+	 * valid response. If both RR contain all 0, this indicates either
+	 * that no command has been executed since reset (in which case the
+	 * expected valid bit polarity is 1)
+	 */
+	rr0 = mc->rr->verb;
+	rr1 = (mc->rr+1)->verb;
+	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
+		mc->rridx = 1;
+	else
+		mc->rridx = 0;
 	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
 #ifdef CONFIG_FSL_DPAA_CHECKING
 	mc->state = qman_mc_idle;
@@ -1000,6 +1012,37 @@ static inline void put_affine_portal(void)
 
 static struct workqueue_struct *qm_portal_wq;
 
+void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
+{
+	if (!portal)
+		return;
+
+	qm_dqrr_set_ithresh(&portal->p, ithresh);
+	portal->p.dqrr.ithresh = ithresh;
+}
+EXPORT_SYMBOL(qman_dqrr_set_ithresh);
+
+void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
+{
+	if (portal && ithresh)
+		*ithresh = portal->p.dqrr.ithresh;
+}
+EXPORT_SYMBOL(qman_dqrr_get_ithresh);
+
+void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
+{
+	if (portal && iperiod)
+		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
+}
+EXPORT_SYMBOL(qman_portal_get_iperiod);
+
+void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
+{
+	if (portal)
+		qm_out(&portal->p, QM_REG_ITPR, iperiod);
+}
+EXPORT_SYMBOL(qman_portal_set_iperiod);
+
 int qman_wq_alloc(void)
 {
 	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
@@ -1210,11 +1253,9 @@ static int qman_create_portal(struct qman_portal *portal,
 		dev_err(c->dev, "request_irq() failed\n");
 		goto fail_irq;
 	}
-	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
-	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
-		dev_err(c->dev, "irq_set_affinity() failed\n");
+
+	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
 		goto fail_affinity;
-	}
 
 	/* Need EQCR to be empty before continuing */
 	isdr &= ~QM_PIRQ_EQCI;
@@ -2729,6 +2770,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
 {
 	unsigned long addr;
 
+	if (!p)
+		return -ENODEV;
+
 	addr = gen_pool_alloc(p, cnt);
 	if (!addr)
 		return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 79cba58387a5..6fd5fef5f39b 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
 static u32 __iomem *qm_ccsr_start;
 /* A SDQCR mask comprising all the available/visible pool channels */
 static u32 qm_pools_sdqcr;
+static int __qman_probed;
 
 static inline u32 qm_ccsr_in(u32 offset)
 {
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
 	return 0;
 }
 
+int qman_is_probed(void)
+{
+	return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
 	u16 id;
 	u8 major, minor;
 
+	__qman_probed = -1;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	__qman_probed = 1;
+
 	return 0;
 }
 
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index a120002b630e..661c9b234d32 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -195,8 +195,10 @@ static int qman_offline_cpu(unsigned int cpu)
 	if (p) {
 		pcfg = qman_get_qm_portal_config(p);
 		if (pcfg) {
-			irq_set_affinity(pcfg->irq, cpumask_of(0));
-			qman_portal_update_sdest(pcfg, 0);
+			/* select any other online CPU */
+			cpu = cpumask_any_but(cpu_online_mask, cpu);
+			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+			qman_portal_update_sdest(pcfg, cpu);
 		}
 	}
 	return 0;
@@ -227,6 +229,14 @@ static int qman_portal_probe(struct platform_device *pdev)
 	int irq, cpu, err;
 	u32 val;
 
+	err = qman_is_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+		return -ENODEV;
+	}
+
 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
 	if (!pcfg)
 		return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
 {
 	u32 shift;
 
-	shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+	shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
 	shift -= tdm_num * 2;
 
 	return shift;
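The interrupt coalescing hooks added to qman.c above (qman_dqrr_set_ithresh(), qman_dqrr_get_ithresh(), qman_portal_set_iperiod(), qman_portal_get_iperiod()) let a portal user trade interrupt rate against latency: a larger DQRR interrupt threshold (ithresh) and a larger portal interrupt period (the ITPR value, iperiod) mean more dequeue responses are batched per interrupt. A hedged sketch of a caller, assuming the prototypes are exposed through <soc/fsl/qman.h>; the helper name and the log message are illustrative only:

#include <linux/printk.h>
#include <soc/fsl/qman.h>	/* assumed home of the coalescing prototypes */

/* Read back and then adjust coalescing on one software portal. */
static void example_tune_coalescing(struct qman_portal *portal,
				    u8 new_ithresh, u32 new_iperiod)
{
	u8 ithresh;
	u32 iperiod;

	/* current DQRR interrupt threshold and portal interrupt period */
	qman_dqrr_get_ithresh(portal, &ithresh);
	qman_portal_get_iperiod(portal, &iperiod);
	pr_info("coalescing was ithresh=%u iperiod=%u\n", ithresh, iperiod);

	/* larger values batch more dequeues per interrupt, at some latency cost */
	qman_dqrr_set_ithresh(portal, new_ithresh);
	qman_portal_set_iperiod(portal, new_iperiod);
}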