author    Dragan Cvetic <dragan.cvetic@xilinx.com>    2019-07-27 09:33:55 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-08-15 17:54:38 +0200
commit    cc538f609dee49b73545569c49e3abd891fdd8b3 (patch)
tree      b8a7622cf127a83c5888a521a5fefefd417ec159 /drivers
parent    77dd39d924e650cd20696d790f861dfe26e0cb64 (diff)
misc: xilinx_sdfec: Support poll file operation
Support monitoring and detecting SD-FEC error events through an IRQ and the poll file operation. The SD-FEC device can detect single-error and multi-error events. An error triggers an interrupt, which runs a ONE_SHOT IRQ thread. The IRQ thread determines the type of error and passes that information to the poll function. The file_operations poll() callback collects the events and updates the statistics accordingly. poll() blocks on a wait queue, which is woken by the ONE_SHOT IRQ handling thread.

Also support the SD-FEC interrupt set ioctl callback. The SD-FEC can detect two types of errors: coding errors (ECC) and data interface errors (TLAST). These errors are events which can trigger an IRQ if enabled, so the driver can monitor and detect them through the IRQ and update the statistical data accordingly.

Tested-by: Dragan Cvetic <dragan.cvetic@xilinx.com>
Signed-off-by: Derek Kiernan <derek.kiernan@xilinx.com>
Signed-off-by: Dragan Cvetic <dragan.cvetic@xilinx.com>
Link: https://lore.kernel.org/r/1564216438-322406-6-git-send-email-dragan.cvetic@xilinx.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
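As a rough illustration of the new interrupt-set ioctl, a user-space caller might enable both error classes as sketched below. The sketch assumes a UAPI header misc/xilinx_sdfec.h exporting XSDFEC_SET_IRQ and struct xsdfec_irq with the enable_isr / enable_ecc_isr fields used in this patch, plus a /dev/xsdfec0 device node; those names come from the surrounding series and are assumptions here, not something this patch adds.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/xilinx_sdfec.h>	/* assumed UAPI header: XSDFEC_SET_IRQ, struct xsdfec_irq */

int main(void)
{
	struct xsdfec_irq irq = {
		.enable_isr = 1,	/* TLAST / data interface error interrupts */
		.enable_ecc_isr = 1,	/* ECC error interrupts */
	};
	int fd = open("/dev/xsdfec0", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver to unmask both interrupt sources. */
	if (ioctl(fd, XSDFEC_SET_IRQ, &irq) < 0)
		perror("XSDFEC_SET_IRQ");

	close(fd);
	return 0;
}

A subsequent poll() on the same file descriptor then blocks until the ONE_SHOT IRQ thread reports an event; a matching poll loop is sketched after the xsdfec_poll() hunk below.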
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 240
1 file changed, 236 insertions(+), 4 deletions(-)
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 579f23617dd7..61775c5626ed 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -191,27 +191,43 @@ struct xsdfec_clks {
* struct xsdfec_dev - Driver data for SDFEC
* @miscdev: Misc device handle
* @clks: Clocks managed by the SDFEC driver
- * @regs: device physical base address
- * @dev: pointer to device struct
+ * @waitq: Driver wait queue
* @config: Configuration of the SDFEC device
* @dev_name: Device name
+ * @flags: spinlock flags
+ * @regs: device physical base address
+ * @dev: pointer to device struct
* @state: State of the SDFEC device
* @error_data_lock: Error counter and states spinlock
* @dev_id: Device ID
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ * @irq: IRQ number
+ * @state_updated: indicates State updated by interrupt handler
+ * @stats_updated: indicates Stats updated by interrupt handler
*
* This structure contains necessary state for SDFEC driver to operate
*/
struct xsdfec_dev {
struct miscdevice miscdev;
struct xsdfec_clks clks;
- void __iomem *regs;
- struct device *dev;
+ wait_queue_head_t waitq;
struct xsdfec_config config;
char dev_name[DEV_NAME_LEN];
+ unsigned long flags;
+ void __iomem *regs;
+ struct device *dev;
enum xsdfec_state state;
/* Spinlock to protect state_updated and stats_updated */
spinlock_t error_data_lock;
int dev_id;
+ u32 isr_err_count;
+ u32 cecc_count;
+ u32 uecc_count;
+ int irq;
+ bool state_updated;
+ bool stats_updated;
};
static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
@@ -284,6 +300,90 @@ static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
return err;
}
+static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if (mask_read & XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling irq with IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disabling irq with IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling ECC irq with ECC IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_ECC_ISR_MASK) ||
+ ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_PL_INIT_ECC_ISR_MASK))) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disable ECC irq with ECC IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_irq irq;
+ int err;
+ int isr_err;
+ int ecc_err;
+
+ err = copy_from_user(&irq, arg, sizeof(irq));
+ if (err)
+ return -EFAULT;
+
+ /* Setup tlast related IRQ */
+ isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
+ if (!isr_err)
+ xsdfec->config.irq.enable_isr = irq.enable_isr;
+
+ /* Setup ECC related IRQ */
+ ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
+ if (!ecc_err)
+ xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
+
+ if (isr_err < 0 || ecc_err < 0)
+ err = -EIO;
+
+ return err;
+}
+
static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
struct xsdfec_turbo turbo;
@@ -760,6 +860,9 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
case XSDFEC_GET_CONFIG:
rval = xsdfec_get_config(xsdfec, arg);
break;
+ case XSDFEC_SET_IRQ:
+ rval = xsdfec_set_irq(xsdfec, arg);
+ break;
case XSDFEC_SET_TURBO:
rval = xsdfec_set_turbo(xsdfec, arg);
break;
@@ -793,11 +896,36 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
}
#endif
+static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
+
+ if (!xsdfec)
+ return POLLNVAL | POLLHUP;
+
+ poll_wait(file, &xsdfec->waitq, wait);
+
+ /* XSDFEC ISR detected an error */
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ if (xsdfec->state_updated)
+ mask |= POLLIN | POLLPRI;
+
+ if (xsdfec->stats_updated)
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ return mask;
+}
+
static const struct file_operations xsdfec_fops = {
.owner = THIS_MODULE,
.open = xsdfec_dev_open,
.release = xsdfec_dev_release,
.unlocked_ioctl = xsdfec_dev_ioctl,
+ .poll = xsdfec_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = xsdfec_dev_compat_ioctl,
#endif
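From user space, the mask returned by xsdfec_poll() distinguishes the two event classes: POLLIN | POLLPRI when the device state changed (the IRQ thread flagged a reset or PL reconfiguration), and POLLIN | POLLRDNORM when only the error statistics were updated. A minimal event loop built on that contract might look as follows; the /dev/xsdfec0 node name is an assumption and error handling is kept to a minimum:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int fd = open("/dev/xsdfec0", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN | POLLPRI | POLLRDNORM;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;				/* interrupted or failing fd */

		if (pfd.revents & POLLPRI)
			printf("SD-FEC state changed: reset or PL reconfiguration needed\n");
		else if (pfd.revents & POLLRDNORM)
			printf("SD-FEC error statistics updated\n");
	}

	close(fd);
	return 0;
}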
@@ -883,6 +1011,91 @@ static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
return 0;
}
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+ struct xsdfec_dev *xsdfec = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 ecc_err;
+ u32 isr_err;
+ u32 uecc_count;
+ u32 cecc_count;
+ u32 isr_err_count;
+ u32 aecc_count;
+ u32 tmp;
+
+ WARN_ON(xsdfec->irq != irq);
+
+ /* Mask Interrupts */
+ xsdfec_isr_enable(xsdfec, false);
+ xsdfec_ecc_isr_enable(xsdfec, false);
+ /* Read ISR */
+ ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+ isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+ /* Clear the interrupts */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
+ xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
+
+ tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+ /* Count uncorrectable 2-bit errors */
+ uecc_count = hweight32(tmp);
+ /* Count all ECC errors */
+ aecc_count = hweight32(ecc_err);
+ /* Number of correctable 1-bit ECC error */
+ cecc_count = aecc_count - 2 * uecc_count;
+ /* Count ISR errors */
+ isr_err_count = hweight32(isr_err);
+ dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
+ uecc_count, aecc_count, cecc_count, isr_err_count);
+ dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
+ xsdfec->cecc_count, xsdfec->isr_err_count);
+
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ /* Add new errors to a 2-bits counter */
+ if (uecc_count)
+ xsdfec->uecc_count += uecc_count;
+ /* Add new errors to a 1-bits counter */
+ if (cecc_count)
+ xsdfec->cecc_count += cecc_count;
+ /* Add new errors to a ISR counter */
+ if (isr_err_count)
+ xsdfec->isr_err_count += isr_err_count;
+
+ /* Update state/stats flag */
+ if (uecc_count) {
+ if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_PL_RECONFIGURE;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ if (cecc_count)
+ xsdfec->stats_updated = true;
+
+ if (isr_err_count) {
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
+ xsdfec->stats_updated);
+
+ /* Enable another polling */
+ if (xsdfec->state_updated || xsdfec->stats_updated)
+ wake_up_interruptible(&xsdfec->waitq);
+ else
+ ret = IRQ_NONE;
+
+ /* Unmask Interrupts */
+ xsdfec_isr_enable(xsdfec, true);
+ xsdfec_ecc_isr_enable(xsdfec, true);
+
+ return ret;
+}
+
static int xsdfec_clk_init(struct platform_device *pdev,
struct xsdfec_clks *clks)
{
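The correctable-error arithmetic in xsdfec_irq_thread() above (cecc_count = aecc_count - 2 * uecc_count) reads as assuming that each uncorrectable event asserts two bits in the ECC ISR value: its SBE flag plus its MBE flag. A standalone sketch of that counting follows; the mask and bit positions are purely illustrative, not the real register layout.

#include <stdio.h>

/* Stand-in for XSDFEC_ALL_ECC_ISR_MBE_MASK: upper half = MBE flags (illustrative). */
#define EXAMPLE_MBE_MASK 0xFFFF0000u

int main(void)
{
	/*
	 * bit 0         : a correctable single-bit error (SBE flag only)
	 * bits 1 and 17 : one uncorrectable error, assumed to assert both
	 *                 its SBE flag (bit 1) and its MBE flag (bit 17)
	 */
	unsigned int ecc_err = (1u << 0) | (1u << 1) | (1u << 17);

	unsigned int uecc = __builtin_popcount(ecc_err & EXAMPLE_MBE_MASK);	/* 1 */
	unsigned int aecc = __builtin_popcount(ecc_err);			/* 3 */
	unsigned int cecc = aecc - 2 * uecc;					/* 1 */

	printf("uecc=%u aecc=%u cecc=%u\n", uecc, aecc, cecc);
	return 0;
}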
@@ -1049,6 +1262,7 @@ static int xsdfec_probe(struct platform_device *pdev)
struct device *dev;
struct resource *res;
int err;
+ bool irq_enabled = true;
xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
if (!xsdfec)
@@ -1069,6 +1283,12 @@ static int xsdfec_probe(struct platform_device *pdev)
goto err_xsdfec_dev;
}
+ xsdfec->irq = platform_get_irq(pdev, 0);
+ if (xsdfec->irq < 0) {
+ dev_dbg(dev, "platform_get_irq failed");
+ irq_enabled = false;
+ }
+
err = xsdfec_parse_of(xsdfec);
if (err < 0)
goto err_xsdfec_dev;
@@ -1078,6 +1298,18 @@ static int xsdfec_probe(struct platform_device *pdev)
/* Save driver private data */
platform_set_drvdata(pdev, xsdfec);
+ if (irq_enabled) {
+ init_waitqueue_head(&xsdfec->waitq);
+ /* Register IRQ thread */
+ err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+ xsdfec_irq_thread, IRQF_ONESHOT,
+ "xilinx-sdfec16", xsdfec);
+ if (err < 0) {
+ dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+ goto err_xsdfec_dev;
+ }
+ }
+
mutex_lock(&dev_idr_lock);
err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
mutex_unlock(&dev_idr_lock);