Diffstat (limited to 'drivers/bus')
-rw-r--r-- | drivers/bus/Kconfig   |  26 |
-rw-r--r-- | drivers/bus/arm-cci.c | 517 |
2 files changed, 313 insertions, 230 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7e9c2674af81..ea816ef23537 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -5,11 +5,31 @@ menu "Bus devices"
 
 config ARM_CCI
-	bool "ARM CCI driver support"
+	bool
+
+config ARM_CCI400_COMMON
+	bool
+	select ARM_CCI
+
+config ARM_CCI400_PMU
+	bool "ARM CCI400 PMU support"
+	default y
+	depends on ARM || ARM64
+	depends on HW_PERF_EVENTS
+	select ARM_CCI400_COMMON
+	help
+	  Support for PMU events monitoring on the ARM CCI cache coherent
+	  interconnect.
+
+	  If unsure, say Y
+
+config ARM_CCI400_PORT_CTRL
+	bool
 	depends on ARM && OF && CPU_V7
+	select ARM_CCI400_COMMON
 	help
-	  Driver supporting the CCI cache coherent interconnect for ARM
-	  platforms.
+	  Low level power management driver for CCI400 cache coherent
+	  interconnect for ARM platforms.
 
 config ARM_CCN
 	bool "ARM CCN driver support"
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 84fd66057dad..b854125e4831 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -29,41 +29,36 @@
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
 
-#define DRIVER_NAME		"CCI-400"
-#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
-
-#define CCI_PORT_CTRL		0x0
-#define CCI_CTRL_STATUS		0xc
-
-#define CCI_ENABLE_SNOOP_REQ	0x1
-#define CCI_ENABLE_DVM_REQ	0x2
-#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+static void __iomem *cci_ctrl_base;
+static unsigned long cci_ctrl_phys;
 
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
 struct cci_nb_ports {
 	unsigned int nb_ace;
 	unsigned int nb_ace_lite;
 };
 
-enum cci_ace_port_type {
-	ACE_INVALID_PORT = 0x0,
-	ACE_PORT,
-	ACE_LITE_PORT,
+static const struct cci_nb_ports cci400_ports = {
+	.nb_ace = 2,
+	.nb_ace_lite = 3
 };
 
-struct cci_ace_port {
-	void __iomem *base;
-	unsigned long phys;
-	enum cci_ace_port_type type;
-	struct device_node *dn;
-};
+#define CCI400_PORTS_DATA	(&cci400_ports)
+#else
+#define CCI400_PORTS_DATA	(NULL)
+#endif
 
-static struct cci_ace_port *ports;
-static unsigned int nb_cci_ports;
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+	{},
+};
 
-static void __iomem *cci_ctrl_base;
-static unsigned long cci_ctrl_phys;
+#ifdef CONFIG_ARM_CCI400_PMU
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#define DRIVER_NAME		"CCI-400"
+#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
 
 #define CCI_PMCR		0x0100
 #define CCI_PID2		0x0fe8
@@ -75,20 +70,6 @@ static unsigned long cci_ctrl_phys;
 #define CCI_PID2_REV_MASK	0xf0
 #define CCI_PID2_REV_SHIFT	4
 
-/* Port ids */
-#define CCI_PORT_S0	0
-#define CCI_PORT_S1	1
-#define CCI_PORT_S2	2
-#define CCI_PORT_S3	3
-#define CCI_PORT_S4	4
-#define CCI_PORT_M0	5
-#define CCI_PORT_M1	6
-#define CCI_PORT_M2	7
-
-#define CCI_REV_R0		0
-#define CCI_REV_R1		1
-#define CCI_REV_R1_PX		5
-
 #define CCI_PMU_EVT_SEL		0x000
 #define CCI_PMU_CNTR		0x004
 #define CCI_PMU_CNTR_CTRL	0x008
@@ -100,76 +81,22 @@ static unsigned long cci_ctrl_phys;
 
 #define CCI_PMU_CNTR_MASK	((1ULL << 32) -1)
 
-/*
- * Instead of an event id to monitor CCI cycles, a dedicated counter is
- * provided. Use 0xff to represent CCI cycles and hope that no future revisions
- * make use of this event in hardware.
- */
-enum cci400_perf_events {
-	CCI_PMU_CYCLES = 0xff
-};
-
-#define CCI_PMU_EVENT_MASK		0xff
+#define CCI_PMU_EVENT_MASK		0xffUL
 #define CCI_PMU_EVENT_SOURCE(event)	((event >> 5) & 0x7)
 #define CCI_PMU_EVENT_CODE(event)	(event & 0x1f)
 
 #define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
 
-#define CCI_PMU_CYCLE_CNTR_IDX		0
-#define CCI_PMU_CNTR0_IDX		1
-#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
-
-/*
- * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
- * ports and bits 4:0 are event codes. There are different event codes
- * associated with each port type.
- *
- * Additionally, the range of events associated with the port types changed
- * between Rev0 and Rev1.
- *
- * The constants below define the range of valid codes for each port type for
- * the different revisions and are used to validate the event to be monitored.
- */
-
-#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
-#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
-#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
-#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a
-
-#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
-#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
-#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
-#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
-
-struct pmu_port_event_ranges {
-	u8 slave_min;
-	u8 slave_max;
-	u8 master_min;
-	u8 master_max;
-};
-
-static struct pmu_port_event_ranges port_event_range[] = {
-	[CCI_REV_R0] = {
-		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
-		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
-		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
-		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
-	},
-	[CCI_REV_R1] = {
-		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
-		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
-		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
-		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
-	},
+/* Types of interfaces that can generate events */
+enum {
+	CCI_IF_SLAVE,
+	CCI_IF_MASTER,
+	CCI_IF_MAX,
 };
 
-/*
- * Export different PMU names for the different revisions so userspace knows
- * because the event ids are different
- */
-static char *const pmu_names[] = {
-	[CCI_REV_R0] = "CCI_400",
-	[CCI_REV_R1] = "CCI_400_r1",
+struct event_range {
+	u32 min;
+	u32 max;
 };
 
 struct cci_pmu_hw_events {
@@ -178,13 +105,20 @@ struct cci_pmu_hw_events {
 	raw_spinlock_t pmu_lock;
 };
 
+struct cci_pmu_model {
+	char *name;
+	struct event_range event_ranges[CCI_IF_MAX];
+};
+
+static struct cci_pmu_model cci_pmu_models[];
+
 struct cci_pmu {
 	void __iomem *base;
 	struct pmu pmu;
 	int nr_irqs;
 	int irqs[CCI_PMU_MAX_HW_EVENTS];
 	unsigned long active_irqs;
-	struct pmu_port_event_ranges *port_ranges;
+	const struct cci_pmu_model *model;
 	struct cci_pmu_hw_events hw_events;
 	struct platform_device *plat_device;
 	int num_events;
@@ -196,52 +130,63 @@ static struct cci_pmu *pmu;
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
-static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
-{
-	int i;
-
-	for (i = 0; i < nr_irqs; i++)
-		if (irq == irqs[i])
-			return true;
-
-	return false;
-}
+/* Port ids */
+#define CCI_PORT_S0	0
+#define CCI_PORT_S1	1
+#define CCI_PORT_S2	2
+#define CCI_PORT_S3	3
+#define CCI_PORT_S4	4
+#define CCI_PORT_M0	5
+#define CCI_PORT_M1	6
+#define CCI_PORT_M2	7
 
-static int probe_cci_revision(void)
-{
-	int rev;
-	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
-	rev >>= CCI_PID2_REV_SHIFT;
+#define CCI_REV_R0	0
+#define CCI_REV_R1	1
+#define CCI_REV_R1_PX	5
 
-	if (rev < CCI_REV_R1_PX)
-		return CCI_REV_R0;
-	else
-		return CCI_REV_R1;
-}
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+	CCI_PMU_CYCLES = 0xff
+};
 
-static struct pmu_port_event_ranges *port_range_by_rev(void)
-{
-	int rev = probe_cci_revision();
+#define CCI_PMU_CYCLE_CNTR_IDX		0
+#define CCI_PMU_CNTR0_IDX		1
+#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
 
-	return &port_event_range[rev];
-}
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
 
-static int pmu_is_valid_slave_event(u8 ev_code)
-{
-	return pmu->port_ranges->slave_min <= ev_code &&
-		ev_code <= pmu->port_ranges->slave_max;
-}
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a
 
-static int pmu_is_valid_master_event(u8 ev_code)
-{
-	return pmu->port_ranges->master_min <= ev_code &&
-		ev_code <= pmu->port_ranges->master_max;
-}
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
 
-static int pmu_validate_hw_event(u8 hw_event)
+static int pmu_validate_hw_event(unsigned long hw_event)
 {
 	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
 	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+	int if_type;
+
+	if (hw_event & ~CCI_PMU_EVENT_MASK)
+		return -ENOENT;
 
 	switch (ev_source) {
 	case CCI_PORT_S0:
@@ -250,21 +195,44 @@ static int pmu_validate_hw_event(u8 hw_event)
 	case CCI_PORT_S3:
 	case CCI_PORT_S4:
 		/* Slave Interface */
-		if (pmu_is_valid_slave_event(ev_code))
-			return hw_event;
+		if_type = CCI_IF_SLAVE;
 		break;
 	case CCI_PORT_M0:
 	case CCI_PORT_M1:
 	case CCI_PORT_M2:
 		/* Master Interface */
-		if (pmu_is_valid_master_event(ev_code))
-			return hw_event;
+		if_type = CCI_IF_MASTER;
 		break;
+	default:
+		return -ENOENT;
 	}
 
+	if (ev_code >= pmu->model->event_ranges[if_type].min &&
+		ev_code <= pmu->model->event_ranges[if_type].max)
+		return hw_event;
+
 	return -ENOENT;
 }
 
+static int probe_cci_revision(void)
+{
+	int rev;
+	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+	rev >>= CCI_PID2_REV_SHIFT;
+
+	if (rev < CCI_REV_R1_PX)
+		return CCI_REV_R0;
+	else
+		return CCI_REV_R1;
+}
+
+static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
+{
+	if (platform_has_secure_cci_access())
+		return &cci_pmu_models[probe_cci_revision()];
+	return NULL;
+}
+
 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
 {
 	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
@@ -293,7 +261,6 @@ static void pmu_enable_counter(int idx)
 
 static void pmu_set_event(int idx, unsigned long event)
 {
-	event &= CCI_PMU_EVENT_MASK;
 	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
 }
 
@@ -310,7 +277,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
 {
 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 	struct hw_perf_event *hw_event = &event->hw;
-	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+	unsigned long cci_event = hw_event->config_base;
 	int idx;
 
 	if (cci_event == CCI_PMU_CYCLES) {
@@ -331,7 +298,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
 static int pmu_map_event(struct perf_event *event)
 {
 	int mapping;
-	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+	unsigned long config = event->attr.config;
 
 	if (event->attr.type < PERF_TYPE_MAX)
 		return -ENOENT;
@@ -660,12 +627,21 @@ static void cci_pmu_del(struct perf_event *event, int flags)
 }
 
 static int
-validate_event(struct cci_pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *cci_pmu,
+	       struct cci_pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != cci_pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
@@ -687,15 +663,15 @@ validate_group(struct perf_event *event)
 		.used_mask = CPU_BITS_NONE,
 	};
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
@@ -831,9 +807,9 @@ static const struct attribute_group *pmu_attr_groups[] = {
 
 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 {
-	char *name = pmu_names[probe_cci_revision()];
+	char *name = cci_pmu->model->name;
 	cci_pmu->pmu = (struct pmu) {
-		.name		= pmu_names[probe_cci_revision()],
+		.name		= cci_pmu->model->name,
 		.task_ctx_nr	= perf_invalid_context,
 		.pmu_enable	= cci_pmu_enable,
 		.pmu_disable	= cci_pmu_disable,
@@ -886,22 +862,93 @@ static struct notifier_block cci_pmu_cpu_nb = {
 	.priority	= CPU_PRI_PERF + 1,
 };
 
+static struct cci_pmu_model cci_pmu_models[] = {
+	[CCI_REV_R0] = {
+		.name = "CCI_400",
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI_REV_R0_SLAVE_PORT_MIN_EV,
+				CCI_REV_R0_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI_REV_R0_MASTER_PORT_MIN_EV,
+				CCI_REV_R0_MASTER_PORT_MAX_EV,
+			},
+		},
+	},
+	[CCI_REV_R1] = {
+		.name = "CCI_400_r1",
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI_REV_R1_SLAVE_PORT_MIN_EV,
+				CCI_REV_R1_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI_REV_R1_MASTER_PORT_MIN_EV,
+				CCI_REV_R1_MASTER_PORT_MAX_EV,
+			},
+		},
+	},
+};
+
 static const struct of_device_id arm_cci_pmu_matches[] = {
 	{
 		.compatible = "arm,cci-400-pmu",
+		.data	= NULL,
+	},
+	{
+		.compatible = "arm,cci-400-pmu,r0",
+		.data	= &cci_pmu_models[CCI_REV_R0],
+	},
+	{
+		.compatible = "arm,cci-400-pmu,r1",
+		.data	= &cci_pmu_models[CCI_REV_R1],
 	},
 	{},
 };
 
+static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
+{
+	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
+							pdev->dev.of_node);
+	if (!match)
+		return NULL;
+	if (match->data)
+		return match->data;
+
+	dev_warn(&pdev->dev, "DEPRECATED compatible property,"
+			 "requires secure access to CCI registers");
+	return probe_cci_model(pdev);
+}
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++)
+		if (irq == irqs[i])
+			return true;
+
+	return false;
+}
+
 static int cci_pmu_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	int i, ret, irq;
+	const struct cci_pmu_model *model;
+
+	model = get_cci_model(pdev);
+	if (!model) {
+		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+		return -ENODEV;
+	}
 
 	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
 	if (!pmu)
 		return -ENOMEM;
 
+	pmu->model = model;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pmu->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(pmu->base))
@@ -933,12 +980,6 @@ static int cci_pmu_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	pmu->port_ranges = port_range_by_rev();
-	if (!pmu->port_ranges) {
-		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
-		return -EINVAL;
-	}
-
 	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
 	mutex_init(&pmu->reserve_mutex);
 	atomic_set(&pmu->active_events, 0);
@@ -952,6 +993,7 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	pr_info("ARM %s PMU driver probed", pmu->model->name);
 	return 0;
 }
 
@@ -963,7 +1005,66 @@ static int cci_platform_probe(struct platform_device *pdev)
 	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 }
 
-#endif	/* CONFIG_HW_PERF_EVENTS */
+static struct platform_driver cci_pmu_driver = {
+	.driver = {
+		   .name = DRIVER_NAME_PMU,
+		   .of_match_table = arm_cci_pmu_matches,
+		  },
+	.probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .of_match_table = arm_cci_matches,
+		  },
+	.probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&cci_pmu_driver);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cci_platform_driver);
+}
+
+#else /* !CONFIG_ARM_CCI400_PMU */
+
+static int __init cci_platform_init(void)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_ARM_CCI400_PMU */
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL		0x0
+#define CCI_CTRL_STATUS		0xc
+
+#define CCI_ENABLE_SNOOP_REQ	0x1
+#define CCI_ENABLE_DVM_REQ	0x2
+#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+	ACE_INVALID_PORT = 0x0,
+	ACE_PORT,
+	ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+	void __iomem *base;
+	unsigned long phys;
+	enum cci_ace_port_type type;
+	struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
 
 struct cpu_port {
 	u64 mpidr;
@@ -1284,36 +1385,20 @@ int notrace __cci_control_port_by_index(u32 port, bool enable)
 }
 EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
 
-static const struct cci_nb_ports cci400_ports = {
-	.nb_ace = 2,
-	.nb_ace_lite = 3
-};
-
-static const struct of_device_id arm_cci_matches[] = {
-	{.compatible = "arm,cci-400", .data = &cci400_ports },
-	{},
-};
-
 static const struct of_device_id arm_cci_ctrl_if_matches[] = {
 	{.compatible = "arm,cci-400-ctrl-if", },
 	{},
 };
 
-static int cci_probe(void)
+static int cci_probe_ports(struct device_node *np)
 {
 	struct cci_nb_ports const *cci_config;
 	int ret, i, nb_ace = 0, nb_ace_lite = 0;
-	struct device_node *np, *cp;
+	struct device_node *cp;
 	struct resource res;
 	const char *match_str;
 	bool is_ace;
 
-	np = of_find_matching_node(NULL, arm_cci_matches);
-	if (!np)
-		return -ENODEV;
-
-	if (!of_device_is_available(np))
-		return -ENODEV;
 
 	cci_config = of_match_node(arm_cci_matches, np)->data;
 	if (!cci_config)
@@ -1325,17 +1410,6 @@ static int cci_probe(void)
 	if (!ports)
 		return -ENOMEM;
 
-	ret = of_address_to_resource(np, 0, &res);
-	if (!ret) {
-		cci_ctrl_base = ioremap(res.start, resource_size(&res));
-		cci_ctrl_phys =	res.start;
-	}
-	if (ret || !cci_ctrl_base) {
-		WARN(1, "unable to ioremap CCI ctrl\n");
-		ret = -ENXIO;
-		goto memalloc_err;
-	}
-
 	for_each_child_of_node(np, cp) {
 		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
 			continue;
@@ -1395,12 +1469,37 @@ static int cci_probe(void)
 	sync_cache_w(&cpu_port);
 	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
 	pr_info("ARM CCI driver probed\n");
+
 	return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+	return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
 
-memalloc_err:
+static int cci_probe(void)
+{
+	int ret;
+	struct device_node *np;
+	struct resource res;
+
+	np = of_find_matching_node(NULL, arm_cci_matches);
+	if(!np || !of_device_is_available(np))
+		return -ENODEV;
 
-	kfree(ports);
-	return ret;
+	ret = of_address_to_resource(np, 0, &res);
+	if (!ret) {
+		cci_ctrl_base = ioremap(res.start, resource_size(&res));
+		cci_ctrl_phys = res.start;
+	}
+	if (ret || !cci_ctrl_base) {
+		WARN(1, "unable to ioremap CCI ctrl\n");
+		return -ENXIO;
+	}
+
+	return cci_probe_ports(np);
 }
 
 static int cci_init_status = -EAGAIN;
@@ -1418,42 +1517,6 @@ static int cci_init(void)
 	return cci_init_status;
 }
 
-#ifdef CONFIG_HW_PERF_EVENTS
-static struct platform_driver cci_pmu_driver = {
-	.driver = {
-		   .name = DRIVER_NAME_PMU,
-		   .of_match_table = arm_cci_pmu_matches,
-		  },
-	.probe = cci_pmu_probe,
-};
-
-static struct platform_driver cci_platform_driver = {
-	.driver = {
-		   .name = DRIVER_NAME,
-		   .of_match_table = arm_cci_matches,
-		  },
-	.probe = cci_platform_probe,
-};
-
-static int __init cci_platform_init(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&cci_pmu_driver);
-	if (ret)
-		return ret;
-
-	return platform_driver_register(&cci_platform_driver);
-}
-
-#else
-
-static int __init cci_platform_init(void)
-{
-	return 0;
-}
-
-#endif
 /*
  * To sort out early init calls ordering a helper function is provided to
  * check if the CCI driver has beed initialized. Function check if the driver
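
Note (editor's illustration, not part of the commit): the event-id layout checked by pmu_validate_hw_event() above, bits 7:5 selecting one of the eight ports and bits 4:0 the event code, can be condensed into a small stand-alone sketch. Names ending in _example are invented for the sketch; the bit positions and the r0 ranges are taken from the CCI_PMU_EVENT_* and CCI_REV_R0_* macros in the diff.

	/* Stand-alone sketch of the CCI-400 PMU event-id check (illustrative only). */
	#include <stdbool.h>
	#include <stdint.h>

	#define EVENT_SOURCE(e)		(((e) >> 5) & 0x7)	/* bits 7:5 - port id */
	#define EVENT_CODE(e)		((e) & 0x1f)		/* bits 4:0 - event code */

	struct range_example { uint32_t min, max; };

	/* Rev-r0 ranges from the patch: slave 0x00-0x13, master 0x14-0x1a. */
	static const struct range_example r0_ranges_example[2] = {
		{ 0x00, 0x13 },		/* slave interfaces S0..S4 (port ids 0-4) */
		{ 0x14, 0x1a },		/* master interfaces M0..M2 (port ids 5-7) */
	};

	static bool event_is_valid_r0_example(unsigned long event)
	{
		const struct range_example *r;

		if (event & ~0xffUL)	/* CCI_PMU_EVENT_MASK: id must fit in 8 bits */
			return false;
		if (event == 0xff)	/* dedicated cycle-counter pseudo event, handled separately */
			return true;

		r = &r0_ranges_example[EVENT_SOURCE(event) <= 4 ? 0 : 1];
		return EVENT_CODE(event) >= r->min && EVENT_CODE(event) <= r->max;
	}

On an r1 interconnect the same check would use 0x00-0x14 for slave events and 0x00-0x11 for master events, which is what the two entries of cci_pmu_models carry.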
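
A hypothetical user-space counterpart (also not from the commit) would read the PMU's dynamic type id from sysfs and pass a raw event id in perf_event_attr.config. The sysfs path follows the standard perf event_source layout and "CCI_400" is the name registered for a revision r0 interconnect; the event id used below is only an example.

	/* Hypothetical user-space sketch: open one raw CCI-400 PMU counting event. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int cci_open_event_example(unsigned long config, int cpu)
	{
		struct perf_event_attr attr;
		FILE *f;
		int type = -1;

		/* Dynamic type id exported by the perf core for this PMU. */
		f = fopen("/sys/bus/event_source/devices/CCI_400/type", "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
		if (type < 0)
			return -1;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;	/* matches the type assigned to cci_pmu->pmu */
		attr.config = config;	/* e.g. 0x01: slave interface S0, event code 0x1 */

		/* Uncore PMU with perf_invalid_context: open system-wide on one CPU. */
		return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	}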