Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   3
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c             |   3
-rw-r--r--  arch/powerpc/kernel/dma_64.c                  |   8
-rw-r--r--  arch/powerpc/kernel/iommu.c                   |  93
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c           |   3
-rw-r--r--  arch/powerpc/kernel/vio.c                     |   2
-rw-r--r--  arch/powerpc/mm/mem.c                         |   7
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                  |   6
-rw-r--r--  arch/powerpc/platforms/82xx/mpc8272_ads.c     |   3
-rw-r--r--  arch/powerpc/platforms/82xx/pq2fads.c         |   3
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig           |   7
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c        |  99
-rw-r--r--  arch/powerpc/platforms/cell/setup.c           |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile    |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c      |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     |  29
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c       |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |  28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |   5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c  | 250
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c        |   4
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c     |   1
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c              |  10
23 files changed, 434 insertions(+), 147 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b94d4502a477..cf030b004415 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -256,6 +256,9 @@ config IOMMU_VMERGE
Most drivers don't have this problem; it is safe to say Y here.
+config IOMMU_HELPER
+ def_bool PPC64
+
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ed083feaf6f9..e6e49289f788 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -22,6 +22,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
+#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -312,7 +313,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, (KTIME_MONOTONIC_RES).tv64);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
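
For context: asm-offsets.c is never linked into the kernel; it exists only to emit C constants in a form the assembler can read. One common form of the DEFINE machinery, shown as an illustrative sketch rather than this tree's exact macro:

	/* sketch of the asm-offsets idiom; the exact macro body varies by tree */
	#define DEFINE(sym, val) \
		asm volatile("\n#define " #sym " %0" : : "i" (val))

	/* the compiler's generated asm-offsets.s is then post-processed into
	 * asm-offsets.h, which makes CLOCK_REALTIME_RES visible to .S files */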
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 84239076a5b8..3a317cb0636a 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -31,8 +31,8 @@ static inline unsigned long device_to_mask(struct device *dev)
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
- device_to_mask(dev), flag,
+ return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+ dma_handle, device_to_mask(dev), flag,
dev->archdata.numa_node);
}
@@ -52,7 +52,7 @@ static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
size_t size,
enum dma_data_direction direction)
{
- return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+ return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
device_to_mask(dev), direction);
}
@@ -68,7 +68,7 @@ static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+ return iommu_map_sg(dev, sglist, nelems,
device_to_mask(dev), direction);
}
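
The point of threading the struct device through these wrappers is that the IOMMU allocator can now honor per-device DMA constraints. An illustrative fragment from a hypothetical driver probe(), assuming the generic dma-mapping helpers of this era (dma_set_seg_boundary, dma_set_max_seg_size):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_probe(struct pci_dev *pdev)
	{
		/* mappings handed out by iommu_map_sg() must not cross 4GB... */
		dma_set_seg_boundary(&pdev->dev, 0xffffffffUL);
		/* ...and merged segments must stay under 64K */
		dma_set_max_seg_size(&pdev->dev, 0x10000);
		return 0;
	}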
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a3c406aca664..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
+#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu_table *tbl,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
- unsigned long n, end, i, start;
+ unsigned long n, end, start;
unsigned long limit;
int largealloc = npages > 15;
int pass = 0;
unsigned long align_mask;
+ unsigned long boundary_size;
align_mask = 0xffffffffffffffffl >> (64 - align_order);
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
start &= mask;
}
- n = find_next_zero_bit(tbl->it_map, limit, start);
-
- /* Align allocation */
- n = (n + align_mask) & ~align_mask;
-
- end = n + npages;
+ if (dev)
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ 1 << IOMMU_PAGE_SHIFT);
+ else
+ boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+ /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- if (unlikely(end >= limit)) {
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+ tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+ align_mask);
+ if (n == -1) {
if (likely(pass < 2)) {
/* First failure, just rescan the half of the table.
* Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
}
}
- for (i = n; i < end; i++)
- if (test_bit(i, tbl->it_map)) {
- start = i+1;
- goto again;
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, tbl->it_map);
+ end = n + npages;
/* Bump the hint to a new block for small allocs. */
if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
return n;
}
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
- unsigned int npages, enum dma_data_direction direction,
- unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+ void *page, unsigned int npages,
+ enum dma_data_direction direction,
+ unsigned long mask, unsigned int align_order)
{
unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE;
spin_lock_irqsave(&(tbl->it_lock), flags);
- entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+ entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
if (unlikely(entry == DMA_ERROR_CODE)) {
spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry, free_entry;
- unsigned long i;
entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
-
- for (i = 0; i < npages; i++)
- __clear_bit(free_entry+i, tbl->it_map);
+ iommu_area_free(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -270,16 +267,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
-int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask,
enum dma_data_direction direction)
{
+ struct iommu_table *tbl = dev->archdata.dma_data;
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i;
unsigned int align;
unsigned long handle;
+ unsigned int max_seg_size;
BUG_ON(direction == DMA_NONE);
@@ -298,6 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags);
+ max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
@@ -314,7 +314,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle,
+ entry = iommu_range_alloc(dev, tbl, npages, &handle,
mask >> IOMMU_PAGE_SHIFT, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -344,7 +344,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
- if (novmerge || (dma_addr != dma_next)) {
+ if (novmerge || (dma_addr != dma_next) ||
+ (outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
@@ -452,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
- unsigned long start_index, end_index;
- unsigned long entries_per_4g;
- unsigned long index;
static int welcomed = 0;
struct page *page;
@@ -476,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
+ unsigned long index;
unsigned long tceval;
unsigned long tcecount = 0;
@@ -506,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif
- /*
- * DMA cannot cross 4 GB boundary. Mark last entry of each 4
- * GB chunk as reserved.
- */
- if (protect4gb) {
- entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
- /* Mark the last bit before a 4GB boundary as used */
- start_index = tbl->it_offset | (entries_per_4g - 1);
- start_index -= tbl->it_offset;
-
- end_index = tbl->it_size;
-
- for (index = start_index; index < end_index - 1; index += entries_per_4g)
- __set_bit(index, tbl->it_map);
- }
-
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
novmerge ? "disabled" : "enabled");
@@ -570,9 +552,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
- size_t size, unsigned long mask,
- enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, size_t size, unsigned long mask,
+ enum dma_data_direction direction)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
@@ -589,7 +571,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
((unsigned long)vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+ dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> IOMMU_PAGE_SHIFT, align);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
@@ -621,8 +603,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
@@ -656,7 +639,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
/* Set up tces to cover the allocated range */
nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size);
- mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
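
The free-range search itself moves into the generic IOMMU helper. Roughly, iommu_area_alloc() finds npages clear bits starting at start while rejecting any candidate range that would straddle a boundary; a sketch of that boundary test, assuming the helper's usual semantics (all quantities in IOMMU pages, shift being the table offset):

	static inline int spans_boundary(unsigned int index, unsigned int nr,
					 unsigned long shift,
					 unsigned long boundary_size)
	{
		/* offset of the candidate range within its boundary window */
		unsigned long offset = (shift + index) & (boundary_size - 1);

		return offset + nr > boundary_size;
	}

This is also why the open-coded protect4gb reservation removed above becomes redundant: the boundary check already prevents any allocation from crossing a 4GB line.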
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 76b862bd1fe9..61dd17449ddc 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -36,7 +36,8 @@ static struct legacy_serial_info {
static struct __initdata of_device_id parents[] = {
{.type = "soc",},
{.type = "tsi-bridge",},
- {.type = "opb", .compatible = "ibm,opb",},
+ {.type = "opb", },
+ {.compatible = "ibm,opb",},
{.compatible = "simple-bus",},
{.compatible = "wrs,epld-localbus",},
};
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f0bad7070fb5..f98867252ee2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -176,7 +176,7 @@ static void __devinit vio_dev_release(struct device *dev)
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
-struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
+struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
const unsigned int *unit_address;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e8122447f019..c7d7bd43a251 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -483,7 +483,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
*/
_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
- if (!PageReserved(page)
+ /* The _PAGE_USER test should really be _PAGE_EXEC, but
+ * older glibc versions execute some code from no-exec
+ * pages, which for now we are supporting. If exec-only
+ * pages are ever implemented, this will have to change.
+ */
+ if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
&& !test_bit(PG_arch_1, &page->flags)) {
if (vma->vm_mm == current->active_mm) {
__flush_dcache_icache((void *) address);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 64488723162a..f80f90c4d58b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -86,7 +86,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
@@ -123,7 +123,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
@@ -131,7 +131,7 @@ void pte_free_kernel(pte_t *pte)
free_page((unsigned long)pte);
}
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 3fce6b375dbc..7d3018751988 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -134,13 +134,12 @@ static void __init mpc8272_ads_setup_arch(void)
}
bcsr = of_iomap(np, 0);
+ of_node_put(np);
if (!bcsr) {
printk(KERN_ERR "Cannot map BCSR registers\n");
return;
}
- of_node_put(np);
-
clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
setbits32(&bcsr[1], BCSR1_FETH_RST);
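
This hunk (and the identical pq2fads one below) is a reference-count fix rather than plain code motion; sketched, the old ordering leaked the node on the error path:

	/* old ordering, sketched: */
	bcsr = of_iomap(np, 0);
	if (!bcsr)
		return;		/* np reference never dropped */
	of_node_put(np);

Moving of_node_put() directly after of_iomap() drops the reference on both the success and failure paths.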
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index 68196e349994..e1dceeec4994 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -130,13 +130,12 @@ static void __init pq2fads_setup_arch(void)
}
bcsr = of_iomap(np, 0);
+ of_node_put(np);
if (!bcsr) {
printk(KERN_ERR "Cannot map BCSR registers\n");
return;
}
- of_node_put(np);
-
/* Enable the serial and ethernet ports */
clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3a963b4a9be0..2f169991896d 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -54,6 +54,13 @@ config SPU_FS_64K_LS
uses 4K pages. This can improve performance of applications
using multiple SPEs by lowering the TLB pressure on them.
+config SPU_TRACE
+ tristate "SPU event tracing support"
+ depends on SPU_FS && MARKERS
+ help
+ This option allows reading a trace of spu-related events through
+ the sputrace file in procfs.
+
config SPU_BASE
bool
default n
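
Once SPU_TRACE is enabled, the trace is consumed by reading the procfs file added later in this series. A minimal userspace reader, assuming the /proc/sputrace path used below:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/sputrace", "r");

		if (!f)
			return 1;
		/* the read blocks until trace records arrive */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}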
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 095988f13bf4..d95e71dee91f 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
-#include <linux/reboot.h>
+#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/machdep.h>
@@ -65,14 +65,12 @@
struct axon_msic {
struct irq_host *irq_host;
- __le32 *fifo;
+ __le32 *fifo_virt;
+ dma_addr_t fifo_phys;
dcr_host_t dcr_host;
- struct list_head list;
u32 read_offset;
};
-static LIST_HEAD(axon_msic_list);
-
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
@@ -94,7 +92,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
while (msic->read_offset != write_offset) {
idx = msic->read_offset / sizeof(__le32);
- msi = le32_to_cpu(msic->fifo[idx]);
+ msi = le32_to_cpu(msic->fifo_virt[idx]);
msi &= 0xFFFF;
pr_debug("axon_msi: woff %x roff %x msi %x\n",
@@ -139,6 +137,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
tmp = dn;
dn = of_find_node_by_phandle(*ph);
+ of_node_put(tmp);
if (!dn) {
dev_dbg(&dev->dev,
"axon_msi: msi-translator doesn't point to a node\n");
@@ -156,7 +155,6 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
out_error:
of_node_put(dn);
- of_node_put(tmp);
return msic;
}
@@ -292,30 +290,24 @@ static struct irq_host_ops msic_host_ops = {
.map = msic_host_map,
};
-static int axon_msi_notify_reboot(struct notifier_block *nb,
- unsigned long code, void *data)
+static int axon_msi_shutdown(struct of_device *device)
{
- struct axon_msic *msic;
+ struct axon_msic *msic = device->dev.platform_data;
u32 tmp;
- list_for_each_entry(msic, &axon_msic_list, list) {
- pr_debug("axon_msi: disabling %s\n",
- msic->irq_host->of_node->full_name);
- tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
- tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
- msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
- }
+ pr_debug("axon_msi: disabling %s\n",
+ msic->irq_host->of_node->full_name);
+ tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
+ tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
+ msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
return 0;
}
-static struct notifier_block axon_msi_reboot_notifier = {
- .notifier_call = axon_msi_notify_reboot
-};
-
-static int axon_msi_setup_one(struct device_node *dn)
+static int axon_msi_probe(struct of_device *device,
+ const struct of_device_id *device_id)
{
- struct page *page;
+ struct device_node *dn = device->node;
struct axon_msic *msic;
unsigned int virq;
int dcr_base, dcr_len;
@@ -346,16 +338,14 @@ static int axon_msi_setup_one(struct device_node *dn)
goto out_free_msic;
}
- page = alloc_pages_node(of_node_to_nid(dn), GFP_KERNEL,
- get_order(MSIC_FIFO_SIZE_BYTES));
- if (!page) {
+ msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
+ &msic->fifo_phys, GFP_KERNEL);
+ if (!msic->fifo_virt) {
printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
dn->full_name);
goto out_free_msic;
}
- msic->fifo = page_address(page);
-
msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP,
NR_IRQS, &msic_host_ops, 0);
if (!msic->irq_host) {
@@ -378,14 +368,18 @@ static int axon_msi_setup_one(struct device_node *dn)
pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
/* Enable the MSIC hardware */
- msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, (u64)msic->fifo >> 32);
+ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
- (u64)msic->fifo & 0xFFFFFFFF);
+ msic->fifo_phys & 0xFFFFFFFF);
msic_dcr_write(msic, MSIC_CTRL_REG,
MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
MSIC_CTRL_FIFO_SIZE);
- list_add(&msic->list, &axon_msic_list);
+ device->dev.platform_data = msic;
+
+ ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
+ ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
+ ppc_md.msi_check_device = axon_msi_check_device;
printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
@@ -394,7 +388,8 @@ static int axon_msi_setup_one(struct device_node *dn)
out_free_host:
kfree(msic->irq_host);
out_free_fifo:
- __free_pages(virt_to_page(msic->fifo), get_order(MSIC_FIFO_SIZE_BYTES));
+ dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
+ msic->fifo_phys);
out_free_msic:
kfree(msic);
out:
@@ -402,28 +397,24 @@ out:
return -1;
}
-static int axon_msi_init(void)
-{
- struct device_node *dn;
- int found = 0;
-
- pr_debug("axon_msi: initialising ...\n");
-
- for_each_compatible_node(dn, NULL, "ibm,axon-msic") {
- if (axon_msi_setup_one(dn) == 0)
- found++;
- }
-
- if (found) {
- ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
- ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
- ppc_md.msi_check_device = axon_msi_check_device;
-
- register_reboot_notifier(&axon_msi_reboot_notifier);
+static const struct of_device_id axon_msi_device_id[] = {
+ {
+ .compatible = "ibm,axon-msic"
+ },
+ {}
+};
- pr_debug("axon_msi: registered callbacks!\n");
- }
+static struct of_platform_driver axon_msi_driver = {
+ .match_table = axon_msi_device_id,
+ .probe = axon_msi_probe,
+ .shutdown = axon_msi_shutdown,
+ .driver = {
+ .name = "axon-msi"
+ },
+};
- return 0;
+static int __init axon_msi_init(void)
+{
+ return of_register_platform_driver(&axon_msi_driver);
}
-arch_initcall(axon_msi_init);
+subsys_initcall(axon_msi_init);
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index e6534b519c9a..a7f609b3b876 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -98,7 +98,7 @@ static int __init cell_publish_devices(void)
}
return 0;
}
-machine_device_initcall(cell, cell_publish_devices);
+machine_subsys_initcall(cell, cell_publish_devices);
static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index d3a349fb42e5..99610a6361f2 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -4,6 +4,8 @@ spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o
+obj-$(CONFIG_SPU_TRACE) += sputrace.o
+
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS := spu-
SPU_CC := $(SPU_CROSS)gcc
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3fcd06418b01..1018acd1746b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -29,6 +29,7 @@
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
+#include <linux/marker.h>
#include <asm/io.h>
#include <asm/semaphore.h>
@@ -358,6 +359,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long area, offset = address - vma->vm_start;
+ spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
+
offset += vma->vm_pgoff << PAGE_SHIFT;
if (offset >= ps_size)
return NOPFN_SIGBUS;
@@ -375,11 +378,14 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
if (ctx->state == SPU_STATE_SAVED) {
up_read(&current->mm->mmap_sem);
+ spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+ spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
}
spu_release(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index c0e968a4c211..90784c029f25 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -322,7 +322,7 @@ static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
struct file *filp)
{
- struct spu_context *tmp, *neighbor;
+ struct spu_context *tmp, *neighbor, *err;
int count, node;
int aff_supp;
@@ -354,11 +354,15 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
!list_entry(neighbor->aff_list.next, struct spu_context,
- aff_list)->aff_head)
- return ERR_PTR(-EEXIST);
+ aff_list)->aff_head) {
+ err = ERR_PTR(-EEXIST);
+ goto out_put_neighbor;
+ }
- if (gang != neighbor->gang)
- return ERR_PTR(-EINVAL);
+ if (gang != neighbor->gang) {
+ err = ERR_PTR(-EINVAL);
+ goto out_put_neighbor;
+ }
count = 1;
list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
@@ -372,11 +376,17 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
break;
}
- if (node == MAX_NUMNODES)
- return ERR_PTR(-EEXIST);
+ if (node == MAX_NUMNODES) {
+ err = ERR_PTR(-EEXIST);
+ goto out_put_neighbor;
+ }
}
return neighbor;
+
+out_put_neighbor:
+ put_spu_context(neighbor);
+ return err;
}
static void
@@ -454,9 +464,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if (ret)
goto out_aff_unlock;
- if (affinity)
+ if (affinity) {
spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
neighbor);
+ if (neighbor)
+ put_spu_context(neighbor);
+ }
/*
* get references for dget and mntget, will be released
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c01a09da1e56..b4814c740d8a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -410,8 +410,11 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
* since we have TIF_SINGLESTEP set, thus the kernel will do
* it upon return from the syscall anyway
*/
- if ((status & SPU_STATUS_STOPPED_BY_STOP)
- && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+ if (unlikely(status & SPU_STATUS_SINGLE_STEP))
+ ret = -ERESTARTSYS;
+
+ else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
+ && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
force_sig(SIGTRAP, current);
ret = -ERESTARTSYS;
}
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 00d914232af1..5915343e2599 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -39,6 +39,7 @@
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/marker.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -216,8 +217,8 @@ void do_notify_spus_active(void)
*/
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
- spu->number, spu->node);
+ spu_context_trace(spu_bind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (ctx->flags & SPU_CREATE_NOSCHED)
@@ -399,8 +400,8 @@ static int has_affinity(struct spu_context *ctx)
*/
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
- spu->pid, spu->number, spu->node);
+ spu_context_trace(spu_unbind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (spu->ctx->flags & SPU_CREATE_NOSCHED)
@@ -528,6 +529,8 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
struct spu *spu, *aff_ref_spu;
int node, n;
+ spu_context_nospu_trace(spu_get_idle__enter, ctx);
+
if (ctx->gang) {
mutex_lock(&ctx->gang->aff_mutex);
if (has_affinity(ctx)) {
@@ -546,8 +549,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
ctx->gang->aff_ref_spu = NULL;
mutex_unlock(&ctx->gang->aff_mutex);
-
- return NULL;
+ goto not_found;
}
mutex_unlock(&ctx->gang->aff_mutex);
}
@@ -565,12 +567,14 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
mutex_unlock(&cbe_spu_info[node].list_mutex);
}
+ not_found:
+ spu_context_nospu_trace(spu_get_idle__not_found, ctx);
return NULL;
found:
spu->alloc_state = SPU_USED;
mutex_unlock(&cbe_spu_info[node].list_mutex);
- pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_context_trace(spu_get_idle__found, ctx, spu);
spu_init_channels(spu);
return spu;
}
@@ -587,6 +591,8 @@ static struct spu *find_victim(struct spu_context *ctx)
struct spu *spu;
int node, n;
+ spu_context_nospu_trace(spu_find_victim__enter, ctx);
+
/*
* Look for a possible preemption candidate on the local node first.
* If there is no candidate look at the other nodes. This isn't
@@ -640,6 +646,8 @@ static struct spu *find_victim(struct spu_context *ctx)
goto restart;
}
+ spu_context_trace(__spu_deactivate__unload, ctx, spu);
+
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
spu_unbind_context(spu, victim);
@@ -822,6 +830,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
*/
void spu_deactivate(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_deactivate__enter, ctx);
__spu_deactivate(ctx, 1, MAX_PRIO);
}
@@ -835,6 +844,7 @@ void spu_deactivate(struct spu_context *ctx)
*/
void spu_yield(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_yield__enter, ctx);
if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
mutex_lock(&ctx->state_mutex);
__spu_deactivate(ctx, 0, MAX_PRIO);
@@ -864,11 +874,15 @@ static noinline void spusched_tick(struct spu_context *ctx)
goto out;
spu = ctx->spu;
+
+ spu_context_trace(spusched_tick__preempt, ctx, spu);
+
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
spu_unschedule(spu, ctx);
spu_add_to_rq(ctx);
} else {
+ spu_context_nospu_trace(spusched_tick__newslice, ctx);
ctx->time_slice++;
}
out:
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0e114038ea6f..795a1b52538b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -325,4 +325,9 @@ extern void spu_free_lscsa(struct spu_state *csa);
extern void spuctx_switch_state(struct spu_context *ctx,
enum spu_utilization_state new_state);
+#define spu_context_trace(name, ctx, spu) \
+ trace_mark(name, "%p %p", ctx, spu);
+#define spu_context_nospu_trace(name, ctx) \
+ trace_mark(name, "%p", ctx);
+
#endif
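
These macros lean on the markers API: trace_mark() stringifies its first argument into the marker name, so the identifier at the call site must match the string later passed to marker_probe_register(). Illustrative expansion, assuming the marker.h of this era:

	/* spu_context_nospu_trace(spu_yield__enter, ctx) expands to */
	trace_mark(spu_yield__enter, "%p", ctx);
	/* which fires any probe registered as
	 * marker_probe_register("spu_yield__enter", "%p", probe_fn, data) */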
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
new file mode 100644
index 000000000000..2b1953f6f12e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH
+ * Released under GPL v2.
+ *
+ * Partially based on net/ipv4/tcp_probe.c.
+ *
+ * Simple tracing facility for spu contexts.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/proc_fs.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include "spufs.h"
+
+struct spu_probe {
+ const char *name;
+ const char *format;
+ marker_probe_func *probe_func;
+};
+
+struct sputrace {
+ ktime_t tstamp;
+ int owner_tid; /* owner */
+ int curr_tid;
+ const char *name;
+ int number;
+};
+
+static int bufsize __read_mostly = 16384;
+MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)");
+module_param(bufsize, int, 0);
+
+
+static DEFINE_SPINLOCK(sputrace_lock);
+static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
+static ktime_t sputrace_start;
+static unsigned long sputrace_head, sputrace_tail;
+static struct sputrace *sputrace_log;
+
+static int sputrace_used(void)
+{
+ return (sputrace_head - sputrace_tail) % bufsize;
+}
+
+static inline int sputrace_avail(void)
+{
+ return bufsize - sputrace_used();
+}
+
+static int sputrace_sprint(char *tbuf, int n)
+{
+ const struct sputrace *t = sputrace_log + sputrace_tail % bufsize;
+ struct timespec tv =
+ ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
+
+ return snprintf(tbuf, n,
+ "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+ (unsigned long) tv.tv_sec,
+ (unsigned long) tv.tv_nsec,
+ t->owner_tid,
+ t->name,
+ t->curr_tid,
+ t->number);
+}
+
+static ssize_t sputrace_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int error = 0, cnt = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ while (cnt < len) {
+ char tbuf[128];
+ int width;
+
+ error = wait_event_interruptible(sputrace_wait,
+ sputrace_used() > 0);
+ if (error)
+ break;
+
+ spin_lock(&sputrace_lock);
+ if (sputrace_head == sputrace_tail) {
+ spin_unlock(&sputrace_lock);
+ continue;
+ }
+
+ width = sputrace_sprint(tbuf, sizeof(tbuf));
+ if (width < len)
+ sputrace_tail = (sputrace_tail + 1) % bufsize;
+ spin_unlock(&sputrace_lock);
+
+ if (width >= len)
+ break;
+
+ if (copy_to_user(buf + cnt, tbuf, width)) {
+ error = -EFAULT;
+ break;
+ }
+ cnt += width;
+ }
+
+ return cnt == 0 ? error : cnt;
+}
+
+static int sputrace_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&sputrace_lock);
+ sputrace_head = sputrace_tail = 0;
+ sputrace_start = ktime_get();
+ spin_unlock(&sputrace_lock);
+
+ return 0;
+}
+
+static const struct file_operations sputrace_fops = {
+ .owner = THIS_MODULE,
+ .open = sputrace_open,
+ .read = sputrace_read,
+};
+
+static void sputrace_log_item(const char *name, struct spu_context *ctx,
+ struct spu *spu)
+{
+ spin_lock(&sputrace_lock);
+ if (sputrace_avail() > 1) {
+ struct sputrace *t = sputrace_log + sputrace_head;
+
+ t->tstamp = ktime_get();
+ t->owner_tid = ctx->tid;
+ t->name = name;
+ t->curr_tid = current->pid;
+ t->number = spu ? spu->number : -1;
+
+ sputrace_head = (sputrace_head + 1) % bufsize;
+ } else {
+ printk(KERN_WARNING
+ "sputrace: lost samples due to full buffer.\n");
+ }
+ spin_unlock(&sputrace_lock);
+
+ wake_up(&sputrace_wait);
+}
+
+static void spu_context_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+ struct spu *spu;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+ spu = va_arg(ap, struct spu *);
+
+ sputrace_log_item(p->name, ctx, spu);
+ va_end(ap);
+}
+
+static void spu_context_nospu_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+
+ sputrace_log_item(p->name, ctx, NULL);
+ va_end(ap);
+}
+
+struct spu_probe spu_probes[] = {
+ { "spu_bind_context__enter", "%p %p", spu_context_event },
+ { "spu_unbind_context__enter", "%p %p", spu_context_event },
+ { "spu_get_idle__enter", "%p", spu_context_nospu_event },
+ { "spu_get_idle__found", "%p %p", spu_context_event },
+ { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
+ { "spu_find_victim__enter", "%p", spu_context_nospu_event },
+ { "spusched_tick__preempt", "%p %p", spu_context_event },
+ { "spusched_tick__newslice", "%p", spu_context_nospu_event },
+ { "spu_yield__enter", "%p", spu_context_nospu_event },
+ { "spu_deactivate__enter", "%p", spu_context_nospu_event },
+ { "__spu_deactivate__unload", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
+ { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
+ { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+};
+
+static int __init sputrace_init(void)
+{
+ struct proc_dir_entry *entry;
+ int i, error = -ENOMEM;
+
+ sputrace_log = kcalloc(bufsize, sizeof(struct sputrace), GFP_KERNEL);
+ if (!sputrace_log)
+ goto out;
+
+ entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+ if (!entry)
+ goto out_free_log;
+ entry->proc_fops = &sputrace_fops;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
+ struct spu_probe *p = &spu_probes[i];
+
+ error = marker_probe_register(p->name, p->format,
+ p->probe_func, p);
+ if (error)
+ printk(KERN_INFO "Unable to register probe %s\n",
+ p->name);
+
+ error = marker_arm(p->name);
+ if (error)
+ printk(KERN_INFO "Unable to arm probe %s\n", p->name);
+ }
+
+ return 0;
+
+out_free_log:
+ kfree(sputrace_log);
+out:
+ return -ENOMEM;
+}
+
+static void __exit sputrace_exit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
+ marker_probe_unregister(spu_probes[i].name);
+
+ remove_proc_entry("sputrace", NULL);
+ kfree(sputrace_log);
+}
+
+module_init(sputrace_init);
+module_exit(sputrace_exit);
+
+MODULE_LICENSE("GPL");
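
For reference, sputrace_sprint() above emits one line per record; an illustrative (made-up) sample of what a read of /proc/sputrace returns:

	[0.000123456] 4321: spu_bind_context__enter (thread = 4321, spu = 2)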
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 6a0c6f6675cd..11fa3c772ed5 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -199,7 +199,7 @@ static struct iommu_table vio_iommu_table;
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(&vio_iommu_table, size, dma_handle,
+ return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
DMA_32BIT_MASK, flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(iseries_hv_free);
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
enum dma_data_direction direction)
{
- return iommu_map_single(&vio_iommu_table, vaddr, size,
+ return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
DMA_32BIT_MASK, direction);
}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index c02f8742c54d..2800fced8c7c 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -167,6 +167,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
if ((child = of_get_next_child(np, NULL))) {
of_node_put(child);
+ of_node_put(parent);
return -EBUSY;
}
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 0e74a4bd9827..5d2d5522ef41 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -174,15 +174,19 @@ int mpc8xx_pic_init(void)
goto out;
siu_reg = ioremap(res.start, res.end - res.start + 1);
- if (siu_reg == NULL)
- return -EINVAL;
+ if (siu_reg == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
- mpc8xx_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
+ mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
64, &mpc8xx_pic_host_ops, 64);
if (mpc8xx_pic_host == NULL) {
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
ret = -ENOMEM;
+ goto out;
}
+ return 0;
out:
of_node_put(np);