-rw-r--r--  arch/Kconfig                         |   2
-rw-r--r--  arch/ia64/Kconfig                    |  19
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c     |   9
-rw-r--r--  arch/ia64/include/asm/io.h           |  24
-rw-r--r--  arch/ia64/include/asm/machvec.h      |  22
-rw-r--r--  arch/ia64/include/asm/meminit.h      |   1
-rw-r--r--  arch/ia64/include/asm/sal.h          |  15
-rw-r--r--  arch/ia64/include/asm/sn/sn_sal.h    |  45
-rw-r--r--  arch/ia64/kernel/acpi.c              |  29
-rw-r--r--  arch/ia64/kernel/pci-dma.c           |   2
-rw-r--r--  arch/ia64/kernel/setup.c             |   9
-rw-r--r--  arch/ia64/mm/discontig.c             |   1
-rw-r--r--  arch/ia64/uv/kernel/setup.c          |   6
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c    |   9
-rw-r--r--  arch/x86/xen/enlighten.c             |   5
-rw-r--r--  arch/x86/xen/mmu.c                   |  13
-rw-r--r--  drivers/firmware/dmi_scan.c          |   6
-rw-r--r--  drivers/message/fusion/mptlan.c      | 108
-rw-r--r--  drivers/oprofile/event_buffer.c      |   6
-rw-r--r--  drivers/pci/pci-sysfs.c              |   2
-rw-r--r--  drivers/pci/quirks.c                 |  36
-rw-r--r--  drivers/pci/rom.c                    |   6
-rw-r--r--  drivers/staging/Kconfig              |  20
-rw-r--r--  drivers/staging/usbip/Kconfig        |   2
-rw-r--r--  fs/ext4/ialloc.c                     |   2
-rw-r--r--  fs/ext4/inode.c                      |   7
-rw-r--r--  fs/ext4/mballoc.c                    |   1
-rw-r--r--  fs/ext4/super.c                      |  24
-rw-r--r--  fs/jbd/checkpoint.c                  |  31
-rw-r--r--  fs/jbd2/checkpoint.c                 |  32
-rw-r--r--  fs/jbd2/journal.c                    |   2
-rw-r--r--  include/asm-generic/memory_model.h   |   2
-rw-r--r--  include/linux/pci.h                  |   2
-rw-r--r--  mm/vmalloc.c                         |   7
34 files changed, 248 insertions(+), 259 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index e6ab550bceb3..8977d99987cb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -21,7 +21,7 @@ config OPROFILE_IBS
Instruction-Based Sampling (IBS) is a new profiling
technique that provides rich, precise program performance
information. IBS is introduced by AMD Family10h processors
- (AMD Opteron Quad-Core processor “Barcelona”) to overcome
+ (AMD Opteron Quad-Core processor "Barcelona") to overcome
the limitations of conventional performance counter
sampling.
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27eec71429b0..6bd91ed7cd03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -148,6 +148,7 @@ config IA64_GENERIC
select ACPI_NUMA
select SWIOTLB
select PCI_MSI
+ select DMAR
help
This selects the system type of your hardware. A "generic" kernel
will run on any supported IA-64 system. However, if you configure
@@ -585,7 +586,7 @@ source "fs/Kconfig.binfmt"
endmenu
-menu "Power management and ACPI"
+menu "Power management and ACPI options"
source "kernel/power/Kconfig"
@@ -641,6 +642,8 @@ source "net/Kconfig"
source "drivers/Kconfig"
+source "arch/ia64/hp/sim/Kconfig"
+
config MSPEC
tristate "Memory special operations driver"
depends on IA64
@@ -652,6 +655,12 @@ config MSPEC
source "fs/Kconfig"
+source "arch/ia64/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
source "arch/ia64/kvm/Kconfig"
source "lib/Kconfig"
@@ -678,11 +687,3 @@ config IRQ_PER_CPU
config IOMMU_HELPER
def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-source "arch/ia64/hp/sim/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 88b6e6f3fd88..2769dbfd03bf 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,19 +13,12 @@
*/
#include <linux/device.h>
+#include <linux/swiotlb.h>
#include <asm/machvec.h>
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
-extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_supported swiotlb_dma_supported;
-extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
/* hwiommu declarations & definitions: */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 7f257507cd86..0d9d16e2d949 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -434,28 +434,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
# endif /* __KERNEL__ */
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY 0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
- * replaced by dma_merge_mask() or something of that sort. Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
- * expanded into:
- *
- * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
-#endif
-
#endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 1ea28bcee33b..59c17e446683 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,6 +11,7 @@
#define _ASM_IA64_MACHVEC_H
#include <linux/types.h>
+#include <linux/swiotlb.h>
/* forward declarations: */
struct device;
@@ -298,27 +299,6 @@ extern void machvec_init_from_cmdline(const char *cmdline);
# endif /* CONFIG_IA64_GENERIC */
/*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init swiotlb_init;
-extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single swiotlb_map_single;
-extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
-extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg swiotlb_map_sg;
-extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
-extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported swiotlb_dma_supported;
-
-/*
* Define default versions so we can extend machvec for new platforms without having
* to update the machvec files for all existing platforms.
*/
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 6bc96ee54327..c0cea375620a 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -48,7 +48,6 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
*/
#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
-#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
#ifdef CONFIG_NUMA
extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index ea310c0812aa..966797a97c94 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -337,11 +337,24 @@ typedef struct sal_log_record_header {
#define sal_log_severity_fatal 1
#define sal_log_severity_corrected 2
+/*
+ * Error Recovery Info (ERI) bit decode. From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID 0x0 /* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE 0x30 /* Resource not accessible */
+#define ERI_CONTAINMENT_WARN 0x22 /* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR 0x20 /* Uncorrected error */
+#define ERI_COMPONENT_RESET 0x24 /* Component must be reset */
+#define ERI_CORR_ERROR_LOG 0x21 /* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH 0x29 /* Corrected error threshold exceeded */
+
/* Definition of log section header structures */
typedef struct sal_log_sec_header {
efi_guid_t guid; /* Unique Section ID */
sal_log_revision_t revision; /* Major and Minor revision of Section */
- u16 reserved;
+ u8 error_recovery_info; /* Platform error recovery status */
+ u8 reserved;
u32 len; /* Section length */
} sal_log_section_hdr_t;
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
index 57e649d388b8..e310fc0135dc 100644
--- a/arch/ia64/include/asm/sn/sn_sal.h
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -90,6 +90,8 @@
#define SN_SAL_SET_CPU_NUMBER 0x02000068
#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
+#define SN_SAL_WATCHLIST_ALLOC 0x02000070
+#define SN_SAL_WATCHLIST_FREE 0x02000071
/*
* Service-specific constants
@@ -1185,4 +1187,47 @@ ia64_sn_kernel_launch_event(void)
SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
return rv.status;
}
+
+union sn_watchlist_u {
+ u64 val;
+ struct {
+ u64 blade : 16,
+ size : 32,
+ filler : 16;
+ };
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+ unsigned long *intr_mmr_offset)
+{
+ struct ia64_sal_retval rv;
+ unsigned long addr;
+ union sn_watchlist_u size_blade;
+ int watchlist;
+
+ addr = (unsigned long)mq;
+ size_blade.size = mq_size;
+ size_blade.blade = blade;
+
+ /*
+ * bios returns watchlist number or negative error number.
+ */
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+ size_blade.val, (u64)intr_mmr_offset,
+ (u64)&watchlist, 0, 0, 0);
+ if (rv.status < 0)
+ return rv.status;
+
+ return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+ struct ia64_sal_retval rv;
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+ watchlist_num, 0, 0, 0, 0, 0);
+ return rv.status;
+}
#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0635015d0aaa..bd7acc71e8a9 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -678,6 +678,30 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
return 0;
}
+int __init early_acpi_boot_init(void)
+{
+ int ret;
+
+ /*
+ * do a partial walk of MADT to determine how many CPUs
+ * we have including offline CPUs
+ */
+ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+ printk(KERN_ERR PREFIX "Can't find MADT\n");
+ return 0;
+ }
+
+ ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+ acpi_parse_lsapic, NR_CPUS);
+ if (ret < 1)
+ printk(KERN_ERR PREFIX
+ "Error parsing MADT - no LAPIC entries\n");
+
+ return 0;
+}
+
+
+
int __init acpi_boot_init(void)
{
@@ -701,11 +725,6 @@ int __init acpi_boot_init(void)
printk(KERN_ERR PREFIX
"Error parsing LAPIC address override entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
- < 1)
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no LAPIC entries\n");
-
if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
< 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 031abbf9c875..dbdb778efa05 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,13 +12,11 @@
#include <asm/machvec.h>
#include <linux/dma-mapping.h>
-#include <asm/machvec.h>
#include <asm/system.h>
#ifdef CONFIG_DMAR
#include <linux/kernel.h>
-#include <linux/string.h>
#include <asm/page.h>
#include <asm/iommu.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ae7911702bf8..865af27c7737 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -359,7 +359,7 @@ reserve_memory (void)
}
#endif
-#ifdef CONFIG_CRASH_KERNEL
+#ifdef CONFIG_CRASH_DUMP
if (reserve_elfcorehdr(&rsvd_region[n].start,
&rsvd_region[n].end) == 0)
n++;
@@ -561,8 +561,12 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
+ early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+#endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
@@ -853,9 +857,6 @@ void __init
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
- prefill_possible_map();
-#endif
}
/*
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d8c5fcd89e5b..d85ba98d9008 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -635,7 +635,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
#endif
start = GRANULEROUNDDOWN(start);
- start = ORDERROUNDDOWN(start);
end = GRANULEROUNDUP(end);
mem_data[node].max_pfn = max(mem_data[node].max_pfn,
end >> PAGE_SHIFT);
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index cf5f28ae96c4..7a5ae633198b 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
#ifdef CONFIG_IA64_SGI_UV
int sn_prom_type;
+long sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
#endif
struct redir_addr {
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7d..3f1b81a83e2e 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
static int counter_width = 32;
#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
static int ppro_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
for (i = 0 ; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CTR_READ(low, high, msrs, i);
- if (CTR_OVERFLOWED(low)) {
+ rdmsrl(msrs->counters[i].addr, val);
+ if (CTR_OVERFLOWED(val)) {
oprofile_add_sample(regs, i);
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
if (PagePinned(virt_to_page(mm->pgd))) {
SetPagePinned(page);
+ vm_unmap_aliases();
if (!PageHighMem(page)) {
make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
if (level == PT_PTE && USE_SPLIT_PTLOCKS)
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
- } else
+ } else {
/* make sure there are no stray mappings of
this page */
kmap_flush_unused();
- vm_unmap_aliases();
+ }
}
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
+ vm_unmap_aliases();
+
xen_mc_batch();
- if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
- /* re-enable interrupts for kmap_flush_unused */
+ if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
+ /* re-enable interrupts for flushing */
xen_mc_issue(0);
+
kmap_flush_unused();
- vm_unmap_aliases();
+
xen_mc_batch();
}
@@ -874,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is pinnable */
- xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+ xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
PT_PMD);
#endif
xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -991,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is unpinned */
- xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+ xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
PT_PMD);
#endif
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3e526b6d00cb..8daf4793ac32 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -81,9 +81,9 @@ static void dmi_table(u8 *buf, int len, int num,
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
- * We want to know the total length (formated area and strings)
- * before decoding to make sure we won't run off the table in
- * dmi_decode or dmi_string
+ * We want to know the total length (formatted area and
+ * strings) before decoding to make sure we won't run off the
+ * table in dmi_decode or dmi_string
*/
data += dm->length;
while ((data - buf < len - 1) && (data[0] || data[1]))
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a1abf95cf751..603ffd008c73 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -77,12 +77,6 @@ MODULE_VERSION(my_VERSION);
* Fusion MPT LAN private structures
*/
-struct NAA_Hosed {
- u16 NAA;
- u8 ieee[FC_ALEN];
- struct NAA_Hosed *next;
-};
-
struct BufferControl {
struct sk_buff *skb;
dma_addr_t dma;
@@ -159,11 +153,6 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
-#ifdef QLOGIC_NAA_WORKAROUND
-static struct NAA_Hosed *mpt_bad_naa = NULL;
-DEFINE_RWLOCK(bad_naa_lock);
-#endif
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* lan_reply - Handle all data sent from the hardware.
@@ -780,30 +769,6 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
// ctx, skb, skb->data));
mac = skb_mac_header(skb);
-#ifdef QLOGIC_NAA_WORKAROUND
-{
- struct NAA_Hosed *nh;
-
- /* Munge the NAA for Tx packets to QLogic boards, which don't follow
- RFC 2625. The longer I look at this, the more my opinion of Qlogic
- drops. */
- read_lock_irq(&bad_naa_lock);
- for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
- if ((nh->ieee[0] == mac[0]) &&
- (nh->ieee[1] == mac[1]) &&
- (nh->ieee[2] == mac[2]) &&
- (nh->ieee[3] == mac[3]) &&
- (nh->ieee[4] == mac[4]) &&
- (nh->ieee[5] == mac[5])) {
- cur_naa = nh->NAA;
- dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
- "= %04x.\n", cur_naa));
- break;
- }
- }
- read_unlock_irq(&bad_naa_lock);
-}
-#endif
pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
(mac[0] << 8) |
@@ -1572,79 +1537,6 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
fcllc = (struct fcllc *)skb->data;
-#ifdef QLOGIC_NAA_WORKAROUND
-{
- u16 source_naa = fch->stype, found = 0;
-
- /* Workaround for QLogic not following RFC 2625 in regards to the NAA
- value. */
-
- if ((source_naa & 0xF000) == 0)
- source_naa = swab16(source_naa);
-
- if (fcllc->ethertype == htons(ETH_P_ARP))
- dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
- "%04x.\n", source_naa));
-
- if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
- ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
- struct NAA_Hosed *nh, *prevnh;
- int i;
-
- dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
- "system with non-RFC 2625 NAA value (%04x).\n",
- source_naa));
-
- write_lock_irq(&bad_naa_lock);
- for (prevnh = nh = mpt_bad_naa; nh != NULL;
- prevnh=nh, nh=nh->next) {
- if ((nh->ieee[0] == fch->saddr[0]) &&
- (nh->ieee[1] == fch->saddr[1]) &&
- (nh->ieee[2] == fch->saddr[2]) &&
- (nh->ieee[3] == fch->saddr[3]) &&
- (nh->ieee[4] == fch->saddr[4]) &&
- (nh->ieee[5] == fch->saddr[5])) {
- found = 1;
- dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
- "q/Rep w/ bad NAA from system already"
- " in DB.\n"));
- break;
- }
- }
-
- if ((!found) && (nh == NULL)) {
-
- nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
- dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
- " bad NAA from system not yet in DB.\n"));
-
- if (nh != NULL) {
- nh->next = NULL;
- if (!mpt_bad_naa)
- mpt_bad_naa = nh;
- if (prevnh)
- prevnh->next = nh;
-
- nh->NAA = source_naa; /* Set the S_NAA value. */
- for (i = 0; i < FC_ALEN; i++)
- nh->ieee[i] = fch->saddr[i];
- dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
- "%02x:%02x with non-compliant S_NAA value.\n",
- fch->saddr[0], fch->saddr[1], fch->saddr[2],
- fch->saddr[3], fch->saddr[4],fch->saddr[5]));
- } else {
- printk (KERN_ERR "mptlan/type_trans: Unable to"
- " kmalloc a NAA_Hosed struct.\n");
- }
- } else if (!found) {
- printk (KERN_ERR "mptlan/type_trans: found not"
- " set, but nh isn't null. Evil "
- "funkiness abounds.\n");
- }
- write_unlock_irq(&bad_naa_lock);
- }
-}
-#endif
/* Strip the SNAP header from ARP packets since we don't
* pass them through to the 802.2/SNAP layers.
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index d962ba0dd87a..191a3202cecc 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -105,7 +105,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (test_and_set_bit(0, &buffer_opened))
+ if (test_and_set_bit_lock(0, &buffer_opened))
return -EBUSY;
/* Register as a user of dcookies
@@ -129,7 +129,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
fail:
dcookie_unregister(file->private_data);
out:
- clear_bit(0, &buffer_opened);
+ __clear_bit_unlock(0, &buffer_opened);
return err;
}
@@ -141,7 +141,7 @@ static int event_buffer_release(struct inode *inode, struct file *file)
dcookie_unregister(file->private_data);
buffer_pos = 0;
atomic_set(&buffer_ready, 0);
- clear_bit(0, &buffer_opened);
+ __clear_bit_unlock(0, &buffer_opened);
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 110022d78689..5d72866897a8 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -575,7 +575,7 @@ static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
- size = pci_resource_len(pdev, resno) >> PAGE_SHIFT;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
if (start < size && size - start >= nr)
return 1;
WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd87..5049a47030ac 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1692,24 +1692,24 @@ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709S,
- quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709S,
+ quirk_brcm_570x_limit_vpd);
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 1f5f6143f35c..132a78159b60 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -100,7 +100,8 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
- * @return: kernel virtual pointer to image of ROM
+ *
+ * Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
@@ -167,7 +168,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
* pci_map_rom_copy - map a PCI ROM to kernel space, create a copy
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
- * @return: kernel virtual pointer to image of ROM
+ *
+ * Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e1654f59eb70..0a49cd788a75 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -21,7 +21,23 @@ menuconfig STAGING
If in doubt, say N here.
-if STAGING
+
+config STAGING_EXCLUDE_BUILD
+ bool "Exclude Staging drivers from being built"
+ default y
+ ---help---
+ Are you sure you really want to build the staging drivers?
+ They taint your kernel, don't live up to the normal Linux
+ kernel quality standards, are a bit crufty around the edges,
+ and might go off and kick your dog when you aren't paying
+ attention.
+
+ Say N here to be able to select and build the Staging drivers.
+ This option is primarily here to prevent them from being built
+ when selecting 'make allyesconfg' and 'make allmodconfig' so
+ don't be all that put off, your dog will be just fine.
+
+if !STAGING_EXCLUDE_BUILD
source "drivers/staging/et131x/Kconfig"
@@ -45,4 +61,4 @@ source "drivers/staging/at76_usb/Kconfig"
source "drivers/staging/poch/Kconfig"
-endif # STAGING
+endif # !STAGING_EXCLUDE_BUILD
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 7426235ccc44..217fb7e62c2f 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -1,6 +1,6 @@
config USB_IP_COMMON
tristate "USB IP support (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ depends on USB && NET && EXPERIMENTAL
default N
---help---
This enables pushing USB packets over IP to allow remote
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fe34d74cfb19..2a117e286e54 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -718,6 +718,8 @@ got:
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
free = ext4_free_blocks_after_init(sb, group, gdp);
gdp->bg_free_blocks_count = cpu_to_le16(free);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
+ gdp);
}
spin_unlock(sb_bgl_lock(sbi, group));
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8dbf6953845b..be21a5ae33cb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2329,6 +2329,8 @@ static int ext4_da_writepage(struct page *page,
unlock_page(page);
return 0;
}
+ /* now mark the buffer_heads as dirty and uptodate */
+ block_commit_write(page, 0, PAGE_CACHE_SIZE);
}
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -4580,9 +4582,10 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
- return ext4_indirect_trans_blocks(inode, nrblocks, 0);
- return ext4_ext_index_trans_blocks(inode, nrblocks, 0);
+ return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
+ return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
+
/*
* Account for index blocks, block groups bitmaps and block group
* descriptor blocks if modify datablocks and index blocks
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index dfe17a134052..444ad998f72e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4441,6 +4441,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
else if (block >= (entry->start_blk + entry->count))
n = &(*n)->rb_right;
else {
+ ext4_unlock_group(sb, group);
ext4_error(sb, __func__,
"Double free of blocks %d (%d %d)\n",
block, entry->start_blk, entry->count);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 994859df010e..e4a241c65dbe 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1458,9 +1458,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
- ((sbi->s_es->s_reserved_gdt_blocks +1 ) <<
- EXT4_DESC_PER_BLOCK_BITS(sb))) /
- groups_per_flex;
+ ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
+ EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
sbi->s_flex_groups = kzalloc(flex_group_count *
sizeof(struct flex_groups), GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
@@ -2885,12 +2884,9 @@ int ext4_force_commit(struct super_block *sb)
/*
* Ext4 always journals updates to the superblock itself, so we don't
* have to propagate any other updates to the superblock on disk at this
- * point. Just start an async writeback to get the buffers on their way
- * to the disk.
- *
- * This implicitly triggers the writebehind on sync().
+ * point. (We can probably nuke this function altogether, and remove
+ * any mention to sb->s_dirt in all of fs/ext4; eventual cleanup...)
*/
-
static void ext4_write_super(struct super_block *sb)
{
if (mutex_trylock(&sb->s_lock) != 0)
@@ -2900,15 +2896,15 @@ static void ext4_write_super(struct super_block *sb)
static int ext4_sync_fs(struct super_block *sb, int wait)
{
- tid_t target;
+ int ret = 0;
trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
sb->s_dirt = 0;
- if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
- if (wait)
- jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
- }
- return 0;
+ if (wait)
+ ret = ext4_force_commit(sb);
+ else
+ jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+ return ret;
}
/*
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 1bd8d4acc6f2..61f32f3868cd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -115,7 +115,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
*/
void __log_wait_for_space(journal_t *journal)
{
- int nblocks;
+ int nblocks, space_left;
assert_spin_locked(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
- * outstanding transactions there is nothing to checkpoint and
- * we can't make progress. Abort the journal in this case.
+ * transactions ready to be checkpointed, try to recover
+ * journal space by calling cleanup_journal_tail(), and if
+ * that doesn't work, by waiting for the currently committing
+ * transaction to complete. If there is absolutely no way
+ * to make progress, this is either a BUG or corrupted
+ * filesystem, so abort the journal and leave a stack
+ * trace for forensic evidence.
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
- if (__log_space_left(journal) < nblocks) {
+ space_left = __log_space_left(journal);
+ if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
+ tid_t tid = 0;
+ if (journal->j_committing_transaction)
+ tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
if (chkpt) {
log_do_checkpoint(journal);
+ } else if (cleanup_journal_tail(journal) == 0) {
+ /* We were able to recover space; yay! */
+ ;
+ } else if (tid) {
+ log_wait_commit(journal, tid);
} else {
- printk(KERN_ERR "%s: no transactions\n",
- __func__);
+ printk(KERN_ERR "%s: needed %d blocks and "
+ "only had %d space available\n",
+ __func__, nblocks, space_left);
+ printk(KERN_ERR "%s: no way to get more "
+ "journal space\n", __func__);
+ WARN_ON(1);
journal_abort(journal, 0);
}
-
spin_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9203c3332f17..9497718fe920 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -116,7 +116,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
*/
void __jbd2_log_wait_for_space(journal_t *journal)
{
- int nblocks;
+ int nblocks, space_left;
assert_spin_locked(&journal->j_state_lock);
nblocks = jbd_space_needed(journal);
@@ -129,25 +129,43 @@ void __jbd2_log_wait_for_space(journal_t *journal)
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
- * outstanding transactions there is nothing to checkpoint and
- * we can't make progress. Abort the journal in this case.
+ * transactions ready to be checkpointed, try to recover
+ * journal space by calling cleanup_journal_tail(), and if
+ * that doesn't work, by waiting for the currently committing
+ * transaction to complete. If there is absolutely no way
+ * to make progress, this is either a BUG or corrupted
+ * filesystem, so abort the journal and leave a stack
+ * trace for forensic evidence.
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
- if (__jbd2_log_space_left(journal) < nblocks) {
+ space_left = __jbd2_log_space_left(journal);
+ if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
+ tid_t tid = 0;
+ if (journal->j_committing_transaction)
+ tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
+ } else if (jbd2_cleanup_journal_tail(journal) == 0) {
+ /* We were able to recover space; yay! */
+ ;
+ } else if (tid) {
+ jbd2_log_wait_commit(journal, tid);
} else {
- printk(KERN_ERR "%s: no transactions\n",
- __func__);
+ printk(KERN_ERR "%s: needed %d blocks and "
+ "only had %d space available\n",
+ __func__, nblocks, space_left);
+ printk(KERN_ERR "%s: no way to get more "
+ "journal space in %s\n", __func__,
+ journal->j_devname);
+ WARN_ON(1);
jbd2_journal_abort(journal, 0);
}
-
spin_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 783de118de92..e70d657a19f8 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1089,6 +1089,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
__func__);
+ jbd2_stats_proc_exit(journal);
kfree(journal);
return NULL;
}
@@ -1098,6 +1099,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
if (err) {
printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
__func__);
+ jbd2_stats_proc_exit(journal);
kfree(journal);
return NULL;
}
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c62aff1..18546d8eb78e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(pfn); \
+ unsigned long __nid = arch_pfn_to_nid(__pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c75b82bda327..feb4657bb043 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1136,7 +1136,7 @@ static inline void pci_mmcfg_late_init(void) { }
#endif
#ifdef CONFIG_HAS_IOMEM
-static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar)
+static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
/*
* Make sure the BAR is actually a memory resource, not an IO resource
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 66fad3fc02b1..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
+static bool vmap_initialized __read_mostly = false;
+
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
int cpu;
int flush = 0;
+ if (unlikely(!vmap_initialized))
+ return;
+
for_each_possible_cpu(cpu) {
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
INIT_LIST_HEAD(&vbq->dirty);
vbq->nr_dirty = 0;
}
+
+ vmap_initialized = true;
}
void unmap_kernel_range(unsigned long addr, unsigned long size)