Diffstat (limited to 'include')
32 files changed, 286 insertions, 82 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c33749f95b32..058129e9b04c 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,8 +30,7 @@
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
@@ -40,8 +39,7 @@
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
@@ -53,7 +51,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -75,7 +72,6 @@ static inline int atomic_add_return(int i, atomic_t *v)
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 97e807c8c812..0232ccb76f2b 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -29,6 +29,9 @@ KMAP_D(16) KM_IRQ_PTE,
 KMAP_D(17) KM_NMI,
 KMAP_D(18) KM_NMI_PTE,
 KMAP_D(19) KM_KDB,
+/*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
 KMAP_D(20) KM_TYPE_NR
 };
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 3c80fd7e8b56..d53a67dff018 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,6 +7,9 @@
 #ifndef __BIG_ENDIAN_BITFIELD
 #define __BIG_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __BIG_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 83195fb82962..f7f8ad13adb6 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -7,6 +7,9 @@
 #ifndef __LITTLE_ENDIAN_BITFIELD
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
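With __BYTE_ORDER now defined alongside __BIG_ENDIAN/__LITTLE_ENDIAN, code shared between the kernel and userspace can test endianness with an ordinary preprocessor comparison instead of probing the *_BITFIELD macros. A minimal sketch of the pattern this enables (the macro name below is invented for illustration):

#include <asm/byteorder.h>	/* pulls in big_endian.h or little_endian.h */

#if __BYTE_ORDER == __BIG_ENDIAN
#define EXAMPLE_FIRST_BYTE_IS_MSB	1	/* most significant byte stored first */
#else
#define EXAMPLE_FIRST_BYTE_IS_MSB	0
#endif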
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
index 000000000000..5ac51552d908
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was not possible or direct reclaim was more suitable */
+#define COMPACT_SKIPPED		0
+/* compaction should continue to another pageblock */
+#define COMPACT_CONTINUE	1
+/* direct compaction partially compacted a zone and there are suitable pages */
+#define COMPACT_PARTIAL		2
+/* The full zone was compacted */
+#define COMPACT_COMPLETE	3
+
+#ifdef CONFIG_COMPACTION
+extern int sysctl_compact_memory;
+extern int sysctl_compaction_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_extfrag_threshold;
+extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	return zone->compact_considered < (1UL << zone->compact_defer_shift);
+}
+
+#else
+static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+{
+	return COMPACT_CONTINUE;
+}
+
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+	return 1;
+}
+
+#endif /* CONFIG_COMPACTION */
+
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int compaction_register_node(struct node *node);
+extern void compaction_unregister_node(struct node *node);
+
+#else
+
+static inline int compaction_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void compaction_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* _LINUX_COMPACTION_H */
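defer_compaction() and compaction_deferred() give callers an exponential backoff: after a failure, the next 1 << compact_defer_shift compaction attempts are skipped, up to 1 << COMPACT_MAX_DEFER_SHIFT. A rough sketch of the intended calling pattern; the wrapper functions are invented and the real user (mm/page_alloc.c) differs in detail:

#include <linux/compaction.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Illustrative only: skip compaction while it is deferred. */
static unsigned long example_try_compaction(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_mask,
			nodemask_t *nodemask)
{
	if (compaction_deferred(zone))
		return COMPACT_SKIPPED;	/* too many recent failures */

	return try_to_compact_pages(zonelist, order, gfp_mask, nodemask);
}

/* Illustrative only: called when the allocation still fails afterwards. */
static void example_note_compaction_failure(struct zone *zone)
{
	defer_compaction(zone);	/* widen the window of skipped attempts */
}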
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a73454aec333..20b51cab6593 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -86,9 +86,44 @@ extern void rebuild_sched_domains(void);
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
+/*
+ * reading current mems_allowed and mempolicy in the fastpath must protected
+ * by get_mems_allowed()
+ */
+static inline void get_mems_allowed(void)
+{
+	current->mems_allowed_change_disable++;
+
+	/*
+	 * ensure that reading mems_allowed and mempolicy happens after the
+	 * update of ->mems_allowed_change_disable.
+	 *
+	 * the write-side task finds ->mems_allowed_change_disable is not 0,
+	 * and knows the read-side task is reading mems_allowed or mempolicy,
+	 * so it will clear old bits lazily.
+	 */
+	smp_mb();
+}
+
+static inline void put_mems_allowed(void)
+{
+	/*
+	 * ensure that reading mems_allowed and mempolicy before reducing
+	 * mems_allowed_change_disable.
+	 *
+	 * the write-side task will know that the read-side task is still
+	 * reading mems_allowed or mempolicy, don't clears old bits in the
+	 * nodemask.
+	 */
+	smp_mb();
+	--ACCESS_ONCE(current->mems_allowed_change_disable);
+}
+
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
+	task_lock(current);
 	current->mems_allowed = nodemask;
+	task_unlock(current);
 }
 
 #else /* !CONFIG_CPUSETS */
@@ -187,6 +222,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
+static inline void get_mems_allowed(void)
+{
+}
+
+static inline void put_mems_allowed(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
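get_mems_allowed() and put_mems_allowed() bracket read-side use of current->mems_allowed (and the task mempolicy) so that a concurrent cpuset rebind does not clear nodemask bits while an allocator is reading them. A minimal read-side sketch; the helper function is invented:

#include <linux/cpuset.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

/* Illustrative: sample the first node this task may allocate from. */
static int example_first_allowed_node(void)
{
	int nid;

	get_mems_allowed();
	nid = first_node(current->mems_allowed);
	put_mems_allowed();

	return nid;
}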
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index f8c2e1767500..b3cd4de9432b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -28,7 +28,7 @@ struct _ddebug {
 	/*
 	 * The flags field controls the behaviour at the callsite.
 	 * The bits here are changed dynamically when the user
-	 * writes commands to <debugfs>/dynamic_debug/ddebug
+	 * writes commands to <debugfs>/dynamic_debug/control
 	 */
 #define _DPRINTK_FLAGS_PRINT   (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
diff --git a/include/linux/err.h b/include/linux/err.h
index 1b12642636c7..448afc12c78a 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,22 +19,22 @@
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
-static inline void *ERR_PTR(long error)
+static inline void * __must_check ERR_PTR(long error)
 {
 	return (void *) error;
 }
 
-static inline long PTR_ERR(const void *ptr)
+static inline long __must_check PTR_ERR(const void *ptr)
 {
 	return (long) ptr;
 }
 
-static inline long IS_ERR(const void *ptr)
+static inline long __must_check IS_ERR(const void *ptr)
 {
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
-static inline long IS_ERR_OR_NULL(const void *ptr)
+static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
 {
 	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
 }
@@ -46,7 +46,7 @@ static inline long IS_ERR_OR_NULL(const void *ptr)
  * Explicitly cast an error-valued pointer to another pointer type in such a
  * way as to make it clear that's what's going on.
  */
-static inline void *ERR_CAST(const void *ptr)
+static inline void * __must_check ERR_CAST(const void *ptr)
 {
 	/* cast away the const */
 	return (void *) ptr;
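The __must_check annotations make the compiler warn when a caller computes ERR_PTR()/PTR_ERR()/IS_ERR() and then drops the result. For reference, the convention these helpers implement, shown with an invented constructor:

#include <linux/err.h>
#include <linux/slab.h>

struct example_buf {
	void *data;
};

/* On failure the errno travels inside the returned pointer. */
static struct example_buf *example_buf_create(size_t len)
{
	struct example_buf *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return ERR_PTR(-ENOMEM);
	b->data = kzalloc(len, GFP_KERNEL);
	if (!b->data) {
		kfree(b);
		return ERR_PTR(-ENOMEM);
	}
	return b;
}

static int example_buf_user(void)
{
	struct example_buf *b = example_buf_create(64);

	if (IS_ERR(b))
		return PTR_ERR(b);	/* decode the errno back out */
	kfree(b->data);
	kfree(b);
	return 0;
}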
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 1296af45169d..f3793ebc241c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -37,7 +37,7 @@ struct dentry;
 #define FBIOGET_HWCINFO         0x4616
 #define FBIOPUT_MODEINFO        0x4617
 #define FBIOGET_DISPINFO        0x4618
-
+#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
 #define FB_TYPE_PLANES			1	/* Non interleaved planes */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4c6d41333f98..975609cb8548 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,7 +15,7 @@ struct vm_area_struct;
  * Zone modifiers (see linux/mmzone.h - low three bits)
  *
  * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use the consistently. The definitions here may
+ * without the underscores and use them consistently. The definitions here may
  * be used in bit comparisons.
  */
 #define __GFP_DMA	((__force gfp_t)0x01u)
@@ -101,7 +101,7 @@ struct vm_area_struct;
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -152,12 +152,12 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
  * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
  * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
  * and there are 16 of them to cover all possible combinations of
- * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
- * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1".
+ * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
@@ -187,7 +187,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 
 #define GFP_ZONE_TABLE ( \
 	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
-	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
 	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
 	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
 	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
@@ -197,7 +197,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 )
 
 /*
- * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
  * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
  * entry starting with bit 0. Bit is set if the combination is not
  * allowed.
@@ -320,17 +320,17 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
 
 #define __get_free_page(gfp_mask) \
-		__get_free_pages((gfp_mask),0)
+		__get_free_pages((gfp_mask), 0)
 
 #define __get_dma_pages(gfp_mask, order) \
-		__get_free_pages((gfp_mask) | GFP_DMA,(order))
+		__get_free_pages((gfp_mask) | GFP_DMA, (order))
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, int cold);
 
 #define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
+#define free_page(addr) free_pages((addr), 0)
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 74152c08ad07..caafd0561aa1 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,7 +27,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #include <asm/kmap_types.h>
 
-#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+#ifdef CONFIG_DEBUG_HIGHMEM
 
 void debug_kmap_atomic(enum km_type type);
 
diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h
index 9d88b29ddf55..e8b92f67f10d 100644
--- a/include/linux/ivtvfb.h
+++ b/include/linux/ivtvfb.h
@@ -33,6 +33,5 @@ struct ivtvfb_dma_frame {
 };
 
 #define IVTVFB_IOC_DMA_FRAME	_IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame)
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cc5e3ffe9fce..8317ec4b9f3b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,9 +24,9 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define USHORT_MAX	((u16)(~0U))
-#define SHORT_MAX	((s16)(USHORT_MAX>>1))
-#define SHORT_MIN	(-SHORT_MAX - 1)
+#define USHRT_MAX	((u16)(~0U))
+#define SHRT_MAX	((s16)(USHRT_MAX>>1))
+#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
 #define INT_MAX		((int)(~0U>>1))
 #define INT_MIN		(-INT_MAX - 1)
 #define UINT_MAX	(~0U)
@@ -375,6 +375,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	return buf;
 }
 
+extern int hex_to_bin(char ch);
+
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
@@ -389,6 +391,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
 	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
 	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
@@ -423,14 +426,13 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
  * no local ratelimit_state used in the !PRINTK case
 */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...) ({			\
-	static struct ratelimit_state _rs = {		\
-		.interval = DEFAULT_RATELIMIT_INTERVAL, \
-		.burst = DEFAULT_RATELIMIT_BURST,	\
-	};						\
-							\
-	if (__ratelimit(&_rs))				\
-		printk(fmt, ##__VA_ARGS__);		\
+#define printk_ratelimited(fmt, ...)  ({			\
+	static DEFINE_RATELIMIT_STATE(_rs,			\
+				      DEFAULT_RATELIMIT_INTERVAL, \
+				      DEFAULT_RATELIMIT_BURST); \
+								\
+	if (__ratelimit(&_rs))					\
+		printk(fmt, ##__VA_ARGS__);			\
 })
 #else
 /* No effect, but we still get type checking even in the !PRINTK case: */
@@ -447,6 +449,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_ratelimited pr_warning_ratelimited
 #define pr_notice_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info_ratelimited(fmt, ...) \
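Two of the kernel.h additions, hex_to_bin() and the shorter pr_warn/pr_warn_ratelimited aliases, combine naturally. A hypothetical parsing helper as a sketch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative: parse two hex digits into a byte, complaining (rate-limited)
 * about junk input; hex_to_bin() returns a negative value on a bad digit. */
static int example_parse_hex_byte(const char *s, u8 *out)
{
	int hi = hex_to_bin(s[0]);
	int lo = hex_to_bin(s[1]);

	if (hi < 0 || lo < 0) {
		pr_warn_ratelimited("example: bad hex pair '%c%c'\n", s[0], s[1]);
		return -EINVAL;
	}

	*out = (hi << 4) | lo;
	return 0;
}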
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index f1ca0dcc1628..0e8a346424bb 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -25,12 +25,14 @@ struct lis3lv02d_platform_data {
 #define LIS3_IRQ1_FF_WU_12	(3 << 0)
 #define LIS3_IRQ1_DATA_READY	(4 << 0)
 #define LIS3_IRQ1_CLICK		(7 << 0)
+#define LIS3_IRQ1_MASK		(7 << 0)
 #define LIS3_IRQ2_DISABLE	(0 << 3)
 #define LIS3_IRQ2_FF_WU_1	(1 << 3)
 #define LIS3_IRQ2_FF_WU_2	(2 << 3)
 #define LIS3_IRQ2_FF_WU_12	(3 << 3)
 #define LIS3_IRQ2_DATA_READY	(4 << 3)
 #define LIS3_IRQ2_CLICK		(7 << 3)
+#define LIS3_IRQ2_MASK		(7 << 3)
 #define LIS3_IRQ_OPEN_DRAIN	(1 << 6)
 #define LIS3_IRQ_ACTIVE_LOW	(1 << 7)
 	unsigned char irq_cfg;
@@ -43,6 +45,15 @@ struct lis3lv02d_platform_data {
 #define LIS3_WAKEUP_Z_HI	(1 << 5)
 	unsigned char wakeup_flags;
 	unsigned char wakeup_thresh;
+	unsigned char wakeup_flags2;
+	unsigned char wakeup_thresh2;
+#define LIS3_HIPASS_CUTFF_8HZ	0
+#define LIS3_HIPASS_CUTFF_4HZ	1
+#define LIS3_HIPASS_CUTFF_2HZ	2
+#define LIS3_HIPASS_CUTFF_1HZ	3
+#define LIS3_HIPASS1_DISABLE	(1 << 2)
+#define LIS3_HIPASS2_DISABLE	(1 << 3)
+	unsigned char hipass_ctrl;
 #define LIS3_NO_MAP		0
 #define LIS3_DEV_X		1
 #define LIS3_DEV_Y		2
@@ -58,6 +69,7 @@ struct lis3lv02d_platform_data {
 	/* Limits for selftest are specified in chip data sheet */
 	s16 st_min_limits[3]; /* min pass limit x, y, z */
 	s16 st_max_limits[3]; /* max pass limit x, y, z */
+	int irq2;
 };
 
 #endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h
index 2203121a43e9..8c22a8938642 100644
--- a/include/linux/matroxfb.h
+++ b/include/linux/matroxfb.h
@@ -4,6 +4,7 @@
 #include <asm/ioctl.h>
 #include <linux/types.h>
 #include <linux/videodev2.h>
+#include <linux/fb.h>
 
 struct matroxioc_output_mode {
 	__u32	output;		/* which output */
@@ -37,7 +38,5 @@ enum matroxfb_ctrl_id {
   MATROXFB_CID_LAST
 };
 
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
-
 #endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44301c6affa8..05894795fdc1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,13 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
+extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+					struct list_head *dst,
+					unsigned long *scanned, int order,
+					int mode, struct zone *z,
+					struct mem_cgroup *mem_cont,
+					int active, int file);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -64,12 +71,6 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask);
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
-
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 35b07b773e6c..864035fb8f8a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -202,6 +202,7 @@ static inline int is_mem_section_removable(unsigned long pfn,
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
+extern int mem_online_node(int nid);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int remove_memory(u64 start, u64 size);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 1cc966cd3e5f..7b9ef6bf45aa 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -23,6 +23,13 @@ enum {
 	MPOL_MAX,	/* always last member of enum */
 };
 
+enum mpol_rebind_step {
+	MPOL_REBIND_ONCE,	/* do rebind work at once(not by two step) */
+	MPOL_REBIND_STEP1,	/* first step(set all the newly nodes) */
+	MPOL_REBIND_STEP2,	/* second step(clean all the disallowed nodes)*/
+	MPOL_REBIND_NSTEP,
+};
+
 /* Flags for set_mempolicy */
 #define MPOL_F_STATIC_NODES	(1 << 15)
 #define MPOL_F_RELATIVE_NODES	(1 << 14)
@@ -51,6 +58,7 @@ enum {
 */
 #define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
 #define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
+#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */
 
 #ifdef __KERNEL__
 
@@ -193,8 +201,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void mpol_rebind_task(struct task_struct *tsk,
-					const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
+				enum mpol_rebind_step step);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
@@ -308,7 +316,8 @@ static inline void numa_default_policy(void)
 }
 
 static inline void mpol_rebind_task(struct task_struct *tsk,
-					const nodemask_t *new)
+				const nodemask_t *new,
+				enum mpol_rebind_step step)
 {
 }
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 73f92c5feea2..e3c4ff8c3e38 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -132,6 +132,7 @@ enum {
 	PM8607_ID_LDO9,
 	PM8607_ID_LDO10,
 	PM8607_ID_LDO12,
+	PM8607_ID_LDO13,
 	PM8607_ID_LDO14,
 
 	PM8607_ID_RG_MAX,
@@ -309,7 +310,7 @@ struct pm860x_chip {
 
 };
 
-#define PM8607_MAX_REGULATOR	15	/* 3 Bucks, 12 LDOs */
+#define PM8607_MAX_REGULATOR	PM8607_ID_RG_MAX	/* 3 Bucks, 13 LDOs */
 
 enum {
 	GI2C_PORT = 0,
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7f085c97c799..7238231b8dd4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -9,7 +9,7 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
-extern int putback_lru_pages(struct list_head *l);
+extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
@@ -19,17 +19,19 @@ extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
 
 extern int migrate_prep(void);
+extern int migrate_prep_local(void);
 extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
 #define PAGE_MIGRATION 0
 
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
+static inline int migrate_prep_local(void) { return -ENOSYS; }
 
 static inline int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb19bb92b809..b969efb03787 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -106,6 +107,9 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -334,6 +338,7 @@ void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function. Provide a
@@ -591,7 +596,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-	return __va(page_to_pfn(page) << PAGE_SHIFT);
+	return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cf9e458e96b0..0fa491326c4a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -321,6 +321,15 @@ struct zone {
 	unsigned long		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int		compact_considered;
+	unsigned int		compact_defer_shift;
+#endif
 
 	ZONE_PADDING(_pad1_)
 
@@ -641,9 +650,10 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
+extern struct mutex zonelists_mutex;
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
-void build_all_zonelists(void);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -972,7 +982,7 @@ struct mem_section {
 #endif
 
 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 668cf1bef030..8f69d09a41a5 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,7 +2,7 @@
 #define _LINUX_RATELIMIT_H
 
 #include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
 
 #define DEFAULT_RATELIMIT_INTERVAL	(5 * HZ)
 #define DEFAULT_RATELIMIT_BURST		10
@@ -25,6 +25,17 @@ struct ratelimit_state {
 		.burst		= burst_init,				\
 	}
 
+static inline void ratelimit_state_init(struct ratelimit_state *rs,
+					int interval, int burst)
+{
+	spin_lock_init(&rs->lock);
+	rs->interval = interval;
+	rs->burst = burst;
+	rs->printed = 0;
+	rs->missed = 0;
+	rs->begin = 0;
+}
+
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
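ratelimit_state_init() complements the static DEFINE_RATELIMIT_STATE() initializer for ratelimit_state objects embedded in dynamically allocated structures. A sketch using an invented device structure:

#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

struct example_dev {
	struct ratelimit_state rs;
};

static struct example_dev *example_dev_alloc(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	/* allow DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL */
	ratelimit_state_init(&dev->rs, DEFAULT_RATELIMIT_INTERVAL,
			     DEFAULT_RATELIMIT_BURST);
	return dev;
}

static void example_dev_complain(struct example_dev *dev)
{
	if (__ratelimit(&dev->rs))
		pr_warn("example: transient device error\n");
}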
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 234a8476cba8..e2980287245e 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -157,7 +157,11 @@ struct regulator_consumer_supply {
 *
 * Initialisation constraints, our supply and consumers supplies.
 *
- * @supply_regulator_dev: Parent regulator (if any).
+ * @supply_regulator: Parent regulator. Specified using the regulator name
+ *                    as it appears in the name field in sysfs, which can
+ *                    be explicitly set using the constraints field 'name'.
+ * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
+ *                        of supply_regulator.
 *
 * @constraints: Constraints. These must be specified for the regulator to
 *               be usable.
@@ -168,7 +172,8 @@ struct regulator_consumer_supply {
 * @driver_data: Data passed to regulator_init.
 */
 struct regulator_init_data {
-	struct device *supply_regulator_dev; /* or NULL for LINE */
+	const char *supply_regulator;        /* or NULL for system supply */
+	struct device *supply_regulator_dev; /* or NULL for system supply */
 
 	struct regulation_constraints constraints;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d25bd224d370..77216742c178 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,8 +26,17 @@
 */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
-#ifdef CONFIG_KSM
-	atomic_t ksm_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+	/*
+	 * The external_refcount is taken by either KSM or page migration
+	 * to take a reference to an anon_vma when there is no
+	 * guarantee that the vma of page tables will exist for
+	 * the duration of the operation. A caller that takes
+	 * the reference is responsible for clearing up the
+	 * anon_vma if they are the last user on release
+	 */
+	atomic_t external_refcount;
 #endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
@@ -61,22 +70,22 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
-	atomic_set(&anon_vma->ksm_refcount, 0);
+	atomic_set(&anon_vma->external_refcount, 0);
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
-	return atomic_read(&anon_vma->ksm_refcount);
+	return atomic_read(&anon_vma->external_refcount);
 }
 #else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return 0;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b55e988988b5..c0151ffd3541 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -384,7 +384,7 @@ struct user_namespace;
 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 */
 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
 
@@ -1421,6 +1421,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
+	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ec2b7a42b45f..b6b614364dd8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -152,6 +152,7 @@ enum {
 };
 
 #define SWAP_CLUSTER_MAX 32
+#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
@@ -224,20 +225,15 @@ static inline void lru_cache_add_anon(struct page *page)
 	__lru_cache_add(page, LRU_INACTIVE_ANON);
 }
 
-static inline void lru_cache_add_active_anon(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_ANON);
-}
-
 static inline void lru_cache_add_file(struct page *page)
 {
 	__lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-static inline void lru_cache_add_active_file(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_FILE);
-}
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
+#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
 
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 117f0dd8ad03..7f43ccdc1d38 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -43,6 +43,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 63548f0a44b1..452f229c380a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -358,11 +358,11 @@ enum ip_defrag_users {
 	IP_DEFRAG_LOCAL_DELIVER,
 	IP_DEFRAG_CALL_RA_CHAIN,
 	IP_DEFRAG_CONNTRACK_IN,
-	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
 	IP_DEFRAG_CONNTRACK_OUT,
-	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
 	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
-	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
 	IP_DEFRAG_VS_IN,
 	IP_DEFRAG_VS_OUT,
 	IP_DEFRAG_VS_FWD
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eba5cc00325a..2600b69757b8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -354,11 +354,11 @@ struct inet_frag_queue;
 enum ip6_defrag_users {
 	IP6_DEFRAG_LOCAL_DELIVER,
 	IP6_DEFRAG_CONNTRACK_IN,
-	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
 	IP6_DEFRAG_CONNTRACK_OUT,
-	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
 	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
-	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
 };
 
 struct ip6_create_arg {
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index 89d43b3d4cb9..6316cdabf73f 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -99,6 +99,7 @@ struct lcd_sync_arg {
 #define FBIPUT_COLOR		_IOW('F', 6, int)
 #define FBIPUT_HSYNC		_IOW('F', 9, int)
 #define FBIPUT_VSYNC		_IOW('F', 10, int)
+#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, u_int32_t)
 
 #endif  /* ifndef DA8XX_FB_H */
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 2cc893fc1f85..288205457713 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -34,8 +34,6 @@ enum { LCDC_CLK_BUS, LCDC_CLK_PERIPHERAL, LCDC_CLK_EXTERNAL };
 #define LCDC_FLAGS_HSCNT (1 << 3)	/* Disable HSYNC during VBLANK */
 #define LCDC_FLAGS_DWCNT (1 << 4)	/* Disable dotclock during blanking */
 
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-
 struct sh_mobile_lcdc_sys_bus_cfg {
 	unsigned long ldmt2r;
 	unsigned long ldmt3r;
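A recurring theme in the framebuffer hunks is FBIO_WAITFORVSYNC: the ioctl definition moves into linux/fb.h, the duplicate copies in ivtvfb.h, matroxfb.h and sh_mobile_lcdc.h are removed, and da8xx-fb.h gains a matching one. From userspace the ioctl is issued the same way whichever driver backs the framebuffer. A minimal sketch (the device node is only an example, and drivers that do not implement the ioctl will fail it):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	__u32 screen = 0;	/* wait for vertical sync on the first display */
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fb0");
		return 1;
	}
	if (ioctl(fd, FBIO_WAITFORVSYNC, &screen) < 0)
		perror("FBIO_WAITFORVSYNC");
	close(fd);
	return 0;
}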