From 88d60c32765716289abeb362c44adf6c35c6824c Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Mon, 8 Nov 2010 18:19:22 -0500
Subject: fanotify: remove packed from access response message

fanotify has decided to be careful about alignment and packing rather
than rely on __attribute__((packed)) for multiarch support. Since this
attribute isn't doing anything on fanotify_response, we can just drop
it. This does not break API/ABI.

Suggested-by: Tvrtko Ursulin
Signed-off-by: Eric Paris
---
 include/linux/fanotify.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 0f0121467fc4..bdbf9bb29b54 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -96,7 +96,7 @@ struct fanotify_event_metadata {
 struct fanotify_response {
 	__s32 fd;
 	__u32 response;
-} __attribute__ ((packed));
+};

 /* Legit userspace responses to a _PERM event */
 #define FAN_ALLOW	0x01
--
cgit v1.2.3


From b1085ba80cd2784400a7beec3fda5099198ed01c Mon Sep 17 00:00:00 2001
From: Lino Sanfilippo
Date: Fri, 5 Nov 2010 17:05:27 +0100
Subject: fanotify: if set by user unset FMODE_NONOTIFY before fsnotify_perm() is called

Unsetting FMODE_NONOTIFY in fsnotify_open() is too late, since
fsnotify_perm() is called before. If FMODE_NONOTIFY is set,
fsnotify_perm() will skip permission checks, so a user can still
disable permission checks by setting this flag in an open() call. This
patch corrects this by unsetting the flag before fsnotify_perm() is
called.

Signed-off-by: Lino Sanfilippo
Signed-off-by: Eric Paris
---
 include/linux/fsnotify.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 5c185fa27089..b10bcdeaef76 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -235,9 +235,6 @@ static inline void fsnotify_open(struct file *file)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_ISDIR;

-	/* FMODE_NONOTIFY must never be set from user */
-	file->f_mode &= ~FMODE_NONOTIFY;
-
 	fsnotify_parent(path, NULL, mask);
 	fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
 }
--
cgit v1.2.3


From 09e5f14e57c70f9d357862bb56e57026c51092a1 Mon Sep 17 00:00:00 2001
From: Lino Sanfilippo
Date: Fri, 19 Nov 2010 10:58:07 +0100
Subject: fanotify: on group destroy allow all waiters to bypass permission check

When fanotify_release() is called, there may still be processes
waiting for access permission. Currently only processes for which an
event has already been queued into the group's access list will be
woken up. Processes for which no event has been queued will continue
to sleep and thus cause a deadlock when fsnotify_put_group() is
called. Furthermore there is a race allowing further processes to be
waiting on the access wait queue after wake_up (if they arrive before
clear_marks_by_group() is called). This patch corrects this by setting
a flag to inform processes that the group is about to be destroyed and
thus not to wait for access permission.

[additional changelog from eparis]

Let's think about the 4 relevant code paths from the PoV of the
'operator', 'listener', 'responder' and 'closer'. Here the 'operator'
is the process doing an action (like open/read) which could require
permission. The 'listener' is the task (or in this case thread) tasked
with reading from the fanotify file descriptor. The 'responder' is the
thread responsible for responding to access requests. The 'closer' is
the thread attempting to close the fanotify file descriptor.
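For context, the userspace side of the 'responder' usually boils down
to something like the following sketch (hypothetical application code,
not part of this patch; error handling elided):

	/* Answer one permission event previously read from fanotify_fd. */
	struct fanotify_response resp;

	resp.fd = metadata->fd;		/* fd carried by the event */
	resp.response = FAN_ALLOW;	/* or FAN_DENY */
	if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
		/* the kernel rejected the response */;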
The 'operator' is going to end up in:

	fanotify_handle_event()
	get_response_from_access()
	(THIS BLOCKS WAITING ON USERSPACE)

The 'listener' interesting code path:

	fanotify_read()
	copy_event_to_user()
	prepare_for_access_response()
	(THIS CREATES AN fanotify_response_event)

The 'responder' code path:

	fanotify_write()
	process_access_response()
	(REMOVE A fanotify_response_event, SET RESPONSE, WAKE UP 'operator')

The 'closer':

	fanotify_release()
	(SUPPOSED TO CLEAN UP THE REST OF THIS MESS)

What we have today is that in the closer we remove all of the
fanotify_response_events and set a bit so no more response events are
ever created in prepare_for_access_response(). The bug is that we
never wake all of the operators up and tell them to move along. You
fix that in fanotify_get_response_from_access(). You also fix other
operators which haven't gotten there yet. So I agree that's a good
fix.

[/additional changelog from eparis]

[remove additional changes to minimize patch size]
[move initialization so it was inside CONFIG_FANOTIFY_PERMISSION]

Signed-off-by: Lino Sanfilippo
Signed-off-by: Eric Paris
---
 include/linux/fsnotify_backend.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0a68f924f06f..7380763595d3 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -166,7 +166,7 @@ struct fsnotify_group {
 	struct mutex access_mutex;
 	struct list_head access_list;
 	wait_queue_head_t access_waitq;
-	bool bypass_perm; /* protected by access_mutex */
+	atomic_t bypass_perm;
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
 	int f_flags;
 	unsigned int max_marks;
--
cgit v1.2.3


From e9a3854fd4ff3907e6c200a3980e19365ee695e9 Mon Sep 17 00:00:00 2001
From: Lino Sanfilippo
Date: Wed, 24 Nov 2010 18:22:09 +0100
Subject: fanotify: Introduce FAN_NOFD

FAN_NOFD is used in fanotify events that do not provide an open file
descriptor (like the overflow_event).

Signed-off-by: Lino Sanfilippo
Signed-off-by: Eric Paris
---
 include/linux/fanotify.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index bdbf9bb29b54..c73224315aee 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -101,6 +101,8 @@ struct fanotify_response {
 /* Legit userspace responses to a _PERM event */
 #define FAN_ALLOW	0x01
 #define FAN_DENY	0x02
+/* No fd set in event */
+#define FAN_NOFD	-1

 /* Helper functions to deal with fanotify_event_metadata buffers */
 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
--
cgit v1.2.3


From 5167695753c63444a9e6cbbef136200a16c7a225 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 7 Dec 2010 14:18:20 +0100
Subject: perf: Fix duplicate events with multiple-pmu vs software events

Because the multi-pmu bits can share contexts between struct pmu
instances, we could get duplicate events by iterating the pmu list.
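The shape of the fix, roughly (a simplified sketch, not the verbatim
kernel code): a shared context delivers a software event only through
the pmu recorded as its active_pmu, so the other pmu instances sharing
that context no longer report the same event:

	/*
	 * Sketch: skip pmus that merely share this cpu context;
	 * only the owning (active) pmu may deliver the event.
	 */
	if (cpuctx->active_pmu != pmu)
		continue;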
Signed-off-by: Peter Zijlstra
Signed-off-by: Thomas Gleixner
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 include/linux/perf_event.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index de2c41758e29..4f1279e105ee 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -887,6 +887,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };

 struct perf_output_handle {
--
cgit v1.2.3


From 0f004f5a696a9434b7214d0d3cbd0525ee77d428 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 30 Nov 2010 19:48:45 +0100
Subject: sched: Cure more NO_HZ load average woes

There's a long-running regression that proved difficult to fix: it
hits certain people and is rather annoying in its effects.

Damien reported that after 74f5187ac8 (sched: Cure load average vs
NO_HZ woes) his load average is unnaturally high; he also noted that
even with that patch reverted the load average numbers are not
correct.

The problem is that the previous patch only solved half the NO_HZ
problem: it addressed the part of going into NO_HZ mode, not of coming
out of NO_HZ mode. This patch implements that missing half.

When coming out of NO_HZ mode there are two important things to take
care of:

 - Folding the pending idle delta into the global active count.
 - Correctly aging the averages for the idle-duration.

So with this patch the NO_HZ interaction should be complete and
behaviour between CONFIG_NO_HZ=[yn] should be equivalent.

Furthermore, this patch slightly changes the load average computation
by adding a rounding term to the fixed point multiplication.

Reported-by: Damien Wyart
Reported-by: Tim McGrath
Tested-by: Damien Wyart
Tested-by: Orion Poplawski
Tested-by: Kyle McMartin
Signed-off-by: Peter Zijlstra
Cc: stable@kernel.org
Cc: Chase Douglas
LKML-Reference: <1291129145.32004.874.camel@laptop>
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c79e921a68b..223874538b33 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu);

 extern unsigned long this_cpu_load(void);

-extern void calc_global_load(void);
+extern void calc_global_load(unsigned long ticks);

 extern unsigned long get_parent_ip(unsigned long addr);
--
cgit v1.2.3


From 53dde5f385bc56e312f78b7cb25ffaf8efd4735d Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Tue, 16 Nov 2010 13:23:50 -0800
Subject: bootmem: Add alloc_bootmem_align()

Add an alloc_bootmem_align() interface to allocate bootmem with
specified alignment. This is necessary to be able to allocate the
xsave area in a subsequent patch.

Signed-off-by: Suresh Siddha
LKML-Reference: <20101116212441.977574826@sbsiddha-MOBL3.sc.intel.com>
Acked-by: H. Peter Anvin
Signed-off-by: H. Peter Anvin
Cc:
---
 include/linux/bootmem.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 266ab9291232..499dfe982a0e 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,

 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_align(x, align) \
+	__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages(x) \
--
cgit v1.2.3


From ab4e0192196b8d4e43a3945742d4996da934a86f Mon Sep 17 00:00:00 2001
From: Dmitry Torokhov
Date: Tue, 14 Dec 2010 23:53:21 -0800
Subject: Input: define separate EVIOCGKEYCODE_V2/EVIOCSKEYCODE_V2

The desire to keep old names for the EVIOCGKEYCODE/EVIOCSKEYCODE
ioctls while extending them to support large scancodes was a mistake.
While we tried to keep the ABI intact (and we succeeded in doing that;
programs compiled on older kernels will work on newer ones), there is
still a problem with recompiling existing software with newer kernel
headers.

New kernel headers will supply updated ioctl numbers, and the kernel
will expect that userspace will use struct input_keymap_entry to set
and retrieve keymap data. But since the names of the ioctls are still
the same, userspace will happily compile even if it has not been
adjusted to make use of the new structure, and will start mysteriously
failing in the field.

To avoid this issue let's revert the EVIOCGKEYCODE/EVIOCSKEYCODE
definitions and add EVIOCGKEYCODE_V2/EVIOCSKEYCODE_V2 so that
userspace can explicitly select the style of ioctls it wants to
employ.

Reviewed-by: Henrik Rydberg
Acked-by: Jarod Wilson
Acked-by: Mauro Carvalho Chehab
Signed-off-by: Dmitry Torokhov
---
 include/linux/input.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/input.h b/include/linux/input.h
index a8af21d42bc1..9777668883be 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -104,8 +104,10 @@ struct input_keymap_entry {
 #define EVIOCGREP		_IOR('E', 0x03, unsigned int[2])	/* get repeat settings */
 #define EVIOCSREP		_IOW('E', 0x03, unsigned int[2])	/* set repeat settings */

-#define EVIOCGKEYCODE		_IOR('E', 0x04, struct input_keymap_entry)	/* get keycode */
-#define EVIOCSKEYCODE		_IOW('E', 0x04, struct input_keymap_entry)	/* set keycode */
+#define EVIOCGKEYCODE		_IOR('E', 0x04, unsigned int[2])	/* get keycode */
+#define EVIOCGKEYCODE_V2	_IOR('E', 0x04, struct input_keymap_entry)
+#define EVIOCSKEYCODE		_IOW('E', 0x04, unsigned int[2])	/* set keycode */
+#define EVIOCSKEYCODE_V2	_IOW('E', 0x04, struct input_keymap_entry)

 #define EVIOCGNAME(len)		_IOC(_IOC_READ, 'E', 0x06, len)	/* get device name */
 #define EVIOCGPHYS(len)		_IOC(_IOC_READ, 'E', 0x07, len)	/* get physical location */
--
cgit v1.2.3


From 62731fa0c893515dc6cbc3e0a2879a92793c735f Mon Sep 17 00:00:00 2001
From: Alexey Zaytsev
Date: Mon, 22 Nov 2010 00:33:03 +0000
Subject: fanotify: split version into version and metadata_len

To implement per event type optional headers we are interested in
knowing how long the metadata structure is. This patch splits the
__u32 version field into a __u8 version and a __u16 metadata_len field
(with a __u8 left over). This should allow for a backwards compatible
ABI.
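For illustration, a consumer walking the event buffer might validate
the new fields like this (a hypothetical userspace sketch, not part of
this patch; buf/len and handle_event() are assumed application code):

	struct fanotify_event_metadata *ev = (void *)buf;

	while ((char *)ev + sizeof(*ev) <= buf + len &&
	       ev->event_len >= sizeof(*ev)) {
		if (ev->vers != FANOTIFY_METADATA_VERSION)
			break;	/* kernel/userspace mismatch */
		handle_event(ev);
		/* event_len skips any optional per-event-type header */
		ev = (void *)((char *)ev + ev->event_len);
	}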
Signed-off-by: Alexey Zaytsev
[rewrote description and changed object sizes and ordering - eparis]
Signed-off-by: Eric Paris
---
 include/linux/fanotify.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index c73224315aee..6c6133f76e16 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -83,11 +83,13 @@
 				 FAN_ALL_PERM_EVENTS |\
 				 FAN_Q_OVERFLOW)

-#define FANOTIFY_METADATA_VERSION	2
+#define FANOTIFY_METADATA_VERSION	3

 struct fanotify_event_metadata {
 	__u32 event_len;
-	__u32 vers;
+	__u8 vers;
+	__u8 reserved;
+	__u16 metadata_len;
 	__aligned_u64 mask;
 	__s32 fd;
 	__s32 pid;
--
cgit v1.2.3


From f08f5a0add20834d3f3d876dfe08005a5df656db Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Thu, 16 Dec 2010 17:11:58 +0100
Subject: PM / Runtime: Fix pm_runtime_suspended()

There are some situations (e.g. in __pm_generic_call()) where
pm_runtime_suspended() is used to decide whether or not to execute a
device's (system) ->suspend() callback. The callback is not executed
if pm_runtime_suspended() returns true, but it does so for devices
that don't even support runtime PM, because the power.disable_depth
device field is ignored by it. This leads to problems (i.e. devices
are not suspended when they should be), so rework
pm_runtime_suspended() so that it returns false if the device's
power.disable_depth field is different from zero.

Signed-off-by: Rafael J. Wysocki
Cc: stable@kernel.org
---
 include/linux/pm_runtime.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 3ec2358f8692..d19f1cca7f74 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -77,7 +77,8 @@ static inline void device_set_run_wake(struct device *dev, bool enable)

 static inline bool pm_runtime_suspended(struct device *dev)
 {
-	return dev->power.runtime_status == RPM_SUSPENDED;
+	return dev->power.runtime_status == RPM_SUSPENDED
+		&& !dev->power.disable_depth;
 }

 static inline void pm_runtime_mark_last_busy(struct device *dev)
--
cgit v1.2.3


From 3f84622d7c7818077f5e6cf4b8a0d1b10dc65147 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens
Date: Sat, 27 Nov 2010 19:26:32 +0100
Subject: SSB: Fix nvram_get on BCM47xx platform

The nvram_get function was never in the mainline kernel; it only
existed in an external OpenWrt patch. Use the nvram_getenv function,
which is in mainline, and use an include instead of an extra function
declaration.

et0macaddr contains the MAC address in text form, like
00:11:22:33:44:55, so we have to parse it before copying it into
macaddr. nvram_parse_macaddr will be merged into
asm/mach-bcm47xx/nvram.h through the MIPS git tree and will be
available soon. The file will not build now without
nvram_parse_macaddr, but it didn't build before either.
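The parsing step itself is straightforward; a sketch of what
nvram_parse_macaddr has to do with "00:11:22:33:44:55"-style input
(illustrative only, the real helper comes via the MIPS tree):

	static void parse_macaddr_sketch(const char *buf, u8 macaddr[6])
	{
		unsigned int m[6];
		int i;

		if (sscanf(buf, "%x:%x:%x:%x:%x:%x",
			   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
			return;	/* malformed nvram variable */
		for (i = 0; i < 6; i++)
			macaddr[i] = m[i];
	}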
Signed-off-by: Hauke Mehrtens
To: linux-mips@linux-mips.org
Cc: mb@bu3sch.de
Cc: netdev@vger.kernel.org
Cc: Hauke Mehrtens
Acked-by: Michael Buesch
Patchwork: https://patchwork.linux-mips.org/patch/1849/
Signed-off-by: Ralf Baechle
---
 include/linux/ssb/ssb_driver_gige.h | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 942e38736901..eba52a100533 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -96,16 +96,21 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
 	return 0;
 }

-extern char * nvram_get(const char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
 /* Get the device MAC address */
 static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
 {
-#ifdef CONFIG_BCM47XX
-	char *res = nvram_get("et0macaddr");
-	if (res)
-		memcpy(macaddr, res, 6);
-#endif
+	char buf[20];
+	if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0)
+		return;
+	nvram_parse_macaddr(buf, macaddr);
 }
+#else
+static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+{
+}
+#endif

 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
 					  struct pci_dev *pdev);
--
cgit v1.2.3


From e692cb668fdd5a712c6ed2a2d6f2a36ee83997b4 Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen"
Date: Wed, 1 Dec 2010 19:41:49 +0100
Subject: block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead

When stacking devices, a request_queue is not always available. This
forced us to have a no_cluster flag in the queue_limits that could be
used as a carrier until the request_queue had been set up for a
metadevice.

There were several problems with that approach. First of all it was up
to the stacking device to remember to set the queue flag after
stacking had completed. Also, the queue flag and the queue limits had
to be kept in sync at all times. We got that wrong, which could lead
to us issuing commands that went beyond the max scatterlist limit set
by the driver.

The proper fix is to avoid having two flags for tracking the same
thing. We deprecate QUEUE_FLAG_CLUSTER and use the queue limit
directly in the block layer merging functions. The queue_limit
'no_cluster' is turned into 'cluster' to avoid double negatives and to
ease stacking. Clustering defaults to being enabled as before. The
queue flag logic is removed from the stacking function, and explicitly
setting the cluster flag is no longer necessary in DM and MD.

Reported-by: Ed Lin
Signed-off-by: Martin K. Petersen
Acked-by: Mike Snitzer
Cc: stable@kernel.org
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4f..95aeeeb49e8b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };

@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };

-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */

 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP) |		\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)

 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)

+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
--
cgit v1.2.3


From 72d4cd9f38b5ed96b75df4c622be25e1c2648dd3 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 17 Dec 2010 08:34:20 +0100
Subject: block: max hardware sectors limit wrapper

Implement blk_limits_max_hw_sectors() and make
blk_queue_max_hw_sectors() a wrapper around it.

DM needs this to avoid setting queue_limits' max_hw_sectors and
max_sectors directly. dm_set_device_limits() now leverages
blk_limits_max_hw_sectors() logic to establish the appropriate
max_hw_sectors minimum (PAGE_SIZE). Fixes an issue where DM was
incorrectly setting max_sectors rather than max_hw_sectors (which
caused dm_merge_bvec()'s max_hw_sectors check to be ineffective).

Signed-off-by: Mike Snitzer
Cc: stable@kernel.org
Acked-by: Martin K. Petersen
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 95aeeeb49e8b..36ab42c9bb99 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -808,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
--
cgit v1.2.3


From b6aa5901c7a2bd90d0b6b9866300d2648b2568f3 Mon Sep 17 00:00:00 2001
From: Henry C Chang
Date: Wed, 15 Dec 2010 20:45:41 -0800
Subject: ceph: mark user pages dirty on direct-io reads

For the read operation, we have to set the _write_ argument of
get_user_pages to 1, since we will write data to the pages. Also, we
need to SetPageDirty before releasing these pages.
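With the new bool arguments the read path looks roughly like this (a
sketch of the intended usage, not the actual fs/ceph change):

	/* map the user buffer for a direct read: write=true, the
	 * kernel stores the received data into these pages */
	pages = ceph_get_direct_page_vector(data, num_pages, true);

	/* ... issue the OSD read and wait for the data to land ... */

	/* dirty=true marks the pages dirty before the refs are dropped */
	ceph_put_page_vector(pages, num_pages, true);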
Signed-off-by: Henry C Chang
Signed-off-by: Sage Weil
---
 include/linux/ceph/libceph.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9e76d35670d2..72c72bfccb88 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -227,8 +227,10 @@ extern int ceph_open_session(struct ceph_client *client);
 extern void ceph_release_page_vector(struct page **pages, int num_pages);

 extern struct page **ceph_get_direct_page_vector(const char __user *data,
-						 int num_pages);
-extern void ceph_put_page_vector(struct page **pages, int num_pages);
+						 int num_pages,
+						 bool write_page);
+extern void ceph_put_page_vector(struct page **pages, int num_pages,
+				 bool dirty);
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
 extern int ceph_copy_user_to_page_vector(struct page **pages,
--
cgit v1.2.3


From c0f5ac5426f7fd82b23dd5c6a1e633b290294a08 Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas
Date: Thu, 16 Dec 2010 10:38:41 -0700
Subject: Revert "resources: support allocating space within a region from the top down"

This reverts commit e7f8567db9a7f6b3151b0b275e245c1cef0d9c70.

Acked-by: H. Peter Anvin
Signed-off-by: Bjorn Helgaas
Signed-off-by: Jesse Barnes
---
 include/linux/ioport.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index d377ea815d45..b22790268b64 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -112,7 +112,6 @@ struct resource_list {
 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
-extern int resource_alloc_from_bottom;

 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
 extern int request_resource(struct resource *root, struct resource *new);
--
cgit v1.2.3


From fcb119183c73bf0781009713f303e28b1fb13d3e Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas
Date: Thu, 16 Dec 2010 10:38:46 -0700
Subject: resources: add arch hook for preventing allocation in reserved areas

This adds arch_remove_reservations(), which an arch can implement if
it needs to protect part of the address space from allocation.

Sometimes that can be done by just putting a region in the resource
tree, but there are cases where that doesn't work well. For example,
x86 BIOS E820 reservations are not related to devices, so they may
overlap part of, all of, or more than a device resource, so they may
not end up at the correct spot in the resource tree.

Acked-by: H. Peter Anvin
Signed-off-by: Bjorn Helgaas
Signed-off-by: Jesse Barnes
---
 include/linux/ioport.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index b22790268b64..e9bb22cba764 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -123,6 +123,7 @@ extern void reserve_region_with_split(struct resource *root,
 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
 extern int insert_resource(struct resource *parent, struct resource *new);
 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
+extern void arch_remove_reservations(struct resource *avail);
 extern int allocate_resource(struct resource *root, struct resource *new,
 			     resource_size_t size, resource_size_t min,
 			     resource_size_t max, resource_size_t align,
--
cgit v1.2.3


From 4b8fe66300acb2fba8b16d62606e0d30204022fc Mon Sep 17 00:00:00 2001
From: "Dmitry V. Levin"
Date: Fri, 17 Dec 2010 12:03:14 -0800
Subject: netlink: fix gcc -Wconversion compilation warning

$ cat << EOF | gcc -Wconversion -xc -S -o/dev/null -
unsigned f(void) {return NLMSG_HDRLEN;}
EOF
<stdin>: In function 'f':
<stdin>:3:26: warning: negative integer implicitly converted to unsigned type

Signed-off-by: Dmitry V. Levin
Signed-off-by: Kirill A. Shutemov
Signed-off-by: David S. Miller
---
 include/linux/netlink.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 123566912d73..e2b9e63afa68 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -70,7 +70,7 @@ struct nlmsghdr {
    Check		NLM_F_EXCL
 */

-#define NLMSG_ALIGNTO	4
+#define NLMSG_ALIGNTO	4U
 #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
 #define NLMSG_HDRLEN	 ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
 #define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
--
cgit v1.2.3


From b8da46d3d55807037b58f14621a0949f18053bde Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 20 Dec 2010 00:29:32 -0500
Subject: clarify a usage constraint for cnt32_to_63()

The cnt32_to_63 algorithm relies on proper counter data evaluation
ordering to work properly. This was missing from the provided
documentation. Let's augment the documentation with the missing usage
constraint and fix the only instance that got it wrong.

Signed-off-by: Nicolas Pitre
Acked-by: David Howells
Signed-off-by: Linus Torvalds
---
 include/linux/cnt32_to_63.h | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 7605fdd1eb65..e3d8bf26e5eb 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -61,13 +61,31 @@ union cnt32_to_63 {
  *
  * 2) this code must not be preempted for a duration longer than the
  *    32-bit counter half period minus the longest period between two
-  *    calls to this code.
+  *    calls to this code;
  *
  * Those requirements ensure proper update to the state bit in memory.
  * This is usually not a problem in practice, but if it is then a kernel
  * timer should be scheduled to manage for this code to be executed often
  * enough.
  *
+ * And finally:
+ *
+ * 3) the cnt_lo argument must be seen as a globally incrementing value,
+ *    meaning that it should be a direct reference to the counter data which
+ *    can be evaluated according to a specific ordering within the macro,
+ *    and not the result of a previous evaluation stored in a variable.
+ *
+ * For example, this is wrong:
+ *
+ *	u32 partial = get_hw_count();
+ *	u64 full = cnt32_to_63(partial);
+ *	return full;
+ *
+ * This is fine:
+ *
+ *	u64 full = cnt32_to_63(get_hw_count());
+ *	return full;
+ *
  * Note that the top bit (bit 63) in the returned value should be considered
  * as garbage. It is not cleared here because callers are likely to use a
  * multiplier on the returned value which can get rid of the top bit
--
cgit v1.2.3


From 4f32e9b1f812fd6c00cc85a127583fefbdedaedc Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Wed, 22 Dec 2010 10:27:53 +0100
Subject: kthread_work: make lockdep happy

The spinlock in kthread_worker and the wait_queue_head in kthread_work
should both be lockdep-sensible, so change the interface to make it
suitable for CONFIG_LOCKDEP.

tj: comment update

Reported-by: Nicolas
Signed-off-by: Yong Zhang
Signed-off-by: Andy Walls
Tested-by: Andy Walls
Cc: Tejun Heo
Cc: Andrew Morton
Signed-off-by: Tejun Heo
---
 include/linux/kthread.h | 45 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 35 insertions(+), 10 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 685ea65eb803..ce0775aa64c3 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -81,16 +81,41 @@ struct kthread_work {
 #define DEFINE_KTHREAD_WORK(work, fn)					\
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

-static inline void init_kthread_worker(struct kthread_worker *worker)
-{
-	*worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
-}
-
-static inline void init_kthread_work(struct kthread_work *work,
-				     kthread_work_func_t fn)
-{
-	*work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
-}
+/*
+ * kthread_worker.lock and kthread_work.done need their own lockdep class
+ * keys if they are defined on stack with lockdep enabled. Use the
+ * following macros when defining them on stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
+	({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
+	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+# define KTHREAD_WORK_INIT_ONSTACK(work, fn)				\
+	({ init_kthread_work((&work), fn); work; })
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)				\
+	struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+			const char *name, struct lock_class_key *key);
+
+#define init_kthread_worker(worker)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
+	} while (0)
+
+#define init_kthread_work(work, fn)					\
+	do {								\
+		memset((work), 0, sizeof(struct kthread_work));		\
+		INIT_LIST_HEAD(&(work)->node);				\
+		(work)->func = (fn);					\
+		init_waitqueue_head(&(work)->done);			\
+	} while (0)

 int kthread_worker_fn(void *worker_ptr);
--
cgit v1.2.3


From 4e06fd14d5fa78826397c891654a37e5a36ee827 Mon Sep 17 00:00:00 2001
From: Will Newton
Date: Tue, 21 Dec 2010 17:24:29 -0800
Subject: include/linux/unaligned: pack the whole struct rather than just the field

The current packed-struct implementation of unaligned access adds the
packed attribute only to the field within the unaligned struct rather
than to the struct as a whole. This is not sufficient to enforce
proper behaviour on architectures with a default struct alignment of
more than one byte.

For example, the current implementation of __get_unaligned_cpu16, when
compiled for arm with gcc -O1 -mstructure-size-boundary=32, assumes
the struct is on a 4 byte boundary, so it performs the load of the
16-bit packed field as if it were on a 4 byte boundary:

__get_unaligned_cpu16:
	ldrh	r0, [r0, #0]
	bx	lr

Moving the packed attribute to the struct rather than the field causes
the proper unaligned access code to be generated:

__get_unaligned_cpu16:
	ldrb	r3, [r0, #0]	@ zero_extendqisi2
	ldrb	r0, [r0, #1]	@ zero_extendqisi2
	orr	r0, r3, r0, asl #8
	bx	lr

Signed-off-by: Will Newton
Cc: Arnd Bergmann
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/unaligned/packed_struct.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index 2498bb9fe002..c9a6abd972a1 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@

 #include <linux/kernel.h>

-struct __una_u16 { u16 x __attribute__((packed)); };
-struct __una_u32 { u32 x __attribute__((packed)); };
-struct __una_u64 { u64 x __attribute__((packed)); };
+struct __una_u16 { u16 x; } __attribute__((packed));
+struct __una_u32 { u32 x; } __attribute__((packed));
+struct __una_u64 { u64 x; } __attribute__((packed));

 static inline u16 __get_unaligned_cpu16(const void *p)
 {
--
cgit v1.2.3


From 4be2c95d1f7706ca0e74499f2bd118e1cee19669 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Tue, 21 Dec 2010 17:24:30 -0800
Subject: taskstats: pad taskstats netlink response for alignment issues on ia64

The taskstats structure is internally aligned on 8 byte boundaries,
but the layout of the aggregate reply, with two NLA headers and the
pid (each 4 bytes), actually forces the entire structure to be
unaligned.
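Counting from the start of the netlink payload, the aggregate reply as
described above works out to (sizes in bytes; a sketch inferred from
this changelog, not quoted from the code):

	genlmsghdr                        4  ->  offset  0
	nlattr TASKSTATS_TYPE_AGGR_PID    4  ->  offset  4
	nlattr TASKSTATS_TYPE_PID         4  ->  offset  8
	pid (u32)                         4  ->  offset 12
	nlattr TASKSTATS_TYPE_STATS       4  ->  offset 16
	struct taskstats                     ->  offset 20, not 8-aligned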
This causes the kernel to issue unaligned access warnings on some
architectures like ia64.

Unfortunately, some software out there doesn't properly unroll the NLA
packet and assumes that the start of the taskstats structure will
always be 20 bytes from the start of the netlink payload. Aligning the
start of the taskstats structure breaks this software, which we don't
want. So, for now the alignment only happens on architectures that
require it, and those users will have to update to fixed versions of
those packages. Space is reserved in the packet only when needed. This
ifdef should be removed in several years, e.g. 2012, once we can be
confident that fixed versions are installed on most systems.

We add the padding before the aggregate since the aggregate is already
a defined type.

Commit 85893120 ("delayacct: align to 8 byte boundary on 64-bit
systems") previously addressed the alignment issues by padding out the
pid field. This was supposed to be a compatible change, but the
circumstances described above mean that it wasn't. This patch backs
out that change, since it was a hack, and introduces a new NULL
attribute type to provide the padding. Padding the response with 4
bytes avoids allocating an aligned taskstats structure and copying it
back. Since the structure weighs in at 328 bytes, it's too big to do
it on the stack.

Signed-off-by: Jeff Mahoney
Reported-by: Brian Rogers
Cc: Jeff Mahoney
Cc: Guillaume Chazarain
Cc: Balbir Singh
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/taskstats.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index 341dddb55090..2466e550a41d 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -33,7 +33,7 @@
  */

-#define TASKSTATS_VERSION	7
+#define TASKSTATS_VERSION	8
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */

@@ -188,6 +188,7 @@ enum {
 	TASKSTATS_TYPE_STATS,		/* taskstats structure */
 	TASKSTATS_TYPE_AGGR_PID,	/* contains pid + stats */
 	TASKSTATS_TYPE_AGGR_TGID,	/* contains tgid + stats */
+	TASKSTATS_TYPE_NULL,		/* contains nothing */
 	__TASKSTATS_TYPE_MAX,
 };
--
cgit v1.2.3


From 8f33d5277fada0291ea495f7fd44a3e7b7aa41d3 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 22 Dec 2010 14:46:46 +0100
Subject: dmaengine: provide dummy functions for DMA_ENGINE=n

This lets drivers that optionally use the dmaengine build with
DMA_ENGINE unselected.
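The pattern this enables, sketched from a hypothetical driver (the
names filter_fn, filter_param and the PIO fallback are illustrative,
not from this patch):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, filter_fn, filter_param);
	if (!chan)
		use_pio = true;	/* dmaengine absent or configured out */

With CONFIG_DMA_ENGINE=n the dummy __dma_request_channel() simply
returns NULL, so the same fallback path is taken and the driver still
links.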
Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Dan Williams
---
 include/linux/dmaengine.h | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 9d8688b92d8b..8cd00ad98d37 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -824,6 +824,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
@@ -831,7 +833,14 @@ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descript
 }
 static inline void dma_issue_pending_all(void)
 {
-	do { } while (0);
+}
+static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+						     dma_filter_fn fn, void *fn_param)
+{
+	return NULL;
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
 }
 #endif

@@ -842,8 +851,6 @@ void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-void dma_release_channel(struct dma_chan *chan);

 /* --- Helper iov-locking functions --- */
--
cgit v1.2.3