author	Dave Chinner <dchinner@redhat.com>	2013-08-28 10:18:00 +1000
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:30 -0400
commit	3b1d58a4c96799eb4c92039e1b851b86f853548a (patch)
tree	3d72b6c0506c0a5138ef44dec8ab5c02fd5b29ba /include
parent	f604156751db77e08afe47ce29fe8f3d51ad9b04 (diff)
list_lru: per-node list infrastructure
Now that we have an LRU list API, we can start to enhance the
implementation.  This splits the single LRU list into per-node lists and
locks to enhance scalability.  Items are placed on lists according to the
node the memory belongs to.  To make scanning the lists efficient, also
track whether the per-node lists have entries in them in an active
nodemask.

Note: we use a fixed-size array for the node LRU, so this struct can be
very big if MAX_NUMNODES is big.  If this becomes a problem it is fixable
by turning the array into a pointer and dynamically allocating it to
nr_node_ids.  That quantity is firmware-provided, and would still provide
room for all nodes at the cost of a pointer lookup and an extra
allocation.  Because that allocation will most likely come from a
different slab cache than the main structure holding this structure, we
may very well fail.

[glommer@openvz.org: fix warnings, added note about node lru]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
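The per-node placement described above amounts to: derive the owning node from the memory backing the item, manipulate only that node's list under that node's lock, and keep active_nodes in sync so scanners can skip empty nodes. Below is a minimal sketch of an add path along those lines; it is illustrative only and not necessarily the exact list_lru_add() this patch adds in mm/list_lru.c, which is outside the 'include' diff shown here.

#include <linux/list_lru.h>
#include <linux/mm.h>		/* page_to_nid(), virt_to_page() */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	/* Items are slab-backed, so the owning node comes from the page. */
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		/* First item on this node: mark the node as active. */
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;	/* newly added */
	}
	spin_unlock(&nlru->lock);
	return false;		/* already on an LRU list */
}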
Diffstat (limited to 'include')
-rw-r--r--	include/linux/list_lru.h	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 1a548b0b7578..f4d4cb608c02 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -8,6 +8,7 @@
#define _LRU_LIST_H
#include <linux/list.h>
+#include <linux/nodemask.h>
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
@@ -18,11 +19,26 @@ enum lru_status {
internally, but has to return locked. */
};
-struct list_lru {
+struct list_lru_node {
spinlock_t lock;
struct list_head list;
/* kept as signed so we can catch imbalance bugs */
long nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ /*
+ * Because we use a fixed-size array, this struct can be very big if
+ * MAX_NUMNODES is big. If this becomes a problem this is fixable by
+ * turning this into a pointer and dynamically allocating this to
+ * nr_node_ids. This quantity is firmware-provided, and still would
+ * provide room for all nodes at the cost of a pointer lookup and an
+ * extra allocation. Because that allocation will most likely come from
+ * a different slab cache than the main structure holding this
+ * structure, we may very well fail.
+ */
+ struct list_lru_node node[MAX_NUMNODES];
+ nodemask_t active_nodes;
};
int list_lru_init(struct list_lru *lru);
@@ -66,10 +82,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* guarantee that the list is not updated while the count is being computed.
* Callers that want such a guarantee need to provide an outer lock.
*/
-static inline unsigned long list_lru_count(struct list_lru *lru)
-{
- return lru->nr_items;
-}
+unsigned long list_lru_count(struct list_lru *lru);
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
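With the list split per node there is no longer a single nr_items field to return, which is why the last hunk replaces the inline list_lru_count() with an out-of-line declaration. A sketch of what such an implementation could look like, assuming the count is accumulated only over nodes set in active_nodes (the real body belongs in mm/list_lru.c and is not part of this 'include' diff):

#include <linux/list_lru.h>
#include <linux/nodemask.h>

unsigned long list_lru_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	/* Only nodes flagged in active_nodes can hold items. */
	for_each_node_mask(nid, lru->active_nodes) {
		struct list_lru_node *nlru = &lru->node[nid];

		spin_lock(&nlru->lock);
		count += nlru->nr_items;
		spin_unlock(&nlru->lock);
	}
	return count;
}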