author		Mel Gorman <mgorman@suse.de>	2012-07-31 16:44:30 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 18:42:46 -0700
commit		381760eadc393bcb1bb328510ad75cf13431806d (patch)
tree		4db4e80b82ea78e1b486ebde7feb835a2de5c2ef
parent		b4b9e3558508980fc0cd161a545ffb55a1f13ee9 (diff)
mm: micro-optimise slab to avoid a function call
Getting and putting objects in SLAB currently requires a function call but
the bulk of the work is related to PFMEMALLOC reserves which are only
consumed when network-backed storage is critical.  Use an inline function
to determine if the function call is required.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/slab.c	28
1 file changed, 26 insertions, 2 deletions
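The shape of the change is a classic fast-path/slow-path split: the common case stays in a small inline wrapper that does little more than an array index, and the PFMEMALLOC bookkeeping is only reached through an out-of-line call when sk_memalloc_socks() reports that network-backed swap is in use. The user-space sketch below mirrors that structure only; every name in it (obj_cache, memalloc_socks_active, cache_get_slow, cache_get) is invented for illustration, and __builtin_expect() stands in for the kernel's unlikely().

/*
 * Illustrative fast-path/slow-path split, modelled on the slab change.
 * Hypothetical names; only the structure corresponds to the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct obj_cache {
	void *entry[64];	/* cached object pointers */
	int avail;		/* number of valid entries */
};

/* Stand-in for sk_memalloc_socks(): rarely true in practice. */
static bool memalloc_socks_active;

/* Slow path: the extra PFMEMALLOC-style bookkeeping would live here. */
static void *cache_get_slow(struct obj_cache *ac)
{
	/* ... reserve checks before handing out the object ... */
	return ac->entry[--ac->avail];
}

/* Fast path: one well-predicted branch, no call in the common case. */
static inline void *cache_get(struct obj_cache *ac)
{
	if (__builtin_expect(memalloc_socks_active, 0))
		return cache_get_slow(ac);
	return ac->entry[--ac->avail];
}

int main(void)
{
	struct obj_cache ac = { .avail = 0 };
	int x = 42;

	ac.entry[ac.avail++] = &x;			/* "put" on the fast path */
	printf("%d\n", *(int *)cache_get(&ac));		/* "get" on the fast path */
	return 0;
}

The benefit is the one the commit message claims: when no memalloc sockets exist, getting or putting an object compiles down to the array access plus one branch, with no call/return overhead on the hot path.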
diff --git a/mm/slab.c b/mm/slab.c
index 77be18dab73c..f8b0d539b482 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -118,6 +118,8 @@
 #include <linux/memory.h>
 #include <linux/prefetch.h>
 
+#include <net/sock.h>
+
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -965,7 +967,7 @@ out:
 	spin_unlock_irqrestore(&l3->list_lock, flags);
 }
 
-static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 						gfp_t flags, bool force_refill)
 {
 	int i;
@@ -1012,7 +1014,20 @@ static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 	return objp;
 }
 
-static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static inline void *ac_get_obj(struct kmem_cache *cachep,
+			struct array_cache *ac, gfp_t flags, bool force_refill)
+{
+	void *objp;
+
+	if (unlikely(sk_memalloc_socks()))
+		objp = __ac_get_obj(cachep, ac, flags, force_refill);
+	else
+		objp = ac->entry[--ac->avail];
+
+	return objp;
+}
+
+static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 								void *objp)
 {
 	if (unlikely(pfmemalloc_active)) {
@@ -1022,6 +1037,15 @@ static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		set_obj_pfmemalloc(&objp);
 	}
 
+	return objp;
+}
+
+static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+								void *objp)
+{
+	if (unlikely(sk_memalloc_socks()))
+		objp = __ac_put_obj(cachep, ac, objp);
+
 	ac->entry[ac->avail++] = objp;
 }