Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 80 | ++++++++++++++++++++++++++++++++++++++++----------------------------------------
1 file changed, 40 insertions, 40 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2de3c996f327..797271f5afb8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -311,7 +311,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	 * and whatever may come after it.
 	 */
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
+		return s->object_size;
 
 #endif
 	/*
@@ -609,11 +609,11 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->objsize,
+	print_section("Object ", p, min_t(unsigned long, s->object_size,
 				PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->objsize,
-			s->inuse - s->objsize);
+		print_section("Redzone ", p + s->object_size,
+			s->inuse - s->object_size);
 
 	if (s->offset)
 		off = s->offset + sizeof(void *);
@@ -655,12 +655,12 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 	u8 *p = object;
 
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize - 1] = POISON_END;
+		memset(p, POISON_FREE, s->object_size - 1);
+		p[s->object_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->objsize, val, s->inuse - s->objsize);
+		memset(p + s->object_size, val, s->inuse - s->object_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -705,10 +705,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
  * 	0xa5 (POISON_END)
  *
- * object + s->objsize
+ * object + s->object_size
  * 	Padding to reach word boundary. This is also used for Redzoning.
  * 	Padding is extended by another word if Redzoning is enabled and
- * 	objsize == inuse.
+ * 	object_size == inuse.
  *
  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  * 	0xcc (RED_ACTIVE) for objects in use.
@@ -727,7 +727,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object + s->size
  * 	Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
  * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
@@ -787,25 +787,25 @@ static int check_object(struct kmem_cache *s, struct page *page,
 					void *object, u8 val)
 {
 	u8 *p = object;
-	u8 *endobject = object + s->objsize;
+	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, page, object, "Redzone",
-			endobject, val, s->inuse - s->objsize))
+			endobject, val, s->inuse - s->object_size))
 			return 0;
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->objsize);
+				endobject, POISON_INUSE, s->inuse - s->object_size);
 		}
 	}
 
 	if (s->flags & SLAB_POISON) {
 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 			(!check_bytes_and_report(s, page, p, "Poison", p,
-					POISON_FREE, s->objsize - 1) ||
+					POISON_FREE, s->object_size - 1) ||
 			 !check_bytes_and_report(s, page, p, "Poison",
-				p + s->objsize - 1, POISON_END, 1)))
+				p + s->object_size - 1, POISON_END, 1)))
 			return 0;
 		/*
 		 * check_pad_bytes cleans up on its own.
@@ -926,7 +926,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->objsize);
+			print_section("Object ", (void *)object, s->object_size);
 
 		dump_stack();
 	}
@@ -942,14 +942,14 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	lockdep_trace_alloc(flags);
 	might_sleep_if(flags & __GFP_WAIT);
 
-	return should_failslab(s->objsize, flags, s->flags);
+	return should_failslab(s->object_size, flags, s->flags);
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -966,13 +966,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 		unsigned long flags;
 
 		local_irq_save(flags);
-		kmemcheck_slab_free(s, x, s->objsize);
-		debug_check_no_locks_freed(x, s->objsize);
+		kmemcheck_slab_free(s, x, s->object_size);
+		debug_check_no_locks_freed(x, s->object_size);
 		local_irq_restore(flags);
 	}
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(x, s->objsize);
+		debug_check_no_obj_freed(x, s->object_size);
 }
 
 /*
@@ -1207,7 +1207,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1237,7 +1237,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -2098,10 +2098,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
-		"default order: %d, min order: %d\n", s->name, s->objsize,
+		"default order: %d, min order: %d\n", s->name, s->object_size,
 		s->size, oo_order(s->oo), oo_order(s->min));
 
-	if (oo_order(s->min) > get_order(s->objsize))
+	if (oo_order(s->min) > get_order(s->object_size))
 		printk(KERN_WARNING "  %s debugging increased min order, use "
 			"slub_debug=O to disable.\n", s->name);
 
@@ -2374,7 +2374,7 @@ redo:
 	}
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, s->objsize);
+		memset(object, 0, s->object_size);
 
 	slab_post_alloc_hook(s, gfpflags, object);
 
@@ -2385,7 +2385,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
 	return ret;
 }
@@ -2415,7 +2415,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    s->objsize, s->size, gfpflags, node);
+				    s->object_size, s->size, gfpflags, node);
 
 	return ret;
 }
@@ -2910,7 +2910,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->objsize;
+	unsigned long size = s->object_size;
 	unsigned long align = s->align;
 	int order;
 
@@ -2939,7 +2939,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * end of the object and the free pointer. If not then add an
 	 * additional word to have some bytes to store Redzone information.
 	 */
-	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
 		size += sizeof(void *);
 #endif
 
@@ -2987,7 +2987,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * user specified and the dynamic determination of cache line size
 	 * on bootup.
 	 */
-	align = calculate_alignment(flags, align, s->objsize);
+	align = calculate_alignment(flags, align, s->object_size);
 	s->align = align;
 
 	/*
@@ -3035,7 +3035,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->objsize = size;
+	s->object_size = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
@@ -3050,7 +3050,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	 * Disable debugging flags that store metadata if the min slab
 	 * order increased.
 	 */
-	if (get_order(s->size) > get_order(s->objsize)) {
+	if (get_order(s->size) > get_order(s->object_size)) {
 		s->flags &= ~DEBUG_METADATA_FLAGS;
 		s->offset = 0;
 		if (!calculate_sizes(s, -1))
@@ -3124,7 +3124,7 @@ error:
  */
unsigned int kmem_cache_size(struct kmem_cache *s)
 {
-	return s->objsize;
+	return s->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -3853,11 +3853,11 @@ void __init kmem_cache_init(void)
 
 		if (s && s->size) {
 			char *name = kasprintf(GFP_NOWAIT,
-				 "dma-kmalloc-%d", s->objsize);
+				 "dma-kmalloc-%d", s->object_size);
 
 			BUG_ON(!name);
 			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-				s->objsize, SLAB_CACHE_DMA);
+				s->object_size, SLAB_CACHE_DMA);
 		}
 	}
 #endif
@@ -3951,7 +3951,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
-		s->objsize = max(s->objsize, (int)size);
+		s->object_size = max(s->object_size, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
 		if (sysfs_slab_alias(s, name)) {
@@ -4634,7 +4634,7 @@ SLAB_ATTR_RO(align);
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->objsize);
+	return sprintf(buf, "%d\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
 
@@ -5438,7 +5438,7 @@ __initcall(slab_sysfs_init);
 static void print_slabinfo_header(struct seq_file *m)
 {
 	seq_puts(m, "slabinfo - version: 2.1\n");
-	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
+	seq_puts(m, "# name            <active_objs> <num_objs> <object_size> "
 		 "<objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
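
The rename is purely mechanical: every use of the old objsize field becomes object_size, with no behavioral change. For readers unfamiliar with the size fields the hunks keep contrasting, here is a minimal sketch, assuming the 3.x-era SLUB layout; the real definition lives in include/linux/slub_def.h and carries many more members, and "kmem_cache_sketch" is a hypothetical stand-in, not the kernel's type:

	/* Hypothetical stand-in for the fields contrasted above. */
	struct kmem_cache_sketch {
		int object_size;	/* payload size handed to the caller
					 * (the field this patch renames from
					 * "objsize"); kmem_cache_size() and
					 * the sysfs object_size attribute
					 * report this value */
		int inuse;		/* object_size padded to a word
					 * boundary; debug metadata such as
					 * the red zone starts here */
		int size;		/* full per-object footprint:
					 * object_size plus red zone, poison
					 * padding, free pointer and
					 * alloc/free tracking */
		int align;		/* object alignment */
	};

The slab_out_of_memory() hunk prints exactly this pair ("object size: %d, buffer size: %d"), which is why the two fields must stay distinct even though they are equal when no debug options are active.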
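The object-layout comment touched by the @@ -705 hunk is easier to follow next to runnable code. Below is a hedged userspace mock of the init_object() logic from the @@ -655 hunk: the POISON_FREE/POISON_END/RED_INACTIVE/RED_ACTIVE byte values are the ones the comment itself names, while the struct, flag values, and function name are simplified stand-ins rather than the kernel's:

	#include <stdio.h>
	#include <string.h>

	#define POISON_FREE	0x6b	/* fills a poisoned object's payload */
	#define POISON_END	0xa5	/* marks the payload's last byte */
	#define RED_INACTIVE	0xbb	/* red-zone value for free objects */
	#define RED_ACTIVE	0xcc	/* red-zone value for objects in use */

	#define FLAG_POISON	0x1	/* stand-in for __OBJECT_POISON */
	#define FLAG_RED_ZONE	0x2	/* stand-in for SLAB_RED_ZONE */

	struct cache_sketch {
		unsigned long flags;
		int object_size;	/* payload bytes */
		int inuse;		/* payload + word-alignment padding */
	};

	/* Mirrors init_object() above: poison all but the last payload
	 * byte, terminate with POISON_END, then fill the padding up to
	 * "inuse" with the red-zone value. */
	static void init_object_sketch(struct cache_sketch *s,
				       unsigned char *p, unsigned char val)
	{
		if (s->flags & FLAG_POISON) {
			memset(p, POISON_FREE, s->object_size - 1);
			p[s->object_size - 1] = POISON_END;
		}
		if (s->flags & FLAG_RED_ZONE)
			memset(p + s->object_size, val,
			       s->inuse - s->object_size);
	}

	int main(void)
	{
		struct cache_sketch s = {
			.flags = FLAG_POISON | FLAG_RED_ZONE,
			.object_size = 6,
			.inuse = 8,
		};
		unsigned char obj[8];
		int i;

		init_object_sketch(&s, obj, RED_INACTIVE);
		for (i = 0; i < s.inuse; i++)
			printf("%02x ", obj[i]);  /* 6b 6b 6b 6b 6b a5 bb bb */
		printf("\n");
		return 0;
	}

Calling it with val = RED_INACTIVE reproduces the free-object pattern that check_object() later verifies byte by byte; RED_ACTIVE marks the same region for allocated objects.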
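Finally, from the API consumer's side, object_size is the size passed to kmem_cache_create() (possibly grown by cache merging, per the @@ -3951 hunk), and it is what kmem_cache_size() reports back regardless of how much debug metadata inflates s->size. A hedged kernel-context sketch; "foo" and foo_cache are hypothetical names, the calls are the real slab API:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	struct foo {
		int a;
		long b;
	};

	static struct kmem_cache *foo_cache;

	static int __init foo_init(void)
	{
		/* sizeof(struct foo) becomes s->object_size; SLAB_RED_ZONE
		 * makes s->size larger, but that stays internal to SLUB. */
		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					      0, SLAB_RED_ZONE, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* reports sizeof(struct foo), i.e. s->object_size */
		pr_info("foo object size: %u\n", kmem_cache_size(foo_cache));
		return 0;
	}
	module_init(foo_init);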