author    Christophe Leroy <christophe.leroy@c-s.fr>    2018-05-18 15:01:16 +0200
committer Michael Ellerman <mpe@ellerman.id.au>    2018-06-04 00:39:19 +1000
commit    1128bb7813a896bd608fb622eee3c26aaf33b473 (patch)
tree      38866fc629ccaa69be843be50329319f7b0ed033 /arch/powerpc/include
parent    56b04d568f880a48d892e840cfaf4efc0f0ce39b (diff)
powerpc/lib: Adjust .balign inside string functions for PPC32
commit 87a156fb18fe1 ("Align hot loops of some string functions") degraded the performance of string functions by adding useless nops.

A simple benchmark on an 8xx calling 100000x a memchr() that matches the first byte runs in 41668 TB ticks before this patch and in 35986 TB ticks after this patch. So this gives an improvement of approx 10%.

Another benchmark doing the same with a memchr() matching the 128th byte runs in 1011365 TB ticks before this patch and 1005682 TB ticks after this patch, so regardless of the number of loops, removing those useless nops improves the test by 5683 TB ticks.

Fixes: 87a156fb18fe1 ("Align hot loops of some string functions")
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
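The benchmark itself is not part of the patch; the following is a minimal sketch of the kind of kernel-side microbenchmark described above (an assumption, not the author's actual harness), reading the timebase with get_tb() and using a hypothetical memchr_bench() helper:

/*
 * Hypothetical microbenchmark: call memchr() 100000 times on a buffer
 * whose first byte matches, and report the elapsed timebase ticks.
 */
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/time.h>		/* get_tb() */

static void memchr_bench(void)
{
	static char buf[128] = { 'x' };	/* match at offset 0 */
	void *volatile hit;		/* keep the calls from being optimized away */
	u64 start, end;
	int i;

	start = get_tb();
	for (i = 0; i < 100000; i++)
		hit = memchr(buf, 'x', sizeof(buf));
	end = get_tb();

	pr_info("memchr() x100000: %llu TB ticks\n", end - start);
}

Placing the match at buf[127] instead corresponds to the second measurement quoted above, where the loop body dominates and the saving from the removed nops is a roughly constant number of ticks.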
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--    arch/powerpc/include/asm/cache.h    3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index c1d257aa4c2d..66298461b640 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -9,11 +9,14 @@
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
+#define IFETCH_ALIGN_SHIFT 2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT 7
#else
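The consumers of the new constant live outside this diffstat. As a sketch (an assumption based on how cache.h derives byte counts from its other *_SHIFT macros, not shown in this hunk), the shift becomes the byte alignment that the assembly string routines can request instead of a full cache-line alignment:

/*
 * Assumed companion definition (alongside the other *_BYTES macros in
 * cache.h, not visible in this hunk): turn the shift into the byte
 * count that .balign takes.
 */
#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

/*
 * A hot loop in a string routine such as memchr() can then be preceded
 * by
 *
 *	.balign IFETCH_ALIGN_BYTES
 *
 * rather than a cache-line-sized .balign: on an 8xx (shift 2, i.e.
 * 4 bytes) no padding nops are needed at all since instructions are
 * already 4-byte aligned, and on a 603 (shift 3, i.e. 8 bytes) at most
 * one nop is inserted.
 */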