author	Chen Liqin <liqin.chen@sunplusct.com>	2009-06-12 22:01:00 +0800
committer	Arnd Bergmann <arnd@arndb.de>	2009-06-19 11:38:47 +0200
commit	6bc9a3966f0395419b09b2ec90f89f7f00341b37 (patch)
tree	9c0d9d5376020266f5602501c8376d4a4f13142d /arch/score/mm/cache.c
parent	0732f87761dbe417cb6e084b712d07e879e876ef (diff)
score: Add support for Sunplus S+core architecture
This is the complete set of files for the new Score architecture in Linux. The Score instruction set supports 16-bit, 32-bit and 64-bit instructions, and Score SoCs have been used in game machines and LCD TVs.

Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/score/mm/cache.c')
-rw-r--r--	arch/score/mm/cache.c	308
1 file changed, 308 insertions(+), 0 deletions(-)
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
new file mode 100644
index 000000000000..1ebc67f18c6d
--- /dev/null
+++ b/arch/score/mm/cache.c
@@ -0,0 +1,308 @@
+/*
+ * arch/score/mm/cache.c
+ *
+ * Score Processor version.
+ *
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * Cache operations, installed at boot by cpu_cache_init(), which
+ * points them at the S+core 7 ("s7_") implementations below.
+ */
+void (*flush_cache_all)(void);
+void (*__flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+ unsigned long page, unsigned long pfn);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*__flush_cache_vmap)(void);
+void (*__flush_cache_vunmap)(void);
+void (*flush_cache_sigtramp)(unsigned long addr);
+void (*flush_data_cache_page)(unsigned long addr);
+EXPORT_SYMBOL(flush_data_cache_page);
+void (*flush_icache_all)(void);
+
+/* Score 7 cache operations. */
+void s7_flush_cache_all(void);
+void s7___flush_cache_all(void);
+static void s7_flush_cache_mm(struct mm_struct *mm);
+static void s7_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+static void s7_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long page, unsigned long pfn);
+static void s7_flush_icache_range(unsigned long start, unsigned long end);
+static void s7_flush_cache_sigtramp(unsigned long addr);
+static void s7_flush_data_cache_page(unsigned long addr);
+static void s7_flush_dcache_range(unsigned long start, unsigned long end);
+
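+/*
+ * Called by the generic MM code when a PTE is installed.  If the page
+ * has PG_arch_1 set (used as a deferred-flush marker) and the new
+ * mapping is executable, write the page's data back so that
+ * instruction fetches see the current contents, then clear the marker.
+ */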
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC);
+
+ pfn = pte_pfn(pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
+ addr = (unsigned long) page_address(page);
+ if (exec)
+ s7_flush_data_cache_page(addr);
+ clear_bit(PG_arch_1, &page->flags);
+ }
+}
+
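+/*
+ * protection_map is indexed by the low four vm_flags bits (VM_READ,
+ * VM_WRITE, VM_EXEC, VM_SHARED): entries 0-7 describe private
+ * mappings, where writable pages get copy-on-write semantics, and
+ * entries 8-15 describe shared mappings.
+ */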
+static inline void setup_protection_map(void)
+{
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+}
+
+void __devinit cpu_cache_init(void)
+{
+ flush_cache_all = s7_flush_cache_all;
+ __flush_cache_all = s7___flush_cache_all;
+ flush_cache_mm = s7_flush_cache_mm;
+ flush_cache_range = s7_flush_cache_range;
+ flush_cache_page = s7_flush_cache_page;
+ flush_icache_range = s7_flush_icache_range;
+ flush_cache_sigtramp = s7_flush_cache_sigtramp;
+ flush_data_cache_page = s7_flush_data_cache_page;
+
+ setup_protection_map();
+}
+
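+/*
+ * The cache instructions below take an operation code and an address.
+ * As used in this file: op 0x02 invalidates an icache line, 0x0e
+ * writes back and invalidates a dcache line, 0x1a drains the write
+ * buffer, 0x10 invalidates the entire icache, and 0x1f writes back
+ * and invalidates the entire dcache.  The whole-cache operations
+ * still take an address operand; the "la" apparently just supplies
+ * the function's own address as a convenient valid one.
+ */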
+void s7_flush_icache_all(void)
+{
+ __asm__ __volatile__(
+ "la r8, s7_flush_icache_all\n"
+ "cache 0x10, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ : : : "r8");
+}
+
+void s7_flush_dcache_all(void)
+{
+ __asm__ __volatile__(
+ "la r8, s7_flush_dcache_all\n"
+ "cache 0x1f, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1a, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ : : : "r8");
+}
+
+void s7_flush_cache_all(void)
+{
+ __asm__ __volatile__(
+ "la r8, s7_flush_cache_all\n"
+ "cache 0x10, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1f, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1a, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ : : : "r8");
+}
+
+void s7___flush_cache_all(void)
+{
+ __asm__ __volatile__(
+ "la r8, s7_flush_cache_all\n"
+ "cache 0x10, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1f, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1a, [r8, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\nnop\n"
+ : : : "r8");
+}
+
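+/*
+ * An mm whose context is still zero has never run, so none of its
+ * pages can be live in the caches and there is nothing to flush.
+ */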
+static void s7_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!(mm->context))
+ return;
+ s7_flush_cache_all();
+}
+
+/*
+ * Flushing a range precisely can take a long time, so we check
+ * whether each page in the range is present and only flush the parts
+ * backed by present pages.  Note that the range may span two pages,
+ * one present and the other not.
+ *
+ * This interface is provided in the hope that the port can find a
+ * suitably efficient method for removing multiple page-sized regions
+ * from the cache.
+ */
+static void
+s7_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int exec = vma->vm_flags & VM_EXEC;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (!(mm->context))
+ return;
+
+ while (start <= end) {
+ unsigned long tmpend;
+ pgdp = pgd_offset(mm, start);
+ pudp = pud_offset(pgdp, start);
+ pmdp = pmd_offset(pudp, start);
+ ptep = pte_offset(pmdp, start);
+
+ if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
+ start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
+ continue;
+ }
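+ /* Flush to the end of this page or to the end of the
+    range, whichever comes first. */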
+ tmpend = (start | (PAGE_SIZE-1)) > end ?
+ end : (start | (PAGE_SIZE-1));
+
+ s7_flush_dcache_range(start, tmpend);
+ if (exec)
+ s7_flush_icache_range(start, tmpend);
+ start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
+ }
+}
+
+static void
+s7_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ int exec = vma->vm_flags & VM_EXEC;
+ /* Address the page through the kernel's direct-mapped segment. */
+ unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
+
+ s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+
+ if (exec)
+ s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+}
+
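+/*
+ * Make the signal trampoline at addr visible to instruction fetch:
+ * 0x02 invalidates the two icache lines, 0x0d appears to write the
+ * corresponding dcache lines back, and 0x1a drains the write buffer.
+ */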
+static void s7_flush_cache_sigtramp(unsigned long addr)
+{
+ __asm__ __volatile__(
+ "cache 0x02, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ "cache 0x02, [%0, 0x4]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+
+ "cache 0x0d, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ "cache 0x0d, [%0, 0x4]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+
+ "cache 0x1a, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ : : "r" (addr));
+}
+
+/*
+ * Write back and invalidate one page of the dcache.
+ * The caller must ensure the page contains no instructions, because
+ * this function does not touch the icache.
+ * addr must be cache-line aligned.
+ */
+static void s7_flush_data_cache_page(unsigned long addr)
+{
+ unsigned int i;
+ for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
+ __asm__ __volatile__(
+ "cache 0x0e, [%0, 0]\n"
+ "cache 0x1a, [%0, 0]\n"
+ "nop\n"
+ : : "r" (addr));
+ addr += L1_CACHE_BYTES;
+ }
+}
+
+/*
+ * 1. Write back and invalidate the dcache lines in the range.
+ * 2. Drain the write buffer.
+ * The range must be smaller than PAGE_SIZE.
+ */
+static void s7_flush_dcache_range(unsigned long start, unsigned long end)
+{
+ int size, i;
+
+ start = start & ~(L1_CACHE_BYTES - 1);
+ /* Round end up so a partial final line is also covered. */
+ end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+ size = end - start;
+ /* flush dcache to ram, and invalidate dcache lines. */
+ for (i = 0; i < size; i += L1_CACHE_BYTES) {
+ __asm__ __volatile__(
+ "cache 0x0e, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ "cache 0x1a, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ : : "r" (start));
+ start += L1_CACHE_BYTES;
+ }
+}
+
+static void s7_flush_icache_range(unsigned long start, unsigned long end)
+{
+ int size, i;
+ start = start & ~(L1_CACHE_BYTES - 1);
+ /* Round end up so a partial final line is also covered. */
+ end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+
+ size = end - start;
+ /* invalidate icache lines. */
+ for (i = 0; i < size; i += L1_CACHE_BYTES) {
+ __asm__ __volatile__(
+ "cache 0x02, [%0, 0]\n"
+ "nop\nnop\nnop\nnop\nnop\n"
+ : : "r" (start));
+ start += L1_CACHE_BYTES;
+ }
+}