/* MN10300 CPU core caching routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
#if mn10300_local_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif
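
# The interval macro above expands to a low-bit mask ((1 << LOG2_INTERVAL) - 1).
# The range-invalidation loop below tests its entry counter against this mask
# and, whenever the masked bits are all zero, briefly re-enables the dcache and
# interrupts so that a long invalidation does not hold off interrupts for the
# whole run.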
	.am33_2

	.globl mn10300_local_icache_inv_page
	.globl mn10300_local_icache_inv_range
	.globl mn10300_local_icache_inv_range2
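
# There is no selective invalidation of the icache by page or range here: the
# three entry points above are simply aliases for mn10300_local_icache_inv,
# which invalidates the entire icache.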
mn10300_local_icache_inv_page = mn10300_local_icache_inv
mn10300_local_icache_inv_range = mn10300_local_icache_inv
mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
#ifndef CONFIG_SMP
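# On uniprocessor builds there are no other CPUs whose caches need to be
# invalidated, so the global entry points alias the local operations directly.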
	.globl mn10300_icache_inv
	.globl mn10300_icache_inv_page
	.globl mn10300_icache_inv_range
	.globl mn10300_icache_inv_range2
	.globl mn10300_dcache_inv
	.globl mn10300_dcache_inv_page
	.globl mn10300_dcache_inv_range
	.globl mn10300_dcache_inv_range2
mn10300_icache_inv = mn10300_local_icache_inv
mn10300_icache_inv_page = mn10300_local_icache_inv_page
mn10300_icache_inv_range = mn10300_local_icache_inv_range
mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
mn10300_dcache_inv = mn10300_local_dcache_inv
mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
#endif /* !CONFIG_SMP */
###############################################################################
#
# void mn10300_local_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
	ALIGN
	.globl mn10300_local_icache_inv
	.type mn10300_local_icache_inv,@function
mn10300_local_icache_inv:
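	# nothing to do if the icache is not currently enabled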
	mov CHCTR,a0
	movhu (a0),d0
	btst CHCTR_ICEN,d0
	beq mn10300_local_icache_inv_end

	invalidate_icache 1

mn10300_local_icache_inv_end:
	ret [],0
	.size mn10300_local_icache_inv,.-mn10300_local_icache_inv
###############################################################################
#
# void mn10300_local_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
	ALIGN
	.globl mn10300_local_dcache_inv
	.type mn10300_local_dcache_inv,@function
mn10300_local_dcache_inv:
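	# nothing to do if the dcache is not currently enabled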
	mov CHCTR,a0
	movhu (a0),d0
	btst CHCTR_DCEN,d0
	beq mn10300_local_dcache_inv_end

	invalidate_dcache 1

mn10300_local_dcache_inv_end:
	ret [],0
	.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
###############################################################################
#
# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_dcache_inv_page(unsigned long start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
	ALIGN
	.globl mn10300_local_dcache_inv_page
	.globl mn10300_local_dcache_inv_range
	.globl mn10300_local_dcache_inv_range2
	.type mn10300_local_dcache_inv_page,@function
	.type mn10300_local_dcache_inv_range,@function
	.type mn10300_local_dcache_inv_range2,@function
mn10300_local_dcache_inv_page:
	and ~(PAGE_SIZE-1),d0
	mov PAGE_SIZE,d1
mn10300_local_dcache_inv_range2:
	add d0,d1
mn10300_local_dcache_inv_range:
	# If we are in writeback mode we check the start and end alignments,
	# and if they're not cacheline-aligned, we must flush any bits outside
	# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
	btst ~L1_CACHE_TAG_MASK,d0
	bne 1f
	btst ~L1_CACHE_TAG_MASK,d1
	beq 2f
1:
	bra mn10300_local_dcache_flush_inv_range
2:
#endif /* CONFIG_MN10300_CACHE_WBACK */
	movm [d2,d3,a2],(sp)
	mov CHCTR,a2
	movhu (a2),d2
	btst CHCTR_DCEN,d2
	beq mn10300_local_dcache_inv_range_end
#ifndef CONFIG_MN10300_CACHE_WBACK
	and L1_CACHE_TAG_MASK,d0 # round start addr down
	add L1_CACHE_BYTES,d1 # round end addr up
	and L1_CACHE_TAG_MASK,d1
#endif /* !CONFIG_MN10300_CACHE_WBACK */
	mov d0,a1
	clr d2 # we're going to clear tag RAM entries
	# read the tags from the tag RAM, and if a tag indicates a valid
	# cache line within the range then invalidate that line
	mov DCACHE_TAG(0,0),a0
	mov a1,d0
	and L1_CACHE_TAG_ENTRY,d0
	add d0,a0 # starting dcache tag RAM access address
	sub a1,d1
	lsr L1_CACHE_SHIFT,d1 # total number of entries to examine
	and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
mn10300_local_dcache_inv_range_outer_loop:
	LOCAL_CLI_SAVE(d3)

	# disable the dcache
	movhu (a2),d0
	and ~CHCTR_DCEN,d0
	movhu d0,(a2)

	# and wait for it to calm down
	setlb
	movhu (a2),d0
	btst CHCTR_DCBUSY,d0
	lne
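
	# Each tag RAM entry holds the upper address bits of the line it caches
	# together with status bits such as L1_CACHE_TAG_VALID. For each of the
	# four ways, the tag is XORed with the comparator address in a1 and the
	# low 12 bits are discarded; a zero result means the tag covers the
	# address currently being examined, and a matching valid tag is then
	# overwritten with zero (d2) to invalidate the line.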
mn10300_local_dcache_inv_range_loop:
	# process the way 0 slot
	mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
	btst L1_CACHE_TAG_VALID,d0
	beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline is not valid
	xor a1,d0
	lsr 12,d0
	bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
	mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_0:
	# process the way 1 slot
	mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
	btst L1_CACHE_TAG_VALID,d0
	beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline is not valid
	xor a1,d0
	lsr 12,d0
	bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
	mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_1:
	# process the way 2 slot
	mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
	btst L1_CACHE_TAG_VALID,d0
	beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline is not valid
	xor a1,d0
	lsr 12,d0
	bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
	mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_2:
	# process the way 3 slot
	mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
	btst L1_CACHE_TAG_VALID,d0
	beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline is not valid
	xor a1,d0
	lsr 12,d0
	bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
	mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_3:
	# Approximately every N steps we re-enable the dcache and check for
	# pending interrupts; we also break out of the inner loop once we've
	# reached the end of the range (in both cases the low bits of the
	# count covered by the interval mask are zero)
	add L1_CACHE_BYTES,a0
	add L1_CACHE_BYTES,a1
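	# keep the tag pointer within the way 0 tag array; the other ways are
	# reached via the explicit L1_CACHE_WAYDISP*n displacements above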
	and ~L1_CACHE_WAYDISP,a0
	add -1,d1
	btst mn10300_local_dcache_inv_range_intr_interval,d1
	bne mn10300_local_dcache_inv_range_loop

	# wait for the cache to finish what it's doing
	setlb
	movhu (a2),d0
	btst CHCTR_DCBUSY,d0
	lne

	# and reenable it
	or CHCTR_DCEN,d0
	movhu d0,(a2)
	movhu (a2),d0

	# re-enable interrupts
	# - we don't bother with delay NOPs as we'll have enough instructions
	#   before we disable interrupts again to give the interrupts a chance
	#   to happen
	LOCAL_IRQ_RESTORE(d3)

	# go around again if the counter hasn't yet reached zero
	add 0,d1
	bne mn10300_local_dcache_inv_range_outer_loop
mn10300_local_dcache_inv_range_end:
	ret [d2,d3,a2],12

	.size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
	.size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
	.size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2