/*
* This file contains assembly-language implementations
* of IP-style 1's complement checksum routines.
*
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
*/
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit).
*
* __csum_partial(r3=buff, r4=len, r5=sum)
*/
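/*
 * Illustrative only, not part of the kernel sources: a C sketch of the
 * value this routine computes, assuming a big-endian view of the buffer
 * and kernel-style u8/u32/u64 types. The intermediate 32-bit partial sums
 * may differ bit-for-bit from the assembly below, but they are equivalent
 * once folded to 16 bits with end-around carry.
 *
 *	static u32 csum_partial_sketch(const u8 *buff, size_t len, u32 sum)
 *	{
 *		u64 acc = sum;
 *
 *		while (len >= 2) {		// 16-bit words, carries kept
 *			acc += (buff[0] << 8) | buff[1];
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)			// pad a trailing byte to 16 bits
 *			acc += buff[0] << 8;
 *
 *		while (acc >> 32)		// fold 64 bits down to 32
 *			acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (u32)acc;
 *	}
 */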
_GLOBAL(__csum_partial)
addic r0,r5,0 /* clear carry */
srdi. r6,r4,3 /* less than 8 bytes? */
beq .Lcsum_tail_word
/*
* If only halfword aligned, align to a double word. Since odd
* aligned addresses should be rare and they would require more
* work to calculate the correct checksum, we ignore that case
* and take the potential slowdown of unaligned loads.
*/
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 >> 1) & 0x3 */
beq .Lcsum_aligned
li r7,4
sub r6,r7,r6
mtctr r6
1:
lhz r6,0(r3) /* align to doubleword */
subi r4,r4,2
addi r3,r3,2
adde r0,r0,r6
bdnz 1b
.Lcsum_aligned:
/*
* We unroll the loop such that each iteration is 64 bytes with an
* entry and exit limb of 64 bytes, meaning a minimum size of
* 128 bytes.
*/
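/*
 * The count below is len/64 - 1: the first 32 bytes of the initial block
 * are loaded before entering the loop, and the final 64-byte block is
 * summed by the exit limb that follows the loop.
 */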
srdi. r6,r4,7
beq .Lcsum_tail_doublewords /* len < 128 */
srdi r6,r4,6
subi r6,r6,1
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
ld r6,0(r3)
ld r9,8(r3)
ld r10,16(r3)
ld r11,24(r3)
/*
* On POWER6 and POWER7 back to back addes take 2 cycles because of
* the XER dependency. This means the fastest this loop can go is
* 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
2:
adde r0,r0,r6
ld r12,32(r3)
ld r14,40(r3)
adde r0,r0,r9
ld r15,48(r3)
ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
adde r0,r0,r11
adde r0,r0,r12
adde r0,r0,r14
adde r0,r0,r15
ld r6,0(r3)
ld r9,8(r3)
adde r0,r0,r16
ld r10,16(r3)
ld r11,24(r3)
bdnz 2b
adde r0,r0,r6
ld r12,32(r3)
ld r14,40(r3)
adde r0,r0,r9
ld r15,48(r3)
ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
adde r0,r0,r11
adde r0,r0,r12
adde r0,r0,r14
adde r0,r0,r15
adde r0,r0,r16
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r4,r4,63
.Lcsum_tail_doublewords: /* Up to 127 bytes to go */
srdi. r6,r4,3
beq .Lcsum_tail_word
mtctr r6
3:
ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
bdnz 3b
andi. r4,r4,7
.Lcsum_tail_word: /* Up to 7 bytes to go */
srdi. r6,r4,2
beq .Lcsum_tail_halfword
lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
subi r4,r4,4
.Lcsum_tail_halfword: /* Up to 3 bytes to go */
srdi. r6,r4,1
beq .Lcsum_tail_byte
lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
subi r4,r4,2
.Lcsum_tail_byte: /* Up to 1 byte to go */
andi. r6,r4,1
beq .Lcsum_finish
lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
.Lcsum_finish:
addze r0,r0 /* add in final carry */
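/*
 * r0 now holds the complete 64-bit sum. Rotating it by 32 bits and adding
 * the two copies leaves the sum of the high and low words in the upper
 * half of the result; a carry out of the lower half wraps into that upper
 * half, giving the one's complement (end-around carry) fold of the two
 * 32-bit halves.
 */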
rldicl r4,r0,32,0 /* fold two 32 bit halves together */
add r3,r4,r0
srdi r3,r3,32
blr
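/*
 * Exception table helpers for csum_partial_copy_generic below. Each macro
 * places a local label on the following load or store and records a
 * (fault address, fixup address) pair in __ex_table, so an access fault
 * branches to the matching error handler. The "nr" variants are used
 * outside the unrolled loop, where r14-r16 and the stack frame have not
 * been saved and therefore do not need to be restored.
 */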
.macro srcnr
100:
.section __ex_table,"a"
.align 3
.llong 100b,.Lsrc_error_nr
.previous
.endm
.macro source
150:
.section __ex_table,"a"
.align 3
.llong 150b,.Lsrc_error
.previous
.endm
.macro dstnr
200:
.section __ex_table,"a"
.align 3
.llong 200b,.Ldest_error_nr
.previous
.endm
.macro dest
250:
.section __ex_table,"a"
.align 3
.llong 250b,.Ldest_error
.previous
.endm
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively. The caller must take any action
* required in this case (zeroing memory, recalculating partial checksum etc).
*
* csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
*/
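/*
 * The C-level declaration lives in asm/checksum.h; roughly (shown here for
 * reference, check the header for the exact prototype):
 *
 *	__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
 *					 __wsum sum, int *src_err, int *dst_err);
 */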
_GLOBAL(csum_partial_copy_generic)
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
beq .Lcopy_tail_word
/*
* If only halfword aligned, align to a double word. Since odd
* aligned addresses should be rare and they would require more
* work to calculate the correct checksum, we ignore that case
* and take the potential slowdown of unaligned loads.
*
* If the source and destination are relatively unaligned we only
* align the source. This keeps things simple.
*/
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 >> 1) & 0x3 */
beq .Lcopy_aligned
li r9,4
sub r6,r9,r6
mtctr r6
1:
srcnr; lhz r6,0(r3) /* align to doubleword */
subi r5,r5,2
addi r3,r3,2
adde r0,r0,r6
dstnr; sth r6,0(r4)
addi r4,r4,2
bdnz 1b
.Lcopy_aligned:
/*
* We unroll the loop such that each iteration is 64 bytes with an
* entry and exit limb of 64 bytes, meaning a minimum size of
* 128 bytes.
*/
srdi. r6,r5,7
beq .Lcopy_tail_doublewords /* len < 128 */
srdi r6,r5,6
subi r6,r6,1
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
source; ld r6,0(r3)
source; ld r9,8(r3)
source; ld r10,16(r3)
source; ld r11,24(r3)
/*
* On POWER6 and POWER7 back to back addes take 2 cycles because of
* the XER dependency. This means the fastest this loop can go is
* 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
2:
adde r0,r0,r6
source; ld r12,32(r3)
source; ld r14,40(r3)
adde r0,r0,r9
source; ld r15,48(r3)
source; ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
dest; std r6,0(r4)
dest; std r9,8(r4)
adde r0,r0,r11
dest; std r10,16(r4)
dest; std r11,24(r4)
adde r0,r0,r12
dest; std r12,32(r4)
dest; std r14,40(r4)
adde r0,r0,r14
dest; std r15,48(r4)
dest; std r16,56(r4)
addi r4,r4,64
adde r0,r0,r15
source; ld r6,0(r3)
source; ld r9,8(r3)
adde r0,r0,r16
source; ld r10,16(r3)
source; ld r11,24(r3)
bdnz 2b
adde r0,r0,r6
source; ld r12,32(r3)
source; ld r14,40(r3)
adde r0,r0,r9
source; ld r15,48(r3)
source; ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
dest; std r6,0(r4)
dest; std r9,8(r4)
adde r0,r0,r11
dest; std r10,16(r4)
dest; std r11,24(r4)
adde r0,r0,r12
dest; std r12,32(r4)
dest; std r14,40(r4)
adde r0,r0,r14
dest; std r15,48(r4)
dest; std r16,56(r4)
addi r4,r4,64
adde r0,r0,r15
adde r0,r0,r16
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r5,r5,63
.Lcopy_tail_doublewords: /* Up to 127 bytes to go */
srdi. r6,r5,3
beq .Lcopy_tail_word
mtctr r6
3:
srcnr; ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
dstnr; std r6,0(r4)
addi r4,r4,8
bdnz 3b
andi. r5,r5,7
.Lcopy_tail_word: /* Up to 7 bytes to go */
srdi. r6,r5,2
beq .Lcopy_tail_halfword
srcnr; lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
dstnr; stw r6,0(r4)
addi r4,r4,4
subi r5,r5,4
.Lcopy_tail_halfword: /* Up to 3 bytes to go */
srdi. r6,r5,1
beq .Lcopy_tail_byte
srcnr; lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
dstnr; sth r6,0(r4)
addi r4,r4,2
subi r5,r5,2
.Lcopy_tail_byte: /* Up to 1 byte to go */
andi. r6,r5,1
beq .Lcopy_finish
srcnr; lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
dstnr; stb r6,0(r4)
.Lcopy_finish:
addze r0,r0 /* add in final carry */
rldicl r4,r0,32,0 /* fold two 32 bit halves together */
add r3,r4,r0
srdi r3,r3,32
blr
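/*
 * Fault fixups. The plain error entry points are reached from the unrolled
 * loop, so they must pop the stack frame and restore r14-r16 before
 * reporting; the _nr entry points are reached from the tail code, where no
 * frame was created. -EFAULT is stored only if the corresponding error
 * pointer is non-NULL.
 */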
.Lsrc_error:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
.Lsrc_error_nr:
cmpdi 0,r7,0
beqlr
li r6,-EFAULT
stw r6,0(r7)
blr
.Ldest_error:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
.Ldest_error_nr:
cmpdi 0,r8,0
beqlr
li r6,-EFAULT
stw r6,0(r8)
blr