/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
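
/*
 * memcpy for 64-bit PowerPC.
 *
 * r3 = destination, r4 = source, r5 = length; the original destination
 * pointer is saved on the stack and reloaded into r3 before returning.
 * Copies of fewer than 16 bytes go to .Lshort_copy; otherwise the code
 * first aligns the destination to 8 bytes and then copies 16 bytes per
 * loop iteration, with a separate shifting loop for unaligned sources.
 */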
        .align  7
_GLOBAL(memcpy)
        std     r3,48(r1)       /* save destination pointer for return value */
        mtcrf   0x01,r5
        cmpldi  cr1,r5,16
        neg     r6,r3           # LS 3 bits = # bytes to 8-byte dest bdry
        andi.   r6,r6,7
        dcbt    0,r4
        blt     cr1,.Lshort_copy
        bne     .Ldst_unaligned
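
/*
 * Destination is 8-byte aligned (or has just been aligned by
 * .Ldst_unaligned below).  Copy 16 bytes per loop iteration with paired
 * ld/std; cr7 holds the low four bits of the length and selects the
 * odd-doubleword adjustment and the final sub-8-byte tail in .Ldo_tail.
 */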
.Ldst_aligned:
        andi.   r0,r4,7
        addi    r3,r3,-16
        bne     .Lsrc_unaligned
        srdi    r7,r5,4
        ld      r9,0(r4)
        addi    r4,r4,-8
        mtctr   r7
        andi.   r5,r5,7
        bf      cr7*4+0,2f
        addi    r3,r3,8
        addi    r4,r4,8
        mr      r8,r9
        blt     cr1,3f
1:      ld      r9,8(r4)
        std     r8,8(r3)
2:      ldu     r8,16(r4)
        stdu    r9,16(r3)
        bdnz    1b
3:      std     r8,8(r3)
        beq     3f
        addi    r3,r3,16
        ld      r9,8(r4)
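
/*
 * Store the final 1-7 bytes.  The remaining source bytes sit at the
 * most-significant end of r9; cr7 bits for 4, 2 and 1 select the stores,
 * rotating r9 so each piece lands in the low-order end before it is written.
 */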
.Ldo_tail:
        bf      cr7*4+1,1f
        rotldi  r9,r9,32
        stw     r9,0(r3)
        addi    r3,r3,4
1:      bf      cr7*4+2,2f
        rotldi  r9,r9,16
        sth     r9,0(r3)
        addi    r3,r3,2
2:      bf      cr7*4+3,3f
        rotldi  r9,r9,8
        stb     r9,0(r3)
3:      ld      r3,48(r1)       /* return dest pointer */
        blr
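
/*
 * Destination is 8-byte aligned but the source is not.  Read aligned
 * doublewords from the source and build each destination doubleword from
 * two of them with a sld/srd pair; r10 and r11 hold the left/right shift
 * counts (8 * source misalignment and its 64-bit complement).
 */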
.Lsrc_unaligned:
        srdi    r6,r5,3
        addi    r5,r5,-16
        subf    r4,r0,r4
        srdi    r7,r5,4
        sldi    r10,r0,3
        cmpdi   cr6,r6,3
        andi.   r5,r5,7
        mtctr   r7
        subfic  r11,r10,64
        add     r5,r5,r0
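
        # cr7 bit 0 (len & 8) selects the loop preamble below: taken is the
        # 4+2n loads / 3+2n stores variant, fall-through is 3+2n / 2+2n.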
        bt      cr7*4+0,0f

        ld      r9,0(r4)        # 3+2n loads, 2+2n stores
        ld      r0,8(r4)
        sld     r6,r9,r10
        ldu     r9,16(r4)
        srd     r7,r0,r11
        sld     r8,r0,r10
        or      r7,r7,r6
        blt     cr6,4f
        ld      r0,8(r4)
        # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
        b       2f

0:      ld      r0,0(r4)        # 4+2n loads, 3+2n stores
        ldu     r9,8(r4)
        sld     r8,r0,r10
        addi    r3,r3,-8
        blt     cr6,5f
        ld      r0,8(r4)
        srd     r12,r9,r11
        sld     r6,r9,r10
        ldu     r9,16(r4)
        or      r12,r8,r12
        srd     r7,r0,r11
        sld     r8,r0,r10
        addi    r3,r3,16
        beq     cr6,3f

        # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:      or      r7,r7,r6
        ld      r0,8(r4)
        std     r12,8(r3)
2:      srd     r12,r9,r11
        sld     r6,r9,r10
        ldu     r9,16(r4)
        or      r12,r8,r12
        stdu    r7,16(r3)
        srd     r7,r0,r11
        sld     r8,r0,r10
        bdnz    1b

3:      std     r12,8(r3)
        or      r7,r7,r6
4:      std     r7,16(r3)
5:      srd     r12,r9,r11
        or      r12,r8,r12
        std     r12,24(r3)
        beq     4f
        cmpwi   cr1,r5,8
        addi    r3,r3,32
        sld     r9,r9,r10
        ble     cr1,.Ldo_tail
        ld      r0,8(r4)
        srd     r7,r0,r11
        or      r9,r7,r9
        b       .Ldo_tail
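
/*
 * Destination is not 8-byte aligned.  Copy the 1-7 bytes needed to reach
 * an 8-byte boundary (the byte count is in r6, with its low bits in cr7),
 * then recompute cr7 from the remaining length and rejoin .Ldst_aligned.
 */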
.Ldst_unaligned:
        mtcrf   0x01,r6         # put #bytes to 8B bdry into cr7
        subf    r5,r6,r5
        li      r7,0
        cmpldi  cr1,r5,16
        bf      cr7*4+3,1f
        lbz     r0,0(r4)
        stb     r0,0(r3)
        addi    r7,r7,1
1:      bf      cr7*4+2,2f
        lhzx    r0,r7,r4
        sthx    r0,r7,r3
        addi    r7,r7,2
2:      bf      cr7*4+1,3f
        lwzx    r0,r7,r4
        stwx    r0,r7,r3
3:      mtcrf   0x01,r5
        add     r4,r6,r4
        add     r3,r6,r3
        b       .Ldst_aligned
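
/*
 * Copies of fewer than 16 bytes: cr7 holds the low four bits of the
 * length, and each bit selects an 8-, 4-, 2- or 1-byte copy in turn.
 */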
.Lshort_copy:
        bf      cr7*4+0,1f
        lwz     r0,0(r4)
        lwz     r9,4(r4)
        addi    r4,r4,8
        stw     r0,0(r3)
        stw     r9,4(r3)
        addi    r3,r3,8
1:      bf      cr7*4+1,2f
        lwz     r0,0(r4)
        addi    r4,r4,4
        stw     r0,0(r3)
        addi    r3,r3,4
2:      bf      cr7*4+2,3f
        lhz     r0,0(r4)
        addi    r4,r4,2
        sth     r0,0(r3)
        addi    r3,r3,2
3:      bf      cr7*4+3,4f
        lbz     r0,0(r4)
        stb     r0,0(r3)
4:      ld      r3,48(r1)       /* return dest pointer */
        blr