; Copyright 2016 Castle Technology Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; This file is duplicated in the kernel sources and CallASWI. Please try to
; keep them in sync!
[ :LNOT: :DEF: InKernel
GBLL InKernel
InKernel SETL {TRUE}
]
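; Default to building for the kernel if the including source hasn't defined
; InKernel itself.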
; Potential flags not covered by the feature registers:
; * NOP after banked LDM?
; * StrongARM MSR bug?
; * ARM2 TEQP bug?
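; How each op combines the field test with the flag:
;   FeatureOp_OR  - set the flag if the field value >= the minimum
;   FeatureOp_AND - clear the flag unless the field value >= the minimum
;   FeatureOp_BIC - clear the flag if the field value >= the minimum
; Entries which give no explicit op default to FeatureOp_OR.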
FeatureOp_BIC * 0 << 3
FeatureOp_AND * 1 << 3
FeatureOp_OR * 2 << 3
MACRO
CPUFeature $register, $field, $minval, $feature, $op
ASSERT ($register < 8)
ASSERT ($field :AND: 28) == $field
ASSERT ($minval < 16)
[ "$op" <> ""
DCB $register + $op
|
DCB $register + FeatureOp_OR
]
DCB $field
DCB $minval
DCB CPUFeature_$feature
MEND
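; Each table entry is 4 bytes:
;   byte 0: feature register index (bits 0-2) + FeatureOp_* (bits 3+)
;   byte 1: bit position of the 4-bit field within the register
;   byte 2: minimum field value for the test to pass
;   byte 3: CPUFeature_* flag number to update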
FeatureRegsTable
CPUFeature 0, 24, 2, UDIV_SDIV
CPUFeature 0, 20, 1, BKPT
CPUFeature 0, 08, 1, BFC_BFI_SBFX_UBFX
CPUFeature 0, 04, 1, CLZ
CPUFeature 0, 00, 1, SWP_SWPB
CPUFeature 1, 24, 1, BX
CPUFeature 1, 24, 2, BLX
CPUFeature 1, 24, 3, Interworking_MOV_pc
CPUFeature 1, 20, 1, MOVW_MOVT
CPUFeature 1, 12, 1, SXTB_SXTH_UXTB_UXTH
CPUFeature 1, 12, 2, SXTAB_SXTAH_UXTAB_UXTAH
CPUFeature 1, 12, 2, SXTB16_SXTAB16_UXTB16_UXTAB16
CPUFeature 1, 08, 1, SRS_RFE_CPS
CPUFeature 2, 28, 1, REV_REV16_REVSH
CPUFeature 2, 28, 2, RBIT
CPUFeature 2, 24, 1, MRS_MSR
CPUFeature 2, 20, 1, UMULL_UMLAL
CPUFeature 2, 20, 2, UMAAL
CPUFeature 2, 16, 1, SMULL_SMLAL
CPUFeature 2, 16, 2, SMLAxy_SMLALxy_SMLAWy_SMULxy_SMULWy
CPUFeature 2, 16, 2, PSR_Q_bit
CPUFeature 2, 16, 3, SMLAlDx_SMLSlDx_SMMLAr_SMMLSr_SMMULr_SMUADx_SMUSDx
CPUFeature 2, 12, 2, MLS
CPUFeature 2, 08, 0, LDM_STM_noninterruptible
CPUFeature 2, 08, 1, LDM_STM_noninterruptible, FeatureOp_BIC
CPUFeature 2, 08, 1, LDM_STM_restartable
CPUFeature 2, 08, 2, LDM_STM_restartable, FeatureOp_BIC
CPUFeature 2, 08, 2, LDM_STM_continuable
CPUFeature 2, 04, 1, PLD
CPUFeature 2, 04, 3, PLI
CPUFeature 2, 04, 4, PLDW
CPUFeature 2, 00, 1, LDRD_STRD
CPUFeature 2, 00, 2, LDAx_STLx
CPUFeature 3, 24, 1, NOP_hints
CPUFeature 3, 12, 1, LDREX_STREX
CPUFeature 3, 12, 1, CLREX_LDREXB_LDREXH_STREXB_STREXH
CPUFeature 4, 20, 3, CLREX_LDREXB_LDREXH_STREXB_STREXH, FeatureOp_AND
CPUFeature 3, 12, 2, CLREX_LDREXB_LDREXH_STREXB_STREXH
CPUFeature 3, 12, 2, LDREXD_STREXD
CPUFeature 3, 04, 1, SSAT_USAT
CPUFeature 3, 04, 1, PSR_Q_bit
CPUFeature 3, 04, 3, PKHxy_xADD16_xADD8_xASX_xSUB16_xSUB8_xSAX_SEL
CPUFeature 3, 04, 3, PSR_GE_bits
CPUFeature 3, 04, 3, SXTB16_SXTAB16_UXTB16_UXTAB16, FeatureOp_AND
CPUFeature 3, 00, 1, QADD_QDADD_QDSUB_QSUB
CPUFeature 3, 00, 1, PSR_Q_bit
CPUFeature 4, 28, 1, SWP_SWPB_uniproc
CPUFeature 0, 00, 1, SWP_SWPB_uniproc, FeatureOp_BIC
CPUFeature 4, 16, 1, DMB_DSB_ISB
CPUFeature 4, 12, 1, SMC
CPUFeature 4, 00, 2, LDRHT_LDRSBT_LDRSHT_STRHT
CPUFeature 5, 16, 1, CRC32B_CRC32H_CRC32W_CRC32CB_CRC32CH_CRC32CW
CPUFeature 5, 12, 1, SHA256H_SHA256H2_SHA256SU0_SHA256SU1
CPUFeature 5, 08, 1, SHA1C_SHA1P_SHA1M_SHA1H_SHA1SU0_SHA1SU1
CPUFeature 5, 04, 1, AESE_AESD_AESMC_AESIMC
CPUFeature 5, 00, 1, SEVL
FeatureRegsTableEnd
[ :LNOT: InKernel
ARMv3 * 0
ARMv4 * 1
ARMv4T * 2
ARMv5 * 3
ARMv5T * 4
ARMv5TE * 5
ARMv5TEJ * 6
ARMv6 * 7
ARMvF * &F
; int Init_ARMarch(void)
; Returns the architecture (one of the values above) in r2. Also returns EQ if ARMv3, NE if ARMv4 or later.
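; The pre-CPUID ID register scheme is used:
;   bits 15:12 = 0 -> ARM 3 / ARM 6xx class part, reported as ARMv3
;   bits 15:12 = 7 -> ARM 7 family; bit 23 set means Thumb-aware, i.e. ARMv4T
;   otherwise      -> bits 19:16 give the architecture code directly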
Init_ARMarch ROUT
Entry "ip"
MRC p15, 0, ip, c0, c0, 0
ANDS r2, ip, #&0000F000
EXIT EQ ; ARM 3 or ARM 6
TEQ r2, #&00007000
BNE %FT20
TST ip, #&00800000 ; ARM 7 - check for Thumb
MOVNE r2, #ARMv4T
MOVEQ r2, #ARMv3
EXIT
20 ANDS r2, ip, #&000F0000 ; post-ARM 7
MOV r2, r2, LSR #16
EXIT
CPUFeatures
DCD 0
DCD 0
CPUFeatures_Size * 8
|
CPUFeatures_Size * ?CPUFeatures
]
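; SetFeature[cond] <feature>: set bit CPUFeature_<feature> in the CPUFeatures
; byte array, i.e. CPUFeatures[n >> 3] |= 1 << (n & 7) with n = CPUFeature_<feature>.
; r2 must point at the CPUFeatures block; corrupts lr.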
MACRO
SetFeature$cc $feature
LDR$cc.B lr, [r2, #CPUFeature_$feature :SHR: 3]
ORR$cc lr, lr, #1 :SHL: (CPUFeature_$feature :AND: 7)
STR$cc.B lr, [r2, #CPUFeature_$feature :SHR: 3]
MEND
ASSERT CPUFeatures_Size * 8 >= CPUFeature_Max
CalcCPUFeatures ROUT
; In:
; r0 -> instruction set feature registers
; r1 = number of registers
; r2 = CPU architecture field from MIDR, or -1 if ARM3 or older
; Out:
; CPUFeatures populated
Entry "r2-r8"
[ InKernel
LDR r2, =ZeroPage+CPUFeatures
|
ADR r2, CPUFeatures
]
ASSERT CPUFeatures_Size == 8
MOV r3, #0
STR r3, [r2]
STR r3, [r2, #4]
; Fill in the features defined by the instruction set feature registers
ADR r3, FeatureRegsTable
ADR r4, FeatureRegsTableEnd
MOV r8, #1
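; Register usage in the loop: r3 -> current table entry, r4 -> table end, r8 = 1.
; Roughly equivalent C, as an illustrative sketch only (regs/nregs correspond to
; r0/r1; set_flag/clear_flag stand in for the bit twiddling on CPUFeatures):
;   for (e = FeatureRegsTable; e != FeatureRegsTableEnd; e += 4) {
;     uint32_t val = (e[0] & 7) < nregs ? regs[e[0] & 7] : 0;
;     int pass = ((val >> e[1]) & 15) >= e[2];
;     switch (e[0] & ~7) {
;       case FeatureOp_OR:  if (pass)  set_flag(e[3]);   break;
;       case FeatureOp_BIC: if (pass)  clear_flag(e[3]); break;
;       case FeatureOp_AND: if (!pass) clear_flag(e[3]); break;
;     }
;   }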
10
LDRB r5, [r3], #4
AND r6, r5, #7
CMP r6, r1
LDRLT r6, [r0, r6, LSL #2]
MOVGE r6, #0 ; n.b. registers we weren't given read as zero - their entries must still be processed
LDRB r7, [r3, #-3]
MOV r6, r6, LSR r7
LDRB r7, [r3, #-2]
AND r6, r6, #15
CMP r6, r7
LDRB r6, [r3, #-1]
BIC r5, r5, #7
LDRB r7, [r2, r6, LSR #3]
AND lr, r6, #7
ASSERT FeatureOp_BIC < FeatureOp_AND
ASSERT FeatureOp_AND < FeatureOp_OR
; If we failed the test, massage the flags so that AND becomes BIC, and BIC and OR become a no-op (AND)
EORLT r5, r5, #FeatureOp_BIC :EOR: FeatureOp_AND
BICLT r5, r5, #FeatureOp_OR
CMP r5, #FeatureOp_AND
BICLT r7, r7, r8, LSL lr
ORRGT r7, r7, r8, LSL lr
STRNEB r7, [r2, r6, LSR #3]
CMP r3, r4
BNE %BT10
; Fill in extra features which we care about
[ InKernel
LDR r3, =ZeroPage
LDR r3, [r3, #ProcessorFlags]
|
Push "r0-r1"
MOV r0, #OSPlatformFeatures_ReadCodeFeatures
SWI XOS_PlatformFeatures
MOVVC r3, r0
MOVVS r3, #0
Pull "r0-r1"
]
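; r3 = code feature flags (CPUFlag_* bits), however they were obtained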
TST r3, #CPUFlag_No26bitMode ; Trust the current kernel about whether 26bit modes are supported, rather than trying to work it out ourselves
SetFeatureEQ TEQP
FRAMLDR r4,,r2 ; recover the architecture value originally passed in r2 (r2 now points at CPUFeatures)
CMP r4, #ARMv4
SetFeatureGE LDRSB
SetFeatureGE SYS_mode
SetFeatureLE MULS_flag_corruption
[ InKernel ; Kernel init is too early for SWIs, just check target machine type instead
[ "$Machine" <> "IOMD"
SetFeatureGE LDRH_LDRSH_STRH
]
|
BNE %FT35
; ARMv4. For simplicity assume that if IOMD is present LDRH/STRH won't work.
Push "r0-r4"
MOV r0, #2
SWI XOS_ReadSysInfo
AND r0, r0, #&ff00
CMP r0, #&ff00
Pull "r0-r4"
35
SetFeatureGE LDRH_LDRSH_STRH ; ARMv5+, or ARMv4 without IOMD
]
; Be careful with ARMv6 comparisons: the GT case (ARMvF) could still turn out to be an ARMv6 chip using the CPUID scheme
CMP r4, #ARMv6
SetFeatureLT MUL_Rd_Rn_restriction
SetFeatureLT LDR_STR_Rd_Rn_restriction
SetFeatureLE Rotated_loads
BLT %FT40
SetFeature Unaligned_loads
; Look at the cache type register to work out whether this is ARMv6 or ARMv7+
MRC p15, 0, r5, c0, c0, 1 ; Cache type register
TST r5, #1<<31 ; EQ = ARMv6, NE = ARMv7+
SetFeatureEQ Rotated_loads
; Guess whether WFE does something useful by whether we're a multicore chip
MRC p15, 0, r5, c0, c0, 5 ; MPIDR
MRCEQ p15, 0, r6, c0, c0, 0 ; ARMv6: Register is optional, so compare value against MIDR to see if it's implemented. There's no multiprocessing extensions flag so assume the check against MIDR will be good enough.
BICNE r6, r5, #1<<31 ; ARMv7+: Register is mandatory, but useful WFE still not guaranteed. So check if multiprocessing extensions implemented (bit 31 set).
TEQ r5, r6
SetFeatureNE WFE
; Detect instructions introduced by virtualisation extensions
MRC p15, 0, r5, c0, c1, 1 ; ID_PFR1
TST r5, #15<<12 ; ID_PFR1.Virtualization (ARMv7 virtualisation extensions)
SetFeatureNE HVC
TSTEQ r5, #15<<24 ; ID_PFR1.Virt_frac (ARMv8)
SetFeatureNE ERET_MSR_MRS_banked
40
EXIT
ReadCPUFeatures ROUT
; Out:
; CPUFeatures populated
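; Builds an array of (real or faked) instruction set attribute registers and
; passes it to CalcCPUFeatures:
; * InKernel builds use the ProcessorArch value the kernel has already worked out
; * CallASWI builds probe for MRS, then either call Init_ARMarch or fall back to
;   the nomrc/noswp information gathered by the main CallASWI code (ARM2/250/3)
; * CPUID-scheme parts (ARMvF) read ID_ISAR0-ID_ISAR5 directly; older parts use
;   the fake tables at the end of this file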
Entry "r0-r2"
[ InKernel
LDR r2, =ZeroPage
LDRB r2, [r2, #ProcessorArch]
|
; Check whether the MIDR should be present (the dummy MRS below is effectively a no-op on ARM2/ARM250/ARM3, leaving r0 = 0)
MOV r0, #0
MRS r0, CPSR
TEQ r0, #0
ADRNE lr, %FT20
BNE Init_ARMarch
; ARM2, ARM250 or ARM3
; Use the information that was collected by the main CallASWI code to work out which it is
MOV r2, #-1
ADR r0, Features_ARM3
LDR r1, nomrc
CMP r1, #0
ADRNE r0, Features_ARM250
LDRNE r1, noswp
CMPNE r1, #0
ADRNE r0, Features_ARM2
B %FT30
]
20
CMP r2, #ARMvF
BGE %FT50
; Old architecture, use fake ISAR registers
ADR r0, Features_ARMv3
ASSERT Features_ARMv4 = Features_ARMv3 + 20
ASSERT Features_ARMv4T = Features_ARMv4 + 20
ASSERT Features_ARMv5T = Features_ARMv4T + 20
ASSERT Features_ARMv5TE = Features_ARMv5T + 20
ASSERT Features_ARMv5TEJ = Features_ARMv5TE + 20
ASSERT Features_ARMv6 = Features_ARMv5TEJ + 20
ADD r0, r0, r2, LSL #4
ADD r0, r0, r2, LSL #2
; n.b. plain ARMv5 isn't handled properly - it ends up using the ARMv4T table
CMP r2, #ARMv5
SUBGE r0, r0, #20
30
MOV r1, #5
BL CalcCPUFeatures
EXIT
50
; Read the ISAR registers directly
MRC p15, 0, lr, c0, c2, 5
MRC p15, 0, r1, c0, c2, 4
MRC p15, 0, r0, c0, c2, 3
STMDB sp!, {r0-r1,lr}
MRC p15, 0, lr, c0, c2, 2
MRC p15, 0, r1, c0, c2, 1
MRC p15, 0, r0, c0, c2, 0
STMDB sp!, {r0-r1,lr}
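; sp now points at ID_ISAR0-ID_ISAR5, stored in ascending order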
MOV r0, sp
MOV r1, #6
BL CalcCPUFeatures
ADD sp, sp, #24
EXIT
PlatFeatSWI_ReadCPUFeatures ROUT
; In: r1 >= 0: flag bit to read
;     r1 <  0: return bit masks for page (NOT r1)
; LR stacked
; Out: Reading single flag:
; r0 = 0/1 flag bit value, or -1 if not known
; Reading bit masks:
; r0-r3 = flag values
; r4-r7 = validity masks
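; Page 0 is currently the only page: its validity mask covers flags 0 to
; CPUFeature_Max-1. Any other page reads as all zeros with a zero validity mask.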
ASSERT CPUFeature_Max <= 128
CMP r1, #-1
BEQ %FT20
BLT %FT40
; Read single flag
CMP r1, #CPUFeature_Max
MOVGE r0, #-1
BGE %FT30
[ InKernel
LDR r0, =ZeroPage+CPUFeatures
|
ADR r0, CPUFeatures
]
AND lr, r1, #7
LDRB r0, [r0, r1, LSR #3]
MOV r0, r0, LSR lr
AND r0, r0, #1
B %FT30
20
; Read page 0 of flags (the only page)
[ InKernel
LDR r0, =ZeroPage+CPUFeatures
|
ADR r0, CPUFeatures
]
ASSERT CPUFeatures_Size == 8
LDMIA r0, {r0-r1}
ASSERT CPUFeature_Max >= 32
MOV r4, #-1 ; flags 0-31 are all valid
ASSERT CPUFeature_Max <= 64
LDR r5, =&ffffffff :SHR: (64 - CPUFeature_Max) ; flags 32 to CPUFeature_Max-1 are valid
25
MOV r2, #0
MOV r3, #0
MOV r6, #0
MOV r7, #0
30
Pull "lr"
B SLVK
40
; Read unimplemented page of flags
MOV r0, #0
MOV r1, #0
MOV r4, #0
MOV r5, #0
B %BT25
; Fake ISAR registers for old architectures
; Thumb instruction details might not be entirely correct, but we don't really care about that
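; Each block below is five words, corresponding to ID_ISAR0-ID_ISAR4. ID_ISAR5
; is not faked; CalcCPUFeatures is called with r1 = 5 for these tables, so any
; ISAR5-based entries read the register as zero.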
[ :LNOT: InKernel
Features_ARM2
DCD &00000000
DCD &00000010
DCD &00001000
DCD &00000100
DCD &00000141
Features_ARM250
DCD &00000001
DCD &00000010
DCD &00001000
DCD &00000100
DCD &00000141
Features_ARM3
DCD &00010001
DCD &00000010
DCD &00001000
DCD &00000100
DCD &00000141
]
Features_ARMv3
DCD &00010001
DCD &00000010
DCD &01001000
DCD &00000100
DCD &00000141
Features_ARMv4
DCD &00010001
DCD &00000010
DCD &01111000
DCD &00000100
DCD &00000141
Features_ARMv4T
DCD &00010001
DCD &01000010
DCD &01111000
DCD &00000100
DCD &00000141
Features_ARMv5T
DCD &00120011
DCD &02000010
DCD &01111000
DCD &00000100
DCD &00000141
Features_ARMv5TE
DCD &00130011
DCD &02000010
DCD &01121011
DCD &00000101
DCD &00000141
Features_ARMv5TEJ
DCD &00130011
DCD &12000010
DCD &01121011
DCD &00000101
DCD &00000141
Features_ARMv6
DCD &00140011
DCD &12002111
DCD &11231011
DCD &00001131
DCD &00000141
; Assume any ARMv6 chip with extensions will use the CPUID scheme
LTORG
END