; Copyright 2000 Pace Micro Technology plc
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
        GET     Hdr:ListOpts
        GET     Hdr:Macros
        GET     Hdr:System
        $GetCPU
        $GetMEMM

        GET     hdr.Options

        GET     Hdr:PublicWS
        GET     Hdr:KernelWS

        GET     hdr.Copro15ops
        GET     hdr.ARMops

v7      RN      10

        EXPORT  Init_ARMarch
        EXPORT  ARM_Analyse
        EXPORT  ARM_PrintProcessorType

        AREA    KernelCode,CODE,READONLY

; ARM keep changing their mind about ID field layout.
; Here's a summary, courtesy of the ARM ARM:
;
; pre-ARM 7:   xxxx0xxx
; ARM 7:       xxxx7xxx where bit 23 indicates v4T/~v3
; post-ARM 7:  xxxanxxx where n<>0 or 7 and a = architecture (1=v4,2=v4T,3=v5,4=v5T,5=v5TE,6=v5TEJ,7=v6)
;
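; For instance (illustrative decode only), an ARM920T reads its ID register as
; &41129200: the "n" nibble is 9, and the architecture nibble (bits 16-19) is
; 2, i.e. ARMv4T.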

; int Init_ARMarch(void)
; Returns architecture, as above in a1. Also EQ if ARMv3, NE if ARMv4 or later.
; Corrupts only ip, no RAM usage.
Init_ARMarch
        ARM_read_ID ip
        ANDS    a1, ip, #&0000F000
        MOVEQ   pc, lr                          ; ARM 3 or ARM 6
        TEQ     a1, #&00007000
        BNE     %FT20
        TST     ip, #&00800000                  ; ARM 7 - check for Thumb
        MOVNE   a1, #ARMv4T
        MOVEQ   a1, #ARMv3
        MOV     pc, lr
20      ANDS    a1, ip, #&000F0000              ; post-ARM 7
        MOV     a1, a1, LSR #16
        MOV     pc, lr

; Called pre-MMU to set up some (temporary) PCBTrans and PPLTrans pointers,
; and the initial PageTable_PageFlags value
; Also used post-MMU for VMSAv6 case
; In:
;   a1 -> ZeroPage
; Out:
;   a1-a4, ip corrupt
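; (Broadly speaking: PCBTrans tables map RISC OS cache/buffer policy numbers
; to page table attribute bits, and PPLTrans tables map page protection levels
; to access permission bits.)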
Init_PCBTrans   ROUT
        LDR     a2, =AreaFlags_PageTablesAccess :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable
        STR     a2, [a1, #PageTable_PageFlags]
 [ MEMM_Type = "VMSAv6"
        ADRL    a2, XCBTableVMSAv6
        STR     a2, [a1, #MMU_PCBTrans]

        ; Use shareable pages if we're a multicore chip
        ; N.B. it's important that we get this correct - single-core chips may
        ; treat shareable memory as non-cacheable (e.g. ARM11)

        ADRL    a4, PPLTransNonShareable
        ; Look at the cache type register to work out whether this is ARMv6 or ARMv7+
        MRC     p15, 0, a2, c0, c0, 1   ; Cache type register
        TST     a2, #1<<31              ; EQ = ARMv6, NE = ARMv7+
        MRC     p15, 0, a2, c0, c0, 5   ; MPIDR
        BNE     %FT50
        MRC     p15, 0, a3, c0, c0, 0   ; ARMv6: MPIDR is optional, so compare value against MIDR to see if it's implemented. There's no multiprocessing extensions flag so assume the check against MIDR will be good enough.
        TEQ     a2, a3
        ADDNE   a4, a4, #PPLTransShareable-PPLTransNonShareable
        B       %FT90
50
        AND     a2, a2, #&c0000000      ; ARMv7+: MPIDR is mandatory, but multicore not guaranteed. Check if multiprocessing extensions implemented (bit 31 set), and not uniprocessor (bit 30 clear).
        TEQ     a2, #&80000000
        ADDEQ   a4, a4, #PPLTransShareable-PPLTransNonShareable
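        ; (MPIDR bits 31:30 - 10 = multiprocessing extensions, SMP-capable;
        ;  11 = MP extensions but uniprocessor; 0x = no MP extensions reported)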
90
        STR     a4, [a1, #MMU_PPLTrans]
 |
        ; Detecting the right PCBTrans table to use is complex
        ; However we know that, pre-MMU, we only use the default cache policy,
        ; and we don't use CNB memory
        ; So just go for a safe PCBTrans, like SA110, and the non-extended
        ; PPLTrans
        ADRL    a2, XCBTableSA110
        STR     a2, [a1, #MMU_PCBTrans]
        ADRL    a2, PPLTrans
     [ ARM6support
        ARM_6   a3
        ADDEQ   a2, a2, #PPLTransARM6-PPLTrans
     ]
        STR     a2, [a1, #MMU_PPLTrans]
 ]
        MOV     pc, lr

ARM_Analyse
        MOV     a2, lr
        BL      Init_ARMarch
        MOV     lr, a2
 [ MEMM_Type = "VMSAv6"
        CMP     a1, #ARMvF
        BEQ     ARM_Analyse_Fancy ; New ARM; use the feature regs to perform all the setup
 ]
        Push    "v1,v2,v5,v6,v7,lr"
        ARM_read_ID v1
        ARM_read_cachetype v2
        LDR     v6, =ZeroPage

        ADRL    v7, KnownCPUTable
FindARMloop
        LDMIA   v7!, {a1, a2}                   ; See if it's a known ARM
        CMP     a1, #-1
        BEQ     %FT20
        AND     a2, v1, a2
        TEQ     a1, a2
        ADDNE   v7, v7, #8
        BNE     FindARMloop
        TEQ     v2, v1                          ; If we don't have cache attributes, read from table
        LDREQ   v2, [v7]

20      TEQ     v2, v1
        BEQ     %BT20                           ; Cache unknown: panic

        CMP     a1, #-1
        LDRNEB  a2, [v7, #4]
        MOVEQ   a2, #ARMunk
        STRB    a2, [v6, #ProcessorType]

        ASSERT  CT_Isize_pos = 0
        MOV     a1, v2
        ADD     a2, v6, #ICache_Info
        BL      EvaluateCache
        MOV     a1, v2, LSR #CT_Dsize_pos
        ADD     a2, v6, #DCache_Info
        BL      EvaluateCache

        AND     a1, v2, #CT_ctype_mask
        MOV     a1, a1, LSR #CT_ctype_pos
        STRB    a1, [v6, #Cache_Type]

        MOV     v5, #CPUFlag_32bitOS
        [ HiProcVecs
        ORR     v5, v5, #CPUFlag_HiProcVecs
        ]

        TST     v2, #CT_S
        ORRNE   v5, v5, #CPUFlag_SplitCache+CPUFlag_SynchroniseCodeAreas

        [ CacheOff
        ORR     v5, v5, #CPUFlag_SynchroniseCodeAreas
        |
        ARM_read_control a1                     ; if Z bit set then we have branch prediction,
        TST     a1, #MMUC_Z                     ; so we need OS_SynchroniseCodeAreas even if not
        ORRNE   v5, v5, #CPUFlag_SynchroniseCodeAreas   ; split caches
        ]

        ; Test abort timing (base restored or base updated)
        MOV     a1, #&8000
        LDR     a2, [a1], #4                    ; Will abort - DAb handler will continue execution
        TEQ     a1, #&8000
        ORREQ   v5, v5, #CPUFlag_BaseRestored

        ; Check store of PC
30      STR     pc, [sp, #-4]!
        ADR     a2, %BT30 + 8
        LDR     a1, [sp], #4
        TEQ     a1, a2
        ORREQ   v5, v5, #CPUFlag_StorePCplus8
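        ; (Some older implementations store PC+12 instead; the flag records
        ;  that this one stores PC+8)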

35

        BL      Init_ARMarch
        STRB    a1, [v6, #ProcessorArch]

        TEQ     a1, #ARMv3                      ; assume long multiply available
        ORRNE   v5, v5, #CPUFlag_LongMul        ; if v4 or later
        TEQNE   a1, #ARMv4                      ; assume 26-bit available
        ORRNE   v5, v5, #CPUFlag_No26bitMode    ; iff v3 or v4 (not T)
        TEQNE   a1, #ARMv5                      ; assume Thumb available
        ORRNE   v5, v5, #CPUFlag_Thumb          ; iff not v3,v4,v5

        MSR     CPSR_f, #Q32_bit
        MRS     lr, CPSR
        TST     lr, #Q32_bit
        ORRNE   v5, v5, #CPUFlag_DSP
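        ; (The Q flag only exists on cores with the DSP/E extensions, so if
        ;  the bit sticks in the CPSR the enhanced multiply/saturate
        ;  instructions are present)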

        TEQ     a1, #ARMv6
        ORREQ   v5, v5, #CPUFlag_LoadStoreEx    ; Implicit clear of CPUFlag_NoSWP for <= ARMv6

        LDRB    v4, [v6, #ProcessorType]

        TEQ     v4, #ARMunk                     ; Modify deduced flags
        ADRNEL  lr, KnownCPUFlags
        ADDNE   lr, lr, v4, LSL #3
        LDMNEIA lr, {a2, a3}
        ORRNE   v5, v5, a2
        BICNE   v5, v5, a3

 [ XScaleJTAGDebug
        TST     v5, #CPUFlag_XScale
        BEQ     %FT40

        MRC     p14, 0, a2, c10, c0             ; Read debug control register
        TST     a2, #&80000000
        ORRNE   v5, v5, #CPUFlag_XScaleJTAGconnected
        MOVEQ   a2, #&C000001C                  ; enable hot debug
        MCREQ   p14, 0, a2, c10, c0
        BNE     %FT40
40
 ]

        ORR     v5, v5, #CPUFlag_ExtraReasonCodesFixed
        STR     v5, [v6, #ProcessorFlags]

        ; Now, a1 = processor architecture (ARMv3, ARMv4 ...)
        ;      v4 = processor type (ARM600, ARM610, ...)
        ;      v5 = processor flags

        LDRB    a2, [v6, #Cache_Type]

 [ MEMM_Type = "ARM600"
        CMP     a1, #ARMv4
        BLO     Analyse_ARMv3                   ; eg. ARM710

        TEQ     a2, #CT_ctype_WT
        TSTEQ   v5, #CPUFlag_SplitCache
        BEQ     Analyse_WriteThroughUnified     ; eg. ARM7TDMI derivative

        TEQ     a2, #CT_ctype_WB_Crd
        BEQ     Analyse_WB_Crd                  ; eg. StrongARM

        TEQ     a2, #CT_ctype_WB_Cal_LD
        BEQ     Analyse_WB_Cal_LD               ; assume XScale
 ] ; MEMM_Type = "ARM600"

        TEQ     a2, #CT_ctype_WB_CR7_LDa
        BEQ     Analyse_WB_CR7_LDa              ; eg. ARM9

        ; others ...

WeirdARMPanic
        B       WeirdARMPanic                   ; stiff :)

 [ MEMM_Type = "ARM600"
Analyse_ARMv3
        ADRL    a1, NullOp
        ADRL    a2, Cache_Invalidate_ARMv3
        ADRL    a3, DSB_ReadWrite_ARMv3
        ADRL    a4, TLB_Invalidate_ARMv3
        ADRL    ip, TLB_InvalidateEntry_ARMv3

        STR     a1, [v6, #Proc_Cache_CleanAll]
        STR     a1, [v6, #Proc_Cache_CleanRange]
        STR     a2, [v6, #Proc_Cache_CleanInvalidateAll]
        STR     a2, [v6, #Proc_Cache_CleanInvalidateRange]
        STR     a2, [v6, #Proc_Cache_InvalidateAll]
        STR     a2, [v6, #Proc_Cache_InvalidateRange]
        STR     a1, [v6, #Proc_ICache_InvalidateAll]
        STR     a1, [v6, #Proc_ICache_InvalidateRange]
        STR     a3, [v6, #Proc_DSB_ReadWrite]
        STR     a3, [v6, #Proc_DSB_Write]
        STR     a1, [v6, #Proc_DSB_Read]
        STR     a3, [v6, #Proc_DMB_ReadWrite]
        STR     a3, [v6, #Proc_DMB_Write]
        STR     a1, [v6, #Proc_DMB_Read]
        STR     a4, [v6, #Proc_TLB_InvalidateAll]
        STR     ip, [v6, #Proc_TLB_InvalidateEntry]
        STR     a1, [v6, #Proc_IMB_Full]
        STR     a1, [v6, #Proc_IMB_Range]
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_ARMv3
        ADRL    a2, MMU_ChangingEntry_ARMv3
        ADRL    a3, MMU_ChangingUncached_ARMv3
        ADRL    a4, MMU_ChangingUncachedEntry_ARMv3
        STR     a1, [v6, #Proc_MMU_Changing]
        STR     a2, [v6, #Proc_MMU_ChangingEntry]
        STR     a3, [v6, #Proc_MMU_ChangingUncached]
        STR     a4, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_ARMv3
        ADRL    a2, MMU_ChangingUncachedEntries_ARMv3
        ADRL    a3, Cache_RangeThreshold_ARMv3
        ADRL    a4, Cache_Examine_Simple
        STR     a1, [v6, #Proc_MMU_ChangingEntries]
        STR     a2, [v6, #Proc_MMU_ChangingUncachedEntries]
        STR     a3, [v6, #Proc_Cache_RangeThreshold]
        STR     a4, [v6, #Proc_Cache_Examine]

        ADRL    a1, XCBTableWT
        STR     a1, [v6, #MMU_PCBTrans]
        B       %FT90

Analyse_WriteThroughUnified
        ADRL    a1, NullOp
        ADRL    a2, Cache_InvalidateUnified
        TST     v5, #CPUFlag_NoWBDrain
        ADRNEL  a3, DSB_ReadWrite_OffOn
        ADREQL  a3, DSB_ReadWrite
        ADRL    a4, TLB_Invalidate_Unified
        ADRL    ip, TLB_InvalidateEntry_Unified

        STR     a1, [v6, #Proc_Cache_CleanAll]
        STR     a1, [v6, #Proc_Cache_CleanRange]
        STR     a2, [v6, #Proc_Cache_CleanInvalidateAll]
        STR     a2, [v6, #Proc_Cache_CleanInvalidateRange]
        STR     a2, [v6, #Proc_Cache_InvalidateAll]
        STR     a2, [v6, #Proc_Cache_InvalidateRange]
        STR     a1, [v6, #Proc_ICache_InvalidateAll]
        STR     a1, [v6, #Proc_ICache_InvalidateRange]
        STR     a3, [v6, #Proc_DSB_ReadWrite]
        STR     a3, [v6, #Proc_DSB_Write]
        STR     a1, [v6, #Proc_DSB_Read]
        STR     a3, [v6, #Proc_DMB_ReadWrite]
        STR     a3, [v6, #Proc_DMB_Write]
        STR     a1, [v6, #Proc_DMB_Read]
        STR     a4, [v6, #Proc_TLB_InvalidateAll]
        STR     ip, [v6, #Proc_TLB_InvalidateEntry]
        STR     a1, [v6, #Proc_IMB_Full]
        STR     a1, [v6, #Proc_IMB_Range]
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_Writethrough
        ADRL    a2, MMU_ChangingEntry_Writethrough
        ADRL    a3, MMU_ChangingUncached
        ADRL    a4, MMU_ChangingUncachedEntry
        STR     a1, [v6, #Proc_MMU_Changing]
        STR     a2, [v6, #Proc_MMU_ChangingEntry]
        STR     a3, [v6, #Proc_MMU_ChangingUncached]
        STR     a4, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_Writethrough
        ADRL    a2, MMU_ChangingUncachedEntries
        ADRL    a3, Cache_RangeThreshold_Writethrough
        ADRL    a4, Cache_Examine_Simple
        STR     a1, [v6, #Proc_MMU_ChangingEntries]
        STR     a2, [v6, #Proc_MMU_ChangingUncachedEntries]
        STR     a3, [v6, #Proc_Cache_RangeThreshold]
        STR     a4, [v6, #Proc_Cache_Examine]

        ADRL    a1, XCBTableWT
        STR     a1, [v6, #MMU_PCBTrans]
        B       %FT90
 ] ; MEMM_Type = "ARM600"

Analyse_WB_CR7_LDa
        TST     v5, #CPUFlag_SplitCache
        BEQ     WeirdARMPanic             ; currently, only support harvard caches here (eg. ARM920)

        ADRL    a1, Cache_CleanInvalidateAll_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_CleanInvalidateAll]

        ADRL    a1, Cache_CleanInvalidateRange_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_CleanInvalidateRange]

        ADRL    a1, Cache_CleanAll_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_CleanAll]

        ADRL    a1, Cache_CleanRange_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_CleanRange]

        ADRL    a1, Cache_InvalidateAll_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_InvalidateAll]

        ADRL    a1, Cache_InvalidateRange_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_InvalidateRange]

        ADRL    a1, Cache_RangeThreshold_WB_CR7_LDa
        STR     a1, [v6, #Proc_Cache_RangeThreshold]

        ADRL    a1, Cache_Examine_Simple
        STR     a1, [v6, #Proc_Cache_Examine]

        ADRL    a1, ICache_InvalidateAll_WB_CR7_LDa
        STR     a1, [v6, #Proc_ICache_InvalidateAll]

        ADRL    a1, ICache_InvalidateRange_WB_CR7_LDa
        STR     a1, [v6, #Proc_ICache_InvalidateRange]

        ADRL    a1, TLB_InvalidateAll_WB_CR7_LDa
        STR     a1, [v6, #Proc_TLB_InvalidateAll]

        ADRL    a1, TLB_InvalidateEntry_WB_CR7_LDa
        STR     a1, [v6, #Proc_TLB_InvalidateEntry]

 [ MEMM_Type = "ARM600"
        ; <= ARMv5, just use the drain write buffer MCR
        ADRL    a1, DSB_ReadWrite_WB_CR7_LDa
        ADRL    a2, NullOp
        STR     a1, [v6, #Proc_DSB_ReadWrite]
        STR     a1, [v6, #Proc_DSB_Write]
        STR     a2, [v6, #Proc_DSB_Read]
        STR     a1, [v6, #Proc_DMB_ReadWrite]
        STR     a1, [v6, #Proc_DMB_Write]
        STR     a2, [v6, #Proc_DMB_Read]
 |
        ; ARMv6(+), use the ARMv6 barrier MCRs
        ADRL    a1, DSB_ReadWrite_ARMv6
        STR     a1, [v6, #Proc_DSB_ReadWrite]
        STR     a1, [v6, #Proc_DSB_Write]
        STR     a1, [v6, #Proc_DSB_Read]
        ADRL    a1, DMB_ReadWrite_ARMv6
        STR     a1, [v6, #Proc_DMB_ReadWrite]
        STR     a1, [v6, #Proc_DMB_Write]
        STR     a1, [v6, #Proc_DMB_Read]
 ]

        ADRL    a1, IMB_Full_WB_CR7_LDa
        STR     a1, [v6, #Proc_IMB_Full]

        ADRL    a1, IMB_Range_WB_CR7_LDa
        STR     a1, [v6, #Proc_IMB_Range]

        ADRL    a1, IMB_List_WB_CR7_LDa
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_Changing]

        ADRL    a1, MMU_ChangingEntry_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_ChangingEntry]

        ADRL    a1, MMU_ChangingUncached_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_ChangingUncached]

        ADRL    a1, MMU_ChangingUncachedEntry_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_ChangingEntries]

        ADRL    a1, MMU_ChangingUncachedEntries_WB_CR7_LDa
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntries]

        LDRB    a2, [v6, #DCache_Associativity]

        MOV     a3, #256
        MOV     a4, #8           ; to find log2(ASSOC), rounded up
Analyse_WB_CR7_LDa_L1
        MOV     a3, a3, LSR #1
        SUB     a4, a4, #1
        CMP     a2, a3
        BLO     Analyse_WB_CR7_LDa_L1
        ADDHI   a4, a4, #1

        RSB     a2, a4, #32
        MOV     a3, #1
        MOV     a3, a3, LSL a2
        STR     a3, [v6, #DCache_IndexBit]
        LDR     a4, [v6, #DCache_NSets]
        LDRB    a2, [v6, #DCache_LineLen]
        SUB     a4, a4, #1
        MUL     a4, a2, a4
        STR     a4, [v6, #DCache_IndexSegStart]
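        ; (Roughly: IndexBit is the increment for the way number held in the
        ;  top bits of a clean/invalidate-by-index MCR operand, and
        ;  IndexSegStart is the byte offset of the last set, so the full-cache
        ;  loops can step through every set/way combination)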

        MOV     a2, #64*1024                         ; arbitrary-ish
        STR     a2, [v6, #DCache_RangeThreshold]

 [ MEMM_Type = "ARM600"
        ADRL    a1, XCBTableWBR                      ; assume read-allocate WB/WT cache
        STR     a1, [v6, #MMU_PCBTrans]
 ]

        B       %FT90

 [ MEMM_Type = "ARM600"
Analyse_WB_Crd
        TST     v5, #CPUFlag_SplitCache
        BEQ     WeirdARMPanic             ; currently, only support harvard

        ADRL    a1, Cache_CleanInvalidateAll_WB_Crd
        STR     a1, [v6, #Proc_Cache_CleanInvalidateAll]

        ADRL    a1, Cache_CleanInvalidateRange_WB_Crd
        STR     a1, [v6, #Proc_Cache_CleanInvalidateRange]

        ADRL    a1, Cache_CleanAll_WB_Crd
        STR     a1, [v6, #Proc_Cache_CleanAll]

        ADRL    a1, Cache_CleanRange_WB_Crd
        STR     a1, [v6, #Proc_Cache_CleanRange]

        ADRL    a1, Cache_InvalidateAll_WB_Crd
        STR     a1, [v6, #Proc_Cache_InvalidateAll]

        ADRL    a1, Cache_InvalidateRange_WB_Crd
        STR     a1, [v6, #Proc_Cache_InvalidateRange]

        ADRL    a1, Cache_RangeThreshold_WB_Crd
        STR     a1, [v6, #Proc_Cache_RangeThreshold]

        ADRL    a1, Cache_Examine_Simple
        STR     a1, [v6, #Proc_Cache_Examine]

        ADRL    a1, ICache_InvalidateAll_WB_Crd
        STR     a1, [v6, #Proc_ICache_InvalidateAll]

        ADRL    a1, ICache_InvalidateRange_WB_Crd
        STR     a1, [v6, #Proc_ICache_InvalidateRange]

        ADRL    a1, TLB_InvalidateAll_WB_Crd
        STR     a1, [v6, #Proc_TLB_InvalidateAll]

        ADRL    a1, TLB_InvalidateEntry_WB_Crd
        STR     a1, [v6, #Proc_TLB_InvalidateEntry]

        ADRL    a1, DSB_ReadWrite_WB_Crd
        ADRL    a2, NullOp
        STR     a1, [v6, #Proc_DSB_ReadWrite]
        STR     a1, [v6, #Proc_DSB_Write]
        STR     a2, [v6, #Proc_DSB_Read]
        STR     a1, [v6, #Proc_DMB_ReadWrite]
        STR     a1, [v6, #Proc_DMB_Write]
        STR     a2, [v6, #Proc_DMB_Read]

        ADRL    a1, IMB_Full_WB_Crd
        STR     a1, [v6, #Proc_IMB_Full]

        ADRL    a1, IMB_Range_WB_Crd
        STR     a1, [v6, #Proc_IMB_Range]

        ADRL    a1, IMB_List_WB_Crd
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_WB_Crd
        STR     a1, [v6, #Proc_MMU_Changing]

        ADRL    a1, MMU_ChangingEntry_WB_Crd
        STR     a1, [v6, #Proc_MMU_ChangingEntry]

        ADRL    a1, MMU_ChangingUncached_WB_Crd
        STR     a1, [v6, #Proc_MMU_ChangingUncached]

        ADRL    a1, MMU_ChangingUncachedEntry_WB_Crd
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_WB_Crd
        STR     a1, [v6, #Proc_MMU_ChangingEntries]

        ADRL    a1, MMU_ChangingUncachedEntries_WB_Crd
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntries]

        LDR     a2, =DCacheCleanAddress
        STR     a2, [v6, #DCache_CleanBaseAddress]
        STR     a2, [v6, #DCache_CleanNextAddress]
        MOV     a2, #64*1024                       ;arbitrary-ish threshold
        STR     a2, [v6, #DCache_RangeThreshold]

        LDRB    a2, [v6, #ProcessorType]
        TEQ     a2, #SA110
        TEQNE   a2, #SA110_preRevT
        ADREQL  a2, XCBTableSA110
        BEQ     Analyse_WB_Crd_finish
        TEQ     a2, #SA1100
        TEQNE   a2, #SA1110
        ADREQL  a2, XCBTableSA1110
        ADRNEL  a2, XCBTableWBR
Analyse_WB_Crd_finish
        STR     a2, [v6, #MMU_PCBTrans]
        B       %FT90

Analyse_WB_Cal_LD
        TST     v5, #CPUFlag_SplitCache
        BEQ     WeirdARMPanic             ; currently, only support harvard

        ADRL    a1, Cache_CleanInvalidateAll_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_CleanInvalidateAll]

        ADRL    a1, Cache_CleanInvalidateRange_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_CleanInvalidateRange]

        ADRL    a1, Cache_CleanAll_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_CleanAll]

        ADRL    a1, Cache_CleanRange_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_CleanRange]

        ADRL    a1, Cache_InvalidateAll_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_InvalidateAll]

        ADRL    a1, Cache_InvalidateRange_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_InvalidateRange]

        ADRL    a1, Cache_RangeThreshold_WB_Cal_LD
        STR     a1, [v6, #Proc_Cache_RangeThreshold]

        ADRL    a1, Cache_Examine_Simple
        STR     a1, [v6, #Proc_Cache_Examine]

        ADRL    a1, ICache_InvalidateAll_WB_Cal_LD
        STR     a1, [v6, #Proc_ICache_InvalidateAll]

        ADRL    a1, ICache_InvalidateRange_WB_Cal_LD
        STR     a1, [v6, #Proc_ICache_InvalidateRange]

        ADRL    a1, TLB_InvalidateAll_WB_Cal_LD
        STR     a1, [v6, #Proc_TLB_InvalidateAll]

        ADRL    a1, TLB_InvalidateEntry_WB_Cal_LD
        STR     a1, [v6, #Proc_TLB_InvalidateEntry]

        ADRL    a1, DSB_ReadWrite_WB_Cal_LD
        ADRL    a2, NullOp ; Assuming barriers are only used for non-cacheable memory, a read barrier routine isn't necessary on XScale because all non-cacheable reads complete in-order with read/write accesses to other NC locations
        STR     a1, [v6, #Proc_DSB_ReadWrite]
        STR     a1, [v6, #Proc_DSB_Write]
        STR     a2, [v6, #Proc_DSB_Read]
        STR     a1, [v6, #Proc_DMB_ReadWrite]
        STR     a1, [v6, #Proc_DMB_Write]
        STR     a2, [v6, #Proc_DMB_Read]

        ADRL    a1, IMB_Full_WB_Cal_LD
        STR     a1, [v6, #Proc_IMB_Full]

        ADRL    a1, IMB_Range_WB_Cal_LD
        STR     a1, [v6, #Proc_IMB_Range]

        ADRL    a1, IMB_List_WB_Cal_LD
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_Changing]

        ADRL    a1, MMU_ChangingEntry_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_ChangingEntry]

        ADRL    a1, MMU_ChangingUncached_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_ChangingUncached]

        ADRL    a1, MMU_ChangingUncachedEntry_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_ChangingEntries]

        ADRL    a1, MMU_ChangingUncachedEntries_WB_Cal_LD
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntries]

        LDR     a2, =DCacheCleanAddress
        STR     a2, [v6, #DCache_CleanBaseAddress]
        STR     a2, [v6, #DCache_CleanNextAddress]

  [ XScaleMiniCache
        !       1, "You need to arrange for XScale mini-cache clean area to be mini-cacheable"
        LDR     a2, =DCacheCleanAddress + 4 * 32*1024
        STR     a2, [v6, #MCache_CleanBaseAddress]
        STR     a2, [v6, #MCache_CleanNextAddress]
  ]


  ; arbitrary-ish values, mini cache makes global op significantly more expensive
  [ XScaleMiniCache
        MOV     a2, #128*1024
  |
        MOV     a2, #32*1024
  ]
        STR     a2, [v6, #DCache_RangeThreshold]

        ; enable full coprocessor access
        LDR     a2, =&3FFF
        MCR     p15, 0, a2, c15, c1

        LDR     a2, [v6, #ProcessorFlags]
        TST     a2, #CPUFlag_ExtendedPages
        ADREQL  a2, XCBTableXScaleNoExt
        ADRNEL  a2, XCBTableXScaleWA ; choose between RA and WA here
        STR     a2, [v6, #MMU_PCBTrans]

        B       %FT90
 ] ; MEMM_Type = "ARM600"

 [ MEMM_Type = "VMSAv6"
Analyse_WB_CR7_Lx
        TST     v5, #CPUFlag_SplitCache
        BEQ     WeirdARMPanic             ; currently, only support harvard caches here

        ; Read smallest instruction & data/unified cache line length
        MRC     p15, 0, a1, c0, c0, 1 ; Cache type register
        MOV     v2, a1, LSR #16
        AND     a4, a1, #&F
        AND     v2, v2, #&F
        MOV     a1, #4
        MOV     a4, a1, LSL a4
        MOV     v2, a1, LSL v2
        STRB    a4, [v6, #ICache_LineLen]
        STRB    v2, [v6, #DCache_LineLen]
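        ; (The IminLine/DminLine fields hold log2 of the line length in words,
        ;  so 4 << field converts to bytes)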

        ; Read the cache info into Cache_Lx_*
        MRC     p15, 1, a1, c0, c0, 1 ; Cache level ID register
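        ; (Each level has a 3-bit Ctype field: 000 none, 001 instruction only,
        ;  010 data only, 011 split, 100 unified - hence the tests against #6
        ;  and #1 below)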
        MOV     v2, v6 ; Work around DTable/ITable alignment issues
        STR     a1, [v2, #Cache_Lx_Info]!
        ADD     a2, v2, #Cache_Lx_DTable-Cache_Lx_Info
        MOV     a3, #0
10
        ANDS    v1, a1, #6 ; Data or unified cache at this level?
        BEQ     %FT11
        MCRNE   p15, 2, a3, c0, c0, 0 ; Program cache size selection register
        myISB   ,v1
        MRCNE   p15, 1, v1, c0, c0, 0 ; Get size info (data/unified)
11      STR     v1, [a2]
        ADD     a3, a3, #1
        ANDS    v1, a1, #1 ; Instruction cache at this level?
        BEQ     %FT12
        MCRNE   p15, 2, a3, c0, c0, 0 ; Program cache size selection register
        myISB   ,v1
        MRCNE   p15, 1, v1, c0, c0, 0 ; Get size info (instruction)
12      STR     v1, [a2, #Cache_Lx_ITable-Cache_Lx_DTable]
        ; Shift the cache level ID register along to get the type of the next
        ; cache level
        ; However, we need to stop once we reach the first blank entry, because
        ; ARM have been sneaky and started to reuse some of the bits from the
        ; high end of the register (the Cortex-A8 TRM lists bits 21-23 as being
        ; for cache level 8, but the ARMv7 ARM lists them as being for the level
        ; of unification for inner shareable memory). The ARMv7 ARM does warn
        ; about making sure you stop once you find the first blank entry, but
        ; it doesn't say why!
        TST     a1, #7
        ADD     a3, a3, #1
        MOVNE   a1, a1, LSR #3
        CMP     a3, #Cache_Lx_MaxLevel*2 ; Stop at the last level we support
        ADD     a2, a2, #4
        BLT     %BT10

        ADRL    a1, DSB_ReadWrite_ARMv7
        ADRL    a2, DSB_Write_ARMv7
        STR     a1, [v6, #Proc_DSB_ReadWrite]
        STR     a2, [v6, #Proc_DSB_Write]
        STR     a1, [v6, #Proc_DSB_Read]

        ADRL    a1, DMB_ReadWrite_ARMv7
        ADRL    a2, DMB_Write_ARMv7
        STR     a1, [v6, #Proc_DMB_ReadWrite]
        STR     a2, [v6, #Proc_DMB_Write]
        STR     a1, [v6, #Proc_DMB_Read]

      [ SMP
        ; If we're a multicore chip, use MP-safe ARMops
        MRC     p15, 0, a1, c0, c0, 5 ; MPIDR
        AND     a1, a1, #&c0000000
        TEQ     a1, #&80000000
        BNE     %FT50

        ; Set high DCache_RangeThreshold to discourage global clean/invalidate ops (no broadcast)
        MVN     a1, #0
        STR     a1, [v6, #DCache_RangeThreshold]

        ADRL    a1, Cache_CleanInvalidateAll_ARMv7MP
        STR     a1, [v6, #Proc_Cache_CleanInvalidateAll]

        ADRL    a1, Cache_CleanInvalidateRange_ARMv7MP
        STR     a1, [v6, #Proc_Cache_CleanInvalidateRange]

        ADRL    a1, Cache_CleanAll_ARMv7MP
        STR     a1, [v6, #Proc_Cache_CleanAll]

        ADRL    a1, Cache_CleanRange_ARMv7MP
        STR     a1, [v6, #Proc_Cache_CleanRange]

        ADRL    a1, Cache_InvalidateAll_ARMv7MP
        STR     a1, [v6, #Proc_Cache_InvalidateAll]

        ADRL    a1, Cache_InvalidateRange_ARMv7MP
        STR     a1, [v6, #Proc_Cache_InvalidateRange]

        ADRL    a1, Cache_RangeThreshold_ARMv7MP
        STR     a1, [v6, #Proc_Cache_RangeThreshold]

        ADRL    a1, Cache_Examine_ARMv7MP
        STR     a1, [v6, #Proc_Cache_Examine]

        ADRL    a1, ICache_InvalidateAll_ARMv7MP
        STR     a1, [v6, #Proc_ICache_InvalidateAll]

        ADRL    a1, ICache_InvalidateRange_ARMv7MP
        STR     a1, [v6, #Proc_ICache_InvalidateRange]

        ADRL    a1, TLB_InvalidateAll_ARMv7MP
        STR     a1, [v6, #Proc_TLB_InvalidateAll]

        ADRL    a1, TLB_InvalidateEntry_ARMv7MP
        STR     a1, [v6, #Proc_TLB_InvalidateEntry]

        ADRL    a1, IMB_Full_ARMv7MP
        STR     a1, [v6, #Proc_IMB_Full]

        ADRL    a1, IMB_Range_ARMv7MP
        STR     a1, [v6, #Proc_IMB_Range]

        ADRL    a1, IMB_List_ARMv7MP
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_ARMv7MP
        STR     a1, [v6, #Proc_MMU_Changing]

        ADRL    a1, MMU_ChangingEntry_ARMv7MP
        STR     a1, [v6, #Proc_MMU_ChangingEntry]

        ADRL    a1, MMU_ChangingUncached_ARMv7MP
        STR     a1, [v6, #Proc_MMU_ChangingUncached]

        ADRL    a1, MMU_ChangingUncachedEntry_ARMv7MP
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_ARMv7MP
        STR     a1, [v6, #Proc_MMU_ChangingEntries]

        ADRL    a1, MMU_ChangingUncachedEntries_ARMv7MP
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntries]

        B       %FT90
50
      ]
        ; Calculate DCache_RangeThreshold
        MOV     a1, #128*1024 ; Arbitrary-ish
        STR     a1, [v6, #DCache_RangeThreshold]

        ADRL    a1, Cache_CleanInvalidateAll_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_CleanInvalidateAll]

        ADRL    a1, Cache_CleanInvalidateRange_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_CleanInvalidateRange]

        ADRL    a1, Cache_CleanAll_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_CleanAll]

        ADRL    a1, Cache_CleanRange_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_CleanRange]

        ADRL    a1, Cache_InvalidateAll_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_InvalidateAll]

        ADRL    a1, Cache_InvalidateRange_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_InvalidateRange]

        ADRL    a1, Cache_RangeThreshold_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_RangeThreshold]

        ADRL    a1, Cache_Examine_WB_CR7_Lx
        STR     a1, [v6, #Proc_Cache_Examine]

        ADRL    a1, ICache_InvalidateAll_WB_CR7_Lx
        STR     a1, [v6, #Proc_ICache_InvalidateAll]

        ADRL    a1, ICache_InvalidateRange_WB_CR7_Lx
        STR     a1, [v6, #Proc_ICache_InvalidateRange]

        ADRL    a1, TLB_InvalidateAll_WB_CR7_Lx
        STR     a1, [v6, #Proc_TLB_InvalidateAll]

        ADRL    a1, TLB_InvalidateEntry_WB_CR7_Lx
        STR     a1, [v6, #Proc_TLB_InvalidateEntry]

        ADRL    a1, IMB_Full_WB_CR7_Lx
        STR     a1, [v6, #Proc_IMB_Full]

        ADRL    a1, IMB_Range_WB_CR7_Lx
        STR     a1, [v6, #Proc_IMB_Range]

        ADRL    a1, IMB_List_WB_CR7_Lx
        STR     a1, [v6, #Proc_IMB_List]

        ADRL    a1, MMU_Changing_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_Changing]

        ADRL    a1, MMU_ChangingEntry_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_ChangingEntry]

        ADRL    a1, MMU_ChangingUncached_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_ChangingUncached]

        ADRL    a1, MMU_ChangingUncachedEntry_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntry]

        ADRL    a1, MMU_ChangingEntries_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_ChangingEntries]

        ADRL    a1, MMU_ChangingUncachedEntries_WB_CR7_Lx
        STR     a1, [v6, #Proc_MMU_ChangingUncachedEntries]

        B       %FT90
 ] ; MEMM_Type = "VMSAv6"

90
 [ MEMM_Type = "VMSAv6"
        ; Reuse Init_PCBTrans
        MOV     a1, v6
        BL      Init_PCBTrans
        ADRL    a1, PPLAccess
        STR     a1, [v6, #MMU_PPLAccess]
 |
        TST     v5, #CPUFlag_ExtendedPages
        ADRNEL  a1, PPLTransX
        ADREQL  a1, PPLTrans
     [ ARM6support
        ARM_6   lr
        ADREQL  a1, PPLTransARM6
     ]
        STR     a1, [v6, #MMU_PPLTrans]
        ADRL    a1, PPLAccess
     [ ARM6support
        ADREQL  a1, PPLAccessARM6
     ]
        STR     a1, [v6, #MMU_PPLAccess]
 ]
        Pull    "v1,v2,v5,v6,v7,pc"


; This routine works out the values LINELEN, ASSOCIATIVITY, NSETS and CACHE_SIZE defined
; in section B2.3.3 of the ARMv5 ARM.
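; For example (illustrative values only), a field with size=6, assoc=5, len=2
; and M=0 decodes as: LineLen = 1<<(2+3) = 32 bytes, Associativity = 2<<(5-1)
; = 32, Size = 2<<(6+8) = 32K, NSets = 1<<(6+6-5-2) = 32 (and 32*32*32 = 32K).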
EvaluateCache
        AND     a3, a1, #CT_assoc_mask+CT_M
        TEQ     a3, #(CT_assoc_0:SHL:CT_assoc_pos)+CT_M
        BEQ     %FT80
        MOV     ip, #1
        ASSERT  CT_len_pos = 0
        AND     a4, a1, #CT_len_mask
        ADD     a4, a4, #3
        MOV     a4, ip, LSL a4                  ; LineLen = 1 << (len+3)
        STRB    a4, [a2, #ICache_LineLen-ICache_Info]
        MOV     a3, #2
        TST     a1, #CT_M
        ADDNE   a3, a3, #1                      ; Multiplier = 2 + M
        AND     a4, a1, #CT_assoc_mask
        RSB     a4, ip, a4, LSR #CT_assoc_pos
        MOV     a4, a3, LSL a4                  ; Associativity = Multiplier << (assoc-1)
        STRB    a4, [a2, #ICache_Associativity-ICache_Info]
        AND     a4, a1, #CT_size_mask
        MOV     a4, a4, LSR #CT_size_pos
        MOV     a3, a3, LSL a4
        MOV     a3, a3, LSL #8                  ; Size = Multiplier << (size+8)
        STR     a3, [a2, #ICache_Size-ICache_Info]
        ADD     a4, a4, #6
        AND     a3, a1, #CT_assoc_mask
        SUB     a4, a4, a3, LSR #CT_assoc_pos
        AND     a3, a1, #CT_len_mask
        ASSERT  CT_len_pos = 0
        SUB     a4, a4, a3
        MOV     a4, ip, LSL a4                  ; NSets = 1 << (size + 6 - assoc - len)
        STR     a4, [a2, #ICache_NSets-ICache_Info]
        MOV     pc, lr


80      MOV     a1, #0
        STR     a1, [a2, #ICache_NSets-ICache_Info]
        STR     a1, [a2, #ICache_Size-ICache_Info]
        STRB    a1, [a2, #ICache_LineLen-ICache_Info]
        STRB    a1, [a2, #ICache_Associativity-ICache_Info]
        MOV     pc, lr


; Create a list of CPUs, 16 bytes per entry:
;    ID bits (1 word)
;    Test mask for ID (1 word)
;    Cache type register value (1 word)
;    Processor type (1 byte)
;    Architecture type (1 byte)
;    Reserved (2 bytes)
        GBLA    tempcpu

        MACRO
        CPUDesc $proc, $id, $mask, $arch, $type, $s, $dsz, $das, $dln, $isz, $ias, $iln
        LCLA    type
type    SETA    (CT_ctype_$type:SHL:CT_ctype_pos)+($s:SHL:CT_S_pos)
tempcpu CSzDesc $dsz, $das, $dln
type    SETA    type+(tempcpu:SHL:CT_Dsize_pos)
        [ :LNOT:($s=0 :LAND: "$isz"="")
tempcpu CSzDesc $isz, $ias, $iln
        ]
type    SETA    type+(tempcpu:SHL:CT_Isize_pos)
        ASSERT  ($id :AND: :NOT: $mask) = 0
        DCD     $id, $mask, type
        DCB     $proc, $arch, 0, 0
        MEND

        MACRO
$var    CSzDesc $sz, $as, $ln
$var    SETA    (CT_size_$sz:SHL:CT_size_pos)+(CT_assoc_$as:SHL:CT_assoc_pos)+(CT_len_$ln:SHL:CT_len_pos)
$var    SETA    $var+(CT_M_$sz:SHL:CT_M_pos)
        MEND
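; For instance, the ARM610 entry below expands to one 16-byte record:
; DCD &000610, &00FFF0, <cache type word built by CSzDesc>, followed by
; DCB ARM610, ARMv3, 0, 0.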


; CPUDesc table for ARMv3-ARMv6
KnownCPUTable
;                                                        /------Cache Type register fields-----\.
;                              ID reg   Mask     Arch    Type         S  Dsz Das Dln Isz Ias Iln
 [ MEMM_Type = "ARM600"
        CPUDesc ARM600,        &000600, &00FFF0, ARMv3,   WT,         0,  4K, 64, 4
        CPUDesc ARM610,        &000610, &00FFF0, ARMv3,   WT,         0,  4K, 64, 4
        CPUDesc ARMunk,        &000000, &00F000, ARMv3,   WT,         0,  4K, 64, 4
        CPUDesc ARM700,        &007000, &FFFFF0, ARMv3,   WT,         0,  8K,  4, 8
        CPUDesc ARM710,        &007100, &FFFFF0, ARMv3,   WT,         0,  8K,  4, 8
        CPUDesc ARM710a,       &047100, &FDFFF0, ARMv3,   WT,         0,  8K,  4, 4
        CPUDesc ARM7500,       &027100, &FFFFF0, ARMv3,   WT,         0,  4K,  4, 4
        CPUDesc ARM7500FE,     &077100, &FFFFF0, ARMv3,   WT,         0,  4K,  4, 4
        CPUDesc ARMunk,        &007000, &80F000, ARMv3,   WT,         0,  8K,  4, 4
        CPUDesc ARM720T,       &807200, &FFFFF0, ARMv4T,  WT,         0,  8K,  4, 4
        CPUDesc ARMunk,        &807000, &80F000, ARMv4T,  WT,         0,  8K,  4, 4
        CPUDesc SA110_preRevT, &01A100, &0FFFFC, ARMv4,   WB_Crd,     1, 16K, 32, 8, 16K, 32, 8
        CPUDesc SA110,         &01A100, &0FFFF0, ARMv4,   WB_Crd,     1, 16K, 32, 8, 16K, 32, 8
        CPUDesc SA1100,        &01A110, &0FFFF0, ARMv4,   WB_Crd,     1,  8K, 32, 8, 16K, 32, 8
        CPUDesc SA1110,        &01B110, &0FFFF0, ARMv4,   WB_Crd,     1,  8K, 32, 8, 16K, 32, 8
        CPUDesc ARM920T,       &029200, &0FFFF0, ARMv4T,  WB_CR7_LDa, 1, 16K, 64, 8, 16K, 64, 8
        CPUDesc ARM922T,       &029220, &0FFFF0, ARMv4T,  WB_CR7_LDa, 1,  8K, 64, 8,  8K, 64, 8
        CPUDesc X80200,        &052000, &0FFFF0, ARMv5TE, WB_Cal_LD,  1, 32K, 32, 8, 32K, 32, 8
        CPUDesc X80321,    &69052400, &FFFFF700, ARMv5TE, WB_Cal_LD,  1, 32K, 32, 8, 32K, 32, 8
 ] ; MEMM_Type = "ARM600"
        DCD     -1

 [ MEMM_Type = "VMSAv6"
; Simplified CPUDesc table for ARMvF
; The cache size data is ignored for ARMv7.
KnownCPUTable_Fancy
        CPUDesc ARM1176JZF_S,  &00B760, &00FFF0, ARMvF,   WB_CR7_LDc, 1, 16K,  4, 8, 16K,  4, 8
        CPUDesc Cortex_A5,     &00C050, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 16K, 32,16, 16K, 32,16
        CPUDesc Cortex_A7,     &00C070, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 16K, 32,16, 16K, 32,16
        CPUDesc Cortex_A8,     &00C080, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 16K, 32,16, 16K, 32,16
        CPUDesc Cortex_A9,     &00C090, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A12,    &00C0D0, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A15,    &00C0F0, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A17,    &00C0E0, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A53,    &00D030, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A57,    &00D070, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        CPUDesc Cortex_A72,    &00D080, &00FFF0, ARMvF,   WB_CR7_Lx,  1, 32K, 32,16, 32K, 32,16
        DCD     -1
 ] ; MEMM_Type = "VMSAv6"

; Peculiar characteristics of individual ARMs not deducible otherwise. First field is
; flags to set, second flags to clear.
KnownCPUFlags
        DCD     0,                            0    ; ARM 600
        DCD     0,                            0    ; ARM 610
        DCD     0,                            0    ; ARM 700
        DCD     0,                            0    ; ARM 710
        DCD     0,                            0    ; ARM 710a
        DCD     CPUFlag_AbortRestartBroken+CPUFlag_InterruptDelay,   0    ; SA 110 pre revT
        DCD     CPUFlag_InterruptDelay,       0    ; SA 110 revT or later
        DCD     0,                            0    ; ARM 7500
        DCD     0,                            0    ; ARM 7500FE
        DCD     CPUFlag_InterruptDelay,       0    ; SA 1100
        DCD     CPUFlag_InterruptDelay,       0    ; SA 1110
        DCD     CPUFlag_NoWBDrain,            0    ; ARM 720T
        DCD     0,                            0    ; ARM 920T
        DCD     0,                            0    ; ARM 922T
        DCD     CPUFlag_ExtendedPages+CPUFlag_XScale,  0    ; X80200
        DCD     CPUFlag_XScale,               0    ; X80321
        DCD     0,                            0    ; ARM1176JZF_S
        DCD     0,                            0    ; Cortex_A5
        DCD     0,                            0    ; Cortex_A7
        DCD     0,                            0    ; Cortex_A8
        DCD     0,                            0    ; Cortex_A9
        DCD     0,                            0    ; Cortex_A12
        DCD     CPUFlag_NoDCacheDisable,      0    ; Cortex_A15
        DCD     0,                            0    ; Cortex_A17
        DCD     CPUFlag_NoDCacheDisable,      0    ; Cortex_A53
        DCD     0,                            0    ; Cortex_A57
        DCD     0,                            0    ; Cortex_A72

 [ MEMM_Type = "VMSAv6"
; --------------------------------------------------------------------------
; ----- ARM_Analyse_Fancy --------------------------------------------------
; --------------------------------------------------------------------------
;
; For ARMv7 ARMs (arch=&F), we can detect everything via the feature registers
; TODO - There's some stuff in here that can be tidied up/removed

; Things we need to set up:
; ProcessorType     (as listed in hdr.ARMops)
; Cache_Type        (CT_ctype_* from hdr:MEMM.ARM600)
; ProcessorArch     (as reported by Init_ARMarch)
; ProcessorFlags    (CPUFlag_* from hdr.ARMops)
; Proc_*            (Cache/TLB/IMB/MMU function pointers)
; MMU_PCBTrans      (Points to lookup table for translating page table cache options)
; ICache_*, DCache_* (ICache, DCache properties - optional, since not used externally?)

ARM_Analyse_Fancy
        Push    "v1,v2,v5,v6,v7,lr"
        ARM_read_ID v1
        LDR     v6, =ZeroPage
        ADRL    v7, KnownCPUTable_Fancy
10
        LDMIA   v7!, {a1, a2}
        CMP     a1, #-1
        BEQ     %FT20
        AND     a2, v1, a2
        TEQ     a1, a2
        ADDNE   v7, v7, #8
        BNE     %BT10
20
        LDR     v2, [v7]
        CMP     a1, #-1
        LDRNEB  a2, [v7, #4]
        MOVEQ   a2, #ARMunk
        STRB    a2, [v6, #ProcessorType]

        AND     a1, v2, #CT_ctype_mask
        MOV     a1, a1, LSR #CT_ctype_pos
        STRB    a1, [v6, #Cache_Type]

        ; STM should always store PC+8
        ; Should always be base restored abort model
        ; 26bit has been obsolete for a long time
        MOV     v5, #CPUFlag_StorePCplus8+CPUFlag_BaseRestored+CPUFlag_32bitOS+CPUFlag_No26bitMode
        [ HiProcVecs
        ORR     v5, v5, #CPUFlag_HiProcVecs
        ]

        ; Work out whether the cache info is in ARMv6 or ARMv7 style
        ; Top 3 bits of the cache type register give the register format
        ARM_read_cachetype v2
        MOV     a1, v2, LSR #29
        TEQ     a1, #4
        BEQ     %FT25
        TEQ     a1, #0
        BNE     WeirdARMPanic

        ; ARMv6 format cache type register.
        ; CPUs like the ARM1176JZF-S are available with a range of cache sizes,
        ; so it's not safe to rely on the values in the CPU table. Fortunately
        ; all ARMv6 CPUs implement the register (by contrast, for the "plain"
        ; ARM case, no ARMv3 CPUs, some ARMv4 CPUs and all ARMv5 CPUs, so it
        ; needs to drop back to the table in some cases).
        MOV     a1, v2, LSR #CT_Isize_pos
        ADD     a2, v6, #ICache_Info
        BL      EvaluateCache
        MOV     a1, v2, LSR #CT_Dsize_pos
        ADD     a2, v6, #DCache_Info
        BL      EvaluateCache

        TST     v2, #CT_S
        ORRNE   v5, v5, #CPUFlag_SynchroniseCodeAreas+CPUFlag_SplitCache

        B       %FT27

25
        ; ARMv7 format cache type register.
        ; This should(!) mean that we have the cache level ID register,
        ; and all the other ARMv7 cache registers.

        ; Do we have a split cache?
        MRC     p15, 1, a1, c0, c0, 1
        AND     a2, a1, #7
        TEQ     a2, #3
        ORREQ   v5, v5, #CPUFlag_SynchroniseCodeAreas+CPUFlag_SplitCache
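        ; (A level 1 Ctype of 011 in the cache level ID register means
        ;  separate I and D caches)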

27
        [ CacheOff
        ORR     v5, v5, #CPUFlag_SynchroniseCodeAreas
        |
        ARM_read_control a1                     ; if Z bit set then we have branch prediction,
        TST     a1, #MMUC_Z                     ; so we need OS_SynchroniseCodeAreas even if not
        ORRNE   v5, v5, #CPUFlag_SynchroniseCodeAreas   ; split caches
        ]

        BL      Init_ARMarch
        STRB    a1, [v6, #ProcessorArch]

        MRC     p15, 0, a1, c0, c2, 2           ; ID_ISAR2
        TST     a1, #&FF0000                    ; MultU_instrs OR MultS_instrs
        ORRNE   v5, v5, #CPUFlag_LongMul

        MRC     p15, 0, a1, c0, c1, 0           ; ID_PFR0
        TST     a1, #&F0                        ; State1
        ORRNE   v5, v5, #CPUFlag_Thumb

        MRC     p15, 0, a1, c0, c2, 3           ; ID_ISAR3
        TST     a1, #&F                         ; Saturate_instrs
        ORRNE   v5, v5, #CPUFlag_DSP

        MRC     p15, 0, a1, c0, c2, 0           ; ID_ISAR0
        TST     a1, #&F                         ; Swap_instrs
        MRC     p15, 0, a1, c0, c2, 4           ; ID_ISAR4
        TSTEQ   a1, #&F0000000                  ; SWP_frac
        ORREQ   v5, v5, #CPUFlag_NoSWP

        MRC     p15, 0, a2, c0, c2, 3           ; ID_ISAR3
        AND     a2, a2, #&00F000                ; SynchPrim_instrs
        AND     a1, a1, #&F00000                ; SynchPrim_instrs_frac
        ORR     a1, a2, a1, LSR #12
        TEQ     a1, #2_00010000:SHL:8
        ORREQ   v5, v5, #CPUFlag_LoadStoreEx
        TEQ     a1, #2_00010011:SHL:8
        TEQNE   a1, #2_00100000:SHL:8
        ORREQ   v5, v5, #CPUFlag_LoadStoreEx :OR: CPUFlag_LoadStoreClearExSizes
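        ; (SynchPrim values: 0001/0000 = LDREX/STREX only; 0001/0011 adds CLREX
        ;  and the byte/halfword forms; 0010/0000 adds the doubleword forms)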

        ; Other flags not checked for above:
        ; CPUFlag_InterruptDelay
        ; CPUFlag_VectorReadException
        ; CPUFlag_ExtendedPages
        ; CPUFlag_NoWBDrain
        ; CPUFlag_AbortRestartBroken
        ; CPUFlag_XScale
        ; CPUFlag_XScaleJTAGconnected

        LDRB    v4, [v6, #ProcessorType]

        TEQ     v4, #ARMunk                     ; Modify deduced flags
        ADRNEL  lr, KnownCPUFlags
        ADDNE   lr, lr, v4, LSL #3
        LDMNEIA lr, {a2, a3}
        ORRNE   v5, v5, a2
        BICNE   v5, v5, a3

        ORR     v5, v5, #CPUFlag_ExtraReasonCodesFixed
        STR     v5, [v6, #ProcessorFlags]

        ; Cache analysis

        LDRB    a2, [v6, #Cache_Type]

        TEQ     a2, #CT_ctype_WB_CR7_LDa        ; eg. ARM9
        TEQNE   a2, #CT_ctype_WB_CR7_LDc        ; eg. ARM1176JZF-S - differs only in cache lockdown
        BEQ     Analyse_WB_CR7_LDa

        TEQ     a2, #CT_ctype_WB_CR7_Lx
        BEQ     Analyse_WB_CR7_Lx               ; eg. Cortex-A8, Cortex-A9

        ; others ...

        B       WeirdARMPanic                   ; stiff :)
 ] ; MEMM_Type = "VMSAv6"

; --------------------------------------------------------------------------
; ----- ARMops -------------------------------------------------------------
; --------------------------------------------------------------------------
;
; ARMops are the routines required by the kernel for cache/MMU control
; the kernel vectors to the appropriate ops for the given ARM at boot
;
; The Rules:
;   - These routines may corrupt a1 and lr only
;   - (lr can of course only be corrupted whilst still returning to correct
;     link address)
;   - stack is available, at least 16 words can be stacked
;   - a NULL op would be a simple MOV pc, lr
;

; In:  r1 = cache level (0-based)
; Out: r0 = Flags
;           bits 0-2: cache type:
;              000 -> none
;              001 -> instruction
;              010 -> data
;              011 -> split
;              100 -> unified
;              1xx -> reserved
;           Other bits: reserved
;      r1 = D line length
;      r2 = D size
;      r3 = I line length
;      r4 = I size
;      r0-r4 = zero if cache level not present
Cache_Examine_Simple
        TEQ     r1, #0
        MOVNE   r0, #0
        MOVNE   r1, #0
        MOVNE   r2, #0
        MOVNE   r3, #0
        MOVNE   r4, #0
        MOVNE   pc, lr
        LDR     r4, =ZeroPage
        LDR     r0, [r4, #ProcessorFlags]
        TST     r0, #CPUFlag_SplitCache
        MOVNE   r0, #3
        MOVEQ   r0, #4
        LDRB    r1, [r4, #DCache_LineLen]
        LDR     r2, [r4, #DCache_Size]
        LDRB    r3, [r4, #ICache_LineLen]
        LDR     r4, [r4, #ICache_Size]
NullOp  MOV     pc, lr

 [ MEMM_Type = "ARM600"

; --------------------------------------------------------------------------
; ----- ARMops for ARMv3 ---------------------------------------------------
; --------------------------------------------------------------------------
;
; ARMv3 ARMs include ARM710, ARM610, ARM7500
;

Cache_Invalidate_ARMv3
        MCR     p15, 0, a1, c7, c0
        MOV     pc, lr

DSB_ReadWrite_ARMv3
        ;swap always forces unbuffered write, stalling till WB empty
        SUB     sp, sp, #4
        SWP     a1, a1, [sp]
        ADD     sp, sp, #4
        MOV     pc, lr

TLB_Invalidate_ARMv3
        MCR     p15, 0, a1, c5, c0
        MOV     pc, lr

; a1 = page entry to invalidate (page aligned address)
;
TLB_InvalidateEntry_ARMv3
        MCR     p15, 0, a1, c6, c0
        MOV     pc, lr

MMU_Changing_ARMv3
 [ CacheablePageTables
        SUB     sp, sp, #4
        SWP     a1, a1, [sp]
        ADD     sp, sp, #4
 ]
        MCR     p15, 0, a1, c5, c0      ; invalidate TLB
        MCR     p15, 0, a1, c7, c0      ; invalidate cache
        MOV     pc, lr

MMU_ChangingUncached_ARMv3
 [ CacheablePageTables
        SUB     sp, sp, #4
        SWP     a1, a1, [sp]
        ADD     sp, sp, #4
 ]
        MCR     p15, 0, a1, c5, c0      ; invalidate TLB
        MOV     pc, lr

; a1 = page affected (page aligned address)
;
MMU_ChangingEntry_ARMv3
 [ CacheablePageTables
        Push    "a1"
        SWP     a1, a1, [sp]
        ADD     sp, sp, #4
 ]
        MCR     p15, 0, a1, c6, c0      ; invalidate TLB entry
        MCR     p15, 0, a1, c7, c0      ; invalidate cache
        MOV     pc, lr

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_ARMv3 ROUT
        CMP     a2, #16                 ; arbitrary-ish threshold
        BHS     MMU_Changing_ARMv3
        Push    "a2"
 [ CacheablePageTables
        SWP     a2, a2, [sp]
 ]
10
        MCR     p15, 0, a1, c6, c0      ; invalidate TLB entry
        SUBS    a2, a2, #1              ; next page
        ADD     a1, a1, #PageSize
        BNE     %BT10
        MCR     p15, 0, a1, c7, c0      ; invalidate cache
        Pull    "a2"
        MOV     pc, lr

; a1 = page affected (page aligned address)
;
MMU_ChangingUncachedEntry_ARMv3
 [ CacheablePageTables
        Push    "a1"
        SWP     a1, a1, [sp]
        ADD     sp, sp, #4
 ]
        MCR     p15, 0, a1, c6, c0      ; invalidate TLB entry
        MOV     pc, lr

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingUncachedEntries_ARMv3 ROUT
        CMP     a2, #16                 ; arbitrary-ish threshold
        BHS     MMU_ChangingUncached_ARMv3
        Push    "a2"
 [ CacheablePageTables
        SWP     a2, a2, [sp]
 ]
10
        MCR     p15, 0, a1, c6, c0      ; invalidate TLB entry
        SUBS    a2, a2, #1              ; next page
        ADD     a1, a1, #PageSize
        BNE     %BT10
        Pull    "a2"
        MOV     pc, lr

Cache_RangeThreshold_ARMv3
        ! 0, "arbitrary Cache_RangeThreshold_ARMv3"
        MOV     a1, #16*PageSize
        MOV     pc, lr

        LTORG

; --------------------------------------------------------------------------
; ----- generic ARMops for simple ARMs, ARMv4 onwards ----------------------
; --------------------------------------------------------------------------
;
; eg. ARM7TDMI based ARMs, unified, writethrough cache
;

Cache_InvalidateUnified
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c7
        MOV     pc, lr

DSB_ReadWrite_OffOn
        ; used if ARM has no drain WBuffer MCR op
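        ; (Intended effect, as far as can be inferred from the code: writing
        ; the control register with the W bit clear is assumed to force any
        ; buffered writes to complete before later accesses; the original
        ; control value is then written back to restore the write buffer.)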
        Push    "a2"
        ARM_read_control a1
        BIC     a2, a1, #MMUC_W
        ARM_write_control a2
        ARM_write_control a1
        Pull    "a2"
        MOV     pc, lr

DSB_ReadWrite
        ; used if ARM has proper drain WBuffer MCR op
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4
        MOV     pc, lr

TLB_Invalidate_Unified
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7
        MOV     pc, lr

; a1 = page entry to invalidate (page aligned address)
;
TLB_InvalidateEntry_Unified
        MCR     p15, 0, a1, c8, c7, 1
        MOV     pc, lr

MMU_Changing_Writethrough
 [ CacheablePageTables
        ; Yuck - this is probably going to be quite slow. Something to fix
        ; properly if/when we port to a system that uses this type of CPU.
        Push    "lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "lr"
 ]
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7      ; invalidate TLB
        MCR     p15, 0, a1, c7, c7      ; invalidate cache
        MOV     pc, lr

MMU_ChangingUncached
 [ CacheablePageTables
        Push    "lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "lr"
 ]
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7      ; invalidate TLB
        MOV     pc, lr

; a1 = page affected (page aligned address)
;
MMU_ChangingEntry_Writethrough
 [ CacheablePageTables
        Push    "a1,lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "a1,lr"
 ]
        MCR     p15, 0, a1, c8, c7, 1   ; invalidate TLB entry
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c7      ; invalidate cache
        MOV     pc, lr

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_Writethrough  ROUT
        CMP     a2, #16                 ; arbitrary-ish threshold
        BHS     MMU_Changing_Writethrough
        Push    "a2"
 [ CacheablePageTables
        Push    "a1,lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "a1,lr"
 ]
10
        MCR     p15, 0, a1, c8, c7, 1   ; invalidate TLB entry
        SUBS    a2, a2, #1              ; next page
        ADD     a1, a1, #PageSize
        BNE     %BT10
        MCR     p15, 0, a2, c7, c7      ; invalidate cache
        Pull    "a2"
        MOV     pc, lr

; a1 = page affected (page aligned address)
;
MMU_ChangingUncachedEntry
 [ CacheablePageTables
        Push    "a1,lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "a1,lr"
 ]
        MCR     p15, 0, a1, c8, c7, 1   ; invalidate TLB entry
        MOV     pc, lr

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingUncachedEntries ROUT
        CMP     a2, #16                 ; arbitrary-ish threshold
        BHS     MMU_ChangingUncached
        Push    "a2"
 [ CacheablePageTables
        Push    "a1,lr"
        LDR     a1, =ZeroPage
        ARMop   DSB_ReadWrite,,,a1
        Pull    "a1,lr"
 ]
10
        MCR     p15, 0, a1, c8, c7, 1   ; invalidate TLB entry
        SUBS    a2, a2, #1              ; next page
        ADD     a1, a1, #PageSize
        BNE     %BT10
        Pull    "a2"
        MOV     pc, lr

Cache_RangeThreshold_Writethrough
        ! 0, "arbitrary Cache_RangeThreshold_Writethrough"
        MOV     a1, #16*PageSize
        MOV     pc, lr

 ] ; MEMM_Type = "ARM600"

; --------------------------------------------------------------------------
; ----- ARMops for ARM9 and the like ---------------------------------------
; --------------------------------------------------------------------------

; WB_CR7_LDa refers to ARMs with writeback data cache, cleaned with
; register 7, lockdown available (format A)
;
; Note that ARM920 etc have writeback/writethrough data cache selectable
; by MMU regions. For simplicity, we assume cacheable pages are mostly
; writeback. Any writethrough pages will have redundant clean operations
; applied when moved, for example, but this is a small overhead (cleaning
; a clean line is very quick on ARM 9).

Cache_CleanAll_WB_CR7_LDa ROUT
;
; only guarantees to clean lines not involved in interrupts (so we can
; clean without disabling interrupts)
;
; Clean cache by traversing all segment and index values
; As a concrete example, for ARM 920 (16k+16k caches) we would have:
;
;    DCache_LineLen       = 32         (32 byte cache line, segment field starts at bit 5)
;    DCache_IndexBit      = &04000000  (index field starts at bit 26)
;    DCache_IndexSegStart = &000000E0  (start at index=0, segment = 7)
;
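;    (Illustrative arithmetic, following from the example values above:
;    an index field at bits 26-31 gives 64 index values and a segment
;    field at bits 5-7 gives 8 segments, so the loop below issues
;    64*8 = 512 clean operations, covering 512 * 32 bytes = 16k - i.e.
;    the whole data cache.)
;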
        Push    "a2, ip"
        LDR     ip, =ZeroPage
        LDRB    a1, [ip, #DCache_LineLen]        ; segment field starts at this bit
        LDR     a2, [ip, #DCache_IndexBit]       ; index field starts at this bit
        LDR     ip, [ip, #DCache_IndexSegStart]  ; starting value, with index at min, seg at max
10
        MCR     p15, 0, ip, c7, c10, 2           ; clean DCache entry by segment/index
        ADDS    ip, ip, a2                       ; next index, counting up, CS if wrapped back to 0
        BCC     %BT10
        SUBS    ip, ip, a1                       ; next segment, counting down, CC if wrapped back to max
        BCS     %BT10                            ; if segment wrapped, then we've finished
        MOV     ip, #0
        MCR     p15, 0, ip, c7, c10, 4           ; drain WBuffer
        Pull    "a2, ip"
        MOV     pc, lr

Cache_CleanInvalidateAll_WB_CR7_LDa ROUT
;
; similar to Cache_CleanAll, but does clean&invalidate of Dcache, and invalidates ICache
;
        Push    "a2, ip"
        LDR     ip, =ZeroPage
        LDRB    a1, [ip, #DCache_LineLen]        ; segment field starts at this bit
        LDR     a2, [ip, #DCache_IndexBit]       ; index field starts at this bit
        LDR     ip, [ip, #DCache_IndexSegStart]  ; starting value, with index at min, seg at max
10
        MCR     p15, 0, ip, c7, c14, 2           ; clean&invalidate DCache entry by segment/index
        ADDS    ip, ip, a2                       ; next index, counting up, CS if wrapped back to 0
        BCC     %BT10
        SUBS    ip, ip, a1                       ; next segment, counting down, CC if wrapped back to max
        BCS     %BT10                            ; if segment wrapped, then we've finished
        MOV     ip, #0
        MCR     p15, 0, ip, c7, c10, 4           ; drain WBuffer
        MCR     p15, 0, ip, c7, c5, 0            ; invalidate ICache
        Pull    "a2, ip"
        MOV     pc, lr

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
 [ MEMM_Type = "ARM600"
Cache_CleanInvalidateRange_WB_CR7_LDa ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c14, 1             ; clean&invalidate DCache entry
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
        MCR     p15, 0, a1, c7, c5, 6              ; flush branch predictors
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_CR7_LDa

Cache_CleanRange_WB_CR7_LDa ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ; clean DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanAll_WB_CR7_LDa

Cache_InvalidateRange_WB_CR7_LDa ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3, LSL #1                     ;assume clean+invalidate slower than just invalidate
        BHS     %FT30
        ADD     a2, a2, a1                         ;end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c6, 1              ; invalidate DCache entry
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
        MCR     p15, 0, a1, c7, c5, 6              ; flush branch predictors
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_CR7_LDa
 |
; Bodge for ARM11
; The OS assumes that address-based cache maintenance operations will operate
; on pages which are currently marked non-cacheable (so that we can make a page
; non-cacheable and then clean/invalidate the cache, to ensure prefetch or
; anything else doesn't pull any data for the page back into the cache once
; we've cleaned it). For ARMv7+ this is guaranteed behaviour, but prior to that
; it's implementation defined, and the ARM11 in particular seems to ignore
; address-based maintenance which targets non-cacheable addresses.
; As a workaround, perform a full clean & invalidate instead
;
; Note that this also provides us with protection against erratum 720013 (or possibly
; it's that erratum which I was experiencing when I first made this change)
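;
; (The `*' definitions below simply alias the range entry points to the
; corresponding global operations, so no separate range routines exist in
; this case.)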
Cache_CleanInvalidateRange_WB_CR7_LDa * Cache_CleanInvalidateAll_WB_CR7_LDa
Cache_CleanRange_WB_CR7_LDa * Cache_CleanAll_WB_CR7_LDa
Cache_InvalidateRange_WB_CR7_LDa * Cache_CleanInvalidateAll_WB_CR7_LDa
 ]

Cache_InvalidateAll_WB_CR7_LDa ROUT
;
; no clean, assume caller knows what's happening
;
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c7, 0           ; invalidate ICache and DCache
        MOV     pc, lr


Cache_RangeThreshold_WB_CR7_LDa ROUT
        LDR     a1, =ZeroPage
        LDR     a1, [a1, #DCache_RangeThreshold]
        MOV     pc, lr

ICache_InvalidateAll_WB_CR7_LDa ROUT
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c5, 0            ; invalidate ICache + branch predictors
        MOV     pc, lr

 [ MEMM_Type = "ARM600"
;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
ICache_InvalidateRange_WB_CR7_LDa ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024                     ; arbitrary-ish range threshold
        ADD     a2, a2, a1
        BHS     ICache_InvalidateAll_WB_CR7_LDa
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen]
10
        MCR     p15, 0, a1, c7, c5, 1            ; invalidate ICache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c5, 6            ; flush branch predictors
        Pull    "pc"
 |
; ARM11 erratum 720013: I-cache invalidation can fail
; One workaround (for MVA ops) is to perform the operation twice, but that would
; presumably need interrupts to be disabled to be fully safe. So go with the
; other workaround of doing a full invalidate instead.
ICache_InvalidateRange_WB_CR7_LDa * ICache_InvalidateAll_WB_CR7_LDa
 ]


MMU_ChangingUncached_WB_CR7_LDa ROUT
 [ CacheablePageTables
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a1, c7, c5, 4           ; ISB
   ]
 ]
TLB_InvalidateAll_WB_CR7_LDa
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7, 0           ; invalidate ITLB and DTLB
        MOV     pc, lr


; a1 = page affected (page aligned address)
;
MMU_ChangingUncachedEntry_WB_CR7_LDa ROUT
 [ CacheablePageTables
        Push    "a1"
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a1, c7, c5, 4           ; ISB
   ]
        Pull    "a1"
 ]
TLB_InvalidateEntry_WB_CR7_LDa
        MCR     p15, 0, a1, c8, c5, 1           ; invalidate ITLB entry
        MCR     p15, 0, a1, c8, c6, 1           ; invalidate DTLB entry
        MOV     pc, lr


DSB_ReadWrite_WB_CR7_LDa ROUT
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
        MOV     pc, lr


IMB_Full_WB_CR7_LDa ROUT
;
; do: clean DCache; drain WBuffer, invalidate ICache
;
        Push    "lr"
        BL      Cache_CleanAll_WB_CR7_LDa       ; also drains Wbuffer
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c5, 0           ; invalidate ICache
        Pull    "pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
IMB_Range_WB_CR7_LDa ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024                     ; arbitrary-ish range threshold
        ADD     a2, a2, a1
        BHS     IMB_Full_WB_CR7_LDa
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1           ; clean DCache entry by VA
 [ MEMM_Type = "ARM600"
        MCR     p15, 0, a1, c7, c5, 1            ; invalidate ICache entry
 ]
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4           ; drain WBuffer
 [ MEMM_Type = "ARM600"
        MCR     p15, 0, a1, c7, c5, 6            ; flush branch predictors
 |
        MCR     p15, 0, a1, c7, c5, 0            ; invalidate ICache + branch predictors (erratum 720013)
 ]
        Pull    "pc"

;  a1 = pointer to list of (start, end) address pairs
;  a2 = pointer to end of list
;  a3 = total amount of memory to be synchronised
;
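; (For illustration: a list describing two regions occupies four words -
;  start0, end0, start1, end1 - with a2 = a1 + 16 and
;  a3 = (end0 - start0) + (end1 - start1).)
;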
IMB_List_WB_CR7_LDa ROUT
        CMP     a3, #32*1024                     ; arbitrary-ish range threshold
        BHS     IMB_Full_WB_CR7_LDa
        Push    "v1-v2,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
05
        LDMIA   a1!, {v1-v2}
10
        MCR     p15, 0, v1, c7, c10, 1           ; clean DCache entry by VA
 [ MEMM_Type = "ARM600"
        MCR     p15, 0, v1, c7, c5, 1            ; invalidate ICache entry
 ]
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4           ; drain WBuffer
 [ MEMM_Type = "ARM600"
        MCR     p15, 0, a1, c7, c5, 6            ; flush branch predictors
 |
        MCR     p15, 0, a1, c7, c5, 0            ; invalidate ICache + branch predictors (erratum 720013)
 ]
        Pull    "v1-v2,pc"

MMU_Changing_WB_CR7_LDa ROUT
 [ CacheablePageTables
        Push    "a1"
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a1, c7, c5, 4           ; ISB
   ]
        Pull    "a1"
 ]
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7, 0           ; invalidate ITLB and DTLB
        B       Cache_CleanInvalidateAll_WB_CR7_LDa

; a1 = page affected (page aligned address)
;
MMU_ChangingEntry_WB_CR7_LDa ROUT
 [ CacheablePageTables
        Push    "a1"
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a1, c7, c5, 4           ; ISB
   ]
        Pull    "a1"
 ]
 [ MEMM_Type = "ARM600"
        Push    "a2, lr"
        MCR     p15, 0, a1, c8, c6, 1           ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1           ; invalidate ITLB entry
        ADD     a2, a1, #PageSize
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c14, 1          ; clean&invalidate DCache entry
        MCR     p15, 0, a1, c7, c5, 1           ; invalidate ICache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        MOV     lr, #0
        MCR     p15, 0, lr, c7, c10, 4          ; drain WBuffer
        MCR     p15, 0, a1, c7, c5, 6           ; flush branch predictors
        Pull    "a2, pc"
 |
; See above re: ARM11 cache cleaning not working on non-cacheable pages
        MCR     p15, 0, a1, c8, c6, 1           ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1           ; invalidate ITLB entry
        B       Cache_CleanInvalidateAll_WB_CR7_LDa
 ]

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_WB_CR7_LDa ROUT
        Push    "a2, a3, lr"
 [ CacheablePageTables
        MOV     a3, #0
        MCR     p15, 0, a3, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a3, c7, c5, 4           ; ISB
   ]
 ]
        MOV     a2, a2, LSL #Log2PageSize
        LDR     lr, =ZeroPage
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        MCR     p15, 0, a1, c8, c6, 1              ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1              ; invalidate ITLB entry
        ADD     a1, a1, #PageSize
        CMP     a1, a2
        BLO     %BT10
 [ MEMM_Type = "ARM600"
        MOV     a1, lr                             ; restore start address
20
        MCR     p15, 0, a1, c7, c14, 1             ; clean&invalidate DCache entry
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT20
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
        MCR     p15, 0, a1, c7, c5, 6              ; flush branch predictors
        Pull    "a2, a3, pc"
;
 |
; See above re: ARM11 cache cleaning not working on non-cacheable pages
        B       %FT40
 ]
30
        MOV     a1, #0
        MCR     p15, 0, a1, c8, c7, 0              ; invalidate ITLB and DTLB
40
        BL      Cache_CleanInvalidateAll_WB_CR7_LDa
        Pull    "a2, a3, pc"

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingUncachedEntries_WB_CR7_LDa ROUT
 [ CacheablePageTables
        Push    "a1"
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
   [ MEMM_Type = "VMSAv6"
        MCR     p15, 0, a1, c7, c5, 4           ; ISB
   ]
        Pull    "a1"
 ]
        CMP     a2, #32                            ; arbitrary-ish threshold
        BHS     %FT20
        Push    "a2"
10
        MCR     p15, 0, a1, c8, c6, 1              ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1              ; invalidate ITLB entry
        ADD     a1, a1, #PageSize
        SUBS    a2, a2, #1
        BNE     %BT10
        Pull    "a2"
        MOV     pc, lr
;
20
        MCR     p15, 0, a1, c8, c7, 0              ; invalidate ITLB and DTLB
        MOV     pc, lr


 [ MEMM_Type = "ARM600"

; --------------------------------------------------------------------------
; ----- ARMops for StrongARM and the like ----------------------------------
; --------------------------------------------------------------------------

; WB_Crd is Writeback data cache, clean by reading data from cleaner area

; Currently there is no support for the mini data cache found on some
; StrongARM variants. The mini cache is always writeback and would need its
; own cleaning support, so it is very awkward to use for, say, cacheable
; screen memory.

; Global cache cleaning requires address space for private cleaner areas (not accessed
; for any other reason). Cleaning is normally with interrupts enabled (to avoid a latency
; hit), which means that the cleaner data is not invalidated afterwards. This is fine for
; RISC OS - where the private area is not used for anything else, and any re-use of the
; cache under interrupts is safe (eg. a page being moved is *never* involved in any
; active interrupts).

; Mostly, cleaning toggles between two separate cache-sized areas, which gives minimum
; cleaning cost while guaranteeing proper clean even if previous clean data is present. If
; the clean routine is re-entered, an independent, double sized clean is initiated. This
; guarantees proper cleaning (regardless of multiple re-entrancy) whilst hardly complicating
; the routine at all. The overhead is small, since by far the most common cleaning will be
; non-re-entered. The upshot is that the cleaner address space available must be at least 4
; times the cache size:
;   1 : used alternately, on 1st, 3rd, ... non-re-entered cleans
;   2 : used alternately, on 2nd, 4th, ... non-re-entered cleans
;   3 : used only for first half of a re-entered clean
;   4 : used only for second half of a re-entered clean
;
;   DCache_CleanBaseAddress   : start address of total cleaner space
;   DCache_CleanNextAddress   : start address for next non-re-entered clean, or 0 if re-entered
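;
; (Worked example of the sizing rule above: with a 16k data cache the two
;  alternating areas take 32k and the re-entered clean a further 32k, so at
;  least 64k - four times the cache size - of cleaner address space is
;  required.)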


Cache_CleanAll_WB_Crd ROUT
;
; - cleans data cache (and invalidates it as a side effect)
; - can be used with interrupts enabled (to avoid latency over time of clean)
; - can be re-entered
; - see remarks at top of StrongARM ops for discussion of strategy
;

        Push    "a2-a4, v1, v2, lr"
        LDR     lr, =ZeroPage
        LDR     a1, [lr, #DCache_CleanBaseAddress]
        LDR     a2, =DCache_CleanNextAddress
        LDR     a3, [lr, #DCache_Size]
        LDRB    a4, [lr, #DCache_LineLen]
        MOV     v2, #0
        SWP     v1, v2, [a2]                        ; read current CleanNextAddr, zero it (semaphore)
        TEQ     v1, #0                              ; but if it is already zero, we have re-entered
        ADDEQ   v1, a1, a3, LSL #1                  ; if re-entered, start clean at Base+2*Cache_Size
        ADDEQ   v2, v1, a3, LSL #1                  ; if re-entered, do a clean of 2*Cache_Size
        ADDNE   v2, v1, a3                          ; if not re-entered, do a clean of Cache_Size
10
        LDR     lr, [v1], a4
        TEQ     v1, v2
        BNE     %BT10
        ADD     v2, a1, a3, LSL #1                  ; compare end address with Base+2*Cache_Size
        CMP     v1, v2
        MOVEQ   v1, a1                              ; if equal, not re-entered and Next wraps back
        STRLS   v1, [a2]                            ; if lower or same, not re-entered, so update Next
        MCR     p15, 0, a1, c7, c10, 4              ; drain WBuffer
        Pull    "a2-a4, v1, v2, pc"


Cache_CleanInvalidateAll_WB_Crd ROUT
IMB_Full_WB_Crd
;
;does not truly invalidate DCache, but effectively invalidates (flushes) all lines not
;involved in interrupts - this is sufficient for OS requirements, and means we don't
;have to disable interrupts for possibly slow clean
;
        Push    "lr"
        BL      Cache_CleanAll_WB_Crd               ;clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0               ;flush ICache
        Pull    "pc"

Cache_InvalidateAll_WB_Crd
;
; no clean, assume caller knows what is happening
;
        MCR     p15, 0, a1, c7, c7, 0               ;flush ICache and DCache
        MCR     p15, 0, a1, c7, c10, 4              ;drain WBuffer
        MOV     pc, lr

Cache_RangeThreshold_WB_Crd
        LDR     a1, =ZeroPage
        LDR     a1, [a1, #DCache_RangeThreshold]
        MOV     pc, lr

MMU_ChangingUncached_WB_Crd
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
TLB_InvalidateAll_WB_Crd
        MCR     p15, 0, a1, c8, c7, 0              ;flush ITLB and DTLB
        MOV     pc, lr

MMU_ChangingUncachedEntry_WB_Crd
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
TLB_InvalidateEntry_WB_Crd
        MCR     p15, 0, a1, c8, c6, 1              ;flush DTLB entry
        MCR     p15, 0, a1, c8, c5, 0              ;flush ITLB
        MOV     pc, lr

DSB_ReadWrite_WB_Crd
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MOV     pc, lr


IMB_Range_WB_Crd ROUT
        SUB     a2, a2, a1
        CMP     a2, #64*1024                       ;arbitrary-ish range threshold
        ADD     a2, a2, a1
        BHS     IMB_Full_WB_Crd
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ;clean DCache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "pc"


IMB_List_WB_Crd ROUT
        CMP     a3, #64*1024                       ;arbitrary-ish range threshold
        BHS     IMB_Full_WB_Crd
        Push    "v1-v2,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
05
        LDMIA   a1!, {v1-v2}
10
        MCR     p15, 0, v1, c7, c10, 1             ;clean DCache entry
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "v1-v2,pc"

MMU_Changing_WB_Crd
        Push    "lr"
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
        MCR     p15, 0, a1, c8, c7, 0               ;flush ITLB and DTLB
        BL      Cache_CleanAll_WB_Crd               ;clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0               ;flush ICache
        Pull    "pc"

MMU_ChangingEntry_WB_Crd ROUT
;
;there is no clean&invalidate DCache instruction, however we can do clean
;entry followed by invalidate entry without an interrupt hole, because they
;are for the same virtual address (and that virtual address will not be
;involved in interrupts, since it is involved in remapping)
;
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
        Push    "a2, lr"
        ADD     a2, a1, #PageSize
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
        MCR     p15, 0, a1, c8, c6, 1              ;flush DTLB entry
        MCR     p15, 0, a1, c8, c5, 0              ;flush ITLB
10
        MCR     p15, 0, a1, c7, c10, 1             ;clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1              ;flush DCache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        SUB     a1, a1, #PageSize
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, pc"

MMU_ChangingEntries_WB_Crd ROUT
;
;same comments as MMU_ChangingEntry_WB_Crd
;
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
        Push    "a2, a3, lr"
        MOV     a2, a2, LSL #Log2PageSize
        LDR     lr, =ZeroPage
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        MCR     p15, 0, a1, c8, c6, 1              ;flush DTLB entry
        ADD     a1, a1, #PageSize
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c8, c5, 0              ;flush ITLB
        MOV     a1, lr                             ;restore start address
20
        MCR     p15, 0, a1, c7, c10, 1             ;clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1              ;flush DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT20
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"
;
30
        MCR     p15, 0, a1, c8, c7, 0              ;flush ITLB and DTLB
        BL      Cache_CleanAll_WB_Crd              ;clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"

Cache_CleanRange_WB_Crd ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ;clean DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        Pull    "a2, a3, pc"
;
30
        BL      Cache_CleanAll_WB_Crd              ;clean DCache (wrt to non-interrupt stuff)
        Pull    "a2, a3, pc"

Cache_InvalidateRange_WB_Crd ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3, LSL #1                     ;assume clean+invalidate slower than just invalidate
        BHS     %FT30
        ADD     a2, a2, a1                         ;end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c6, 1              ;flush DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"
;
30
        BL      Cache_CleanAll_WB_Crd              ;clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"

Cache_CleanInvalidateRange_WB_Crd ROUT
;
;same comments as MMU_ChangingEntry_WB_Crd
;
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ;clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1              ;flush DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ;drain WBuffer
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"
;
30
        BL      Cache_CleanAll_WB_Crd              ;clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        Pull    "a2, a3, pc"

MMU_ChangingUncachedEntries_WB_Crd ROUT
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 ]
        CMP     a2, #32                            ;arbitrary-ish threshold
        BHS     %FT20
        Push    "lr"
        MOV     lr, a2
10
        MCR     p15, 0, a1, c8, c6, 1              ;flush DTLB entry
        ADD     a1, a1, #PageSize
        SUBS    lr, lr, #1
        BNE     %BT10
        MCR     p15, 0, a1, c8, c5, 0              ;flush ITLB
        Pull    "pc"
;
20
        MCR     p15, 0, a1, c8, c7, 0              ;flush ITLB and DTLB
        MOV     pc, lr

ICache_InvalidateAll_WB_Crd ROUT
ICache_InvalidateRange_WB_Crd
        MCR     p15, 0, a1, c7, c5, 0              ;flush ICache
        MOV     pc, lr

        LTORG

; ARMops for XScale, mjs Feb 2001
;
; WB_Cal_LD is writeback, clean with allocate, lockdown
;
; If the mini data cache is used (XScaleMiniCache true), it is assumed to be
; configured writethrough (eg. used for RISC OS screen memory). This saves an ugly/slow
; mini cache clean for things like IMB_Full.
;
; Sadly, for global cache invalidate with mini cache, things are awkward. We can't clean the
; main cache then do the global invalidate MCR, unless we tolerate having _all_ interrupts
; off (else the main cache may be slightly dirty from interrupts, and the invalidate
; will lose data). So we must reluctantly 'invalidate' the mini cache by the ugly/slow
; mechanism as if we were cleaning it :-( Intel should provide a separate global invalidate
; (and perhaps a line allocate) for the mini cache.
;
; We do not use lockdown.
;
; For simplicity, we assume cacheable pages are mostly writeback. Any writethrough
; pages will be invalidated as if they were writeback, but there is little overhead
; (cleaning a clean line or allocating a line from cleaner area are both fast).

; Global cache cleaning requires address space for private cleaner areas (not accessed
; for any other reason). Cleaning is normally with interrupts enabled (to avoid a latency
; hit), which means that the cleaner data is not invalidated afterwards. This is fine for
; RISC OS - where the private area is not used for anything else, and any re-use of the
; cache under interrupts is safe (eg. a page being moved is *never* involved in any
; active interrupts).

; Mostly, cleaning toggles between two separate cache-sized areas, which gives minimum
; cleaning cost while guaranteeing proper clean even if previous clean data is present. If
; the clean routine is re-entered, an independent, double sized clean is initiated. This
; guarantees proper cleaning (regardless of multiple re-entrancy) whilst hardly complicating
; the routine at all. The overhead is small, since by far the most common cleaning will be
; non-re-entered. The upshot is that the cleaner address space available must be at least 4
; times the cache size:
;   1 : used alternately, on 1st, 3rd, ... non-re-entered cleans
;   2 : used alternately, on 2nd, 4th, ... non-re-entered cleans
;   3 : used only for first half of a re-entered clean
;   4 : used only for second half of a re-entered clean
;
; If the mini cache is used, it has its own equivalent cleaner space and algorithm.
; Parameters for each cache are:
;
;    Cache_CleanBaseAddress   : start address of total cleaner space
;    Cache_CleanNextAddress   : start address for next non-re-entered clean, or 0 if re-entered


                 GBLL XScaleMiniCache  ; *must* be configured writethrough if used
XScaleMiniCache  SETL {FALSE}


; MACRO to do Intel approved CPWAIT, to guarantee any previous MCR's have taken effect
; corrupts a1
;
        MACRO
        CPWAIT
        MRC      p15, 0, a1, c2, c0, 0               ; arbitrary read of CP15
        MOV      a1, a1                              ; wait for it
        ; SUB pc, pc, #4 omitted, because all ops have a pc load to return to caller
        MEND


Cache_CleanAll_WB_Cal_LD ROUT
;
; - cleans main cache (and invalidates as a side effect)
; - if mini cache is in use, will be writethrough so no clean required
; - can be used with interrupts enabled (to avoid latency over time of clean)
; - can be re-entered
; - see remarks at top of XScale ops for discussion of strategy
;
        Push    "a2-a4, v1, v2, lr"
        LDR     lr, =ZeroPage
        LDR     a1, [lr, #DCache_CleanBaseAddress]
        LDR     a2, =ZeroPage+DCache_CleanNextAddress
        LDR     a3, [lr, #DCache_Size]
        LDRB    a4, [lr, #DCache_LineLen]
        MOV     v2, #0
        SWP     v1, v2, [a2]                        ; read current CleanNextAddr, zero it (semaphore)
        TEQ     v1, #0                              ; but if it is already zero, we have re-entered
        ADDEQ   v1, a1, a3, LSL #1                  ; if re-entered, start clean at Base+2*Cache_Size
        ADDEQ   v2, v1, a3, LSL #1                  ; if re-entered, do a clean of 2*Cache_Size
        ADDNE   v2, v1, a3                          ; if not re-entered, do a clean of Cache_Size
10
        MCR     p15, 0, v1, c7, c2, 5               ; allocate address from cleaner space
        ADD     v1, v1, a4
        TEQ     v1, v2
        BNE     %BT10
        ADD     v2, a1, a3, LSL #1                  ; compare end address with Base+2*Cache_Size
        CMP     v1, v2
        MOVEQ   v1, a1                              ; if equal, not re-entered and Next wraps back
        STRLS   v1, [a2]                            ; if lower or same, not re-entered, so update Next
        MCR     p15, 0, a1, c7, c10, 4              ; drain WBuffer (waits, so no need for CPWAIT)
        Pull    "a2-a4, v1, v2, pc"

  [ XScaleMiniCache

Cache_MiniInvalidateAll_WB_Cal_LD ROUT
;
; similar to Cache_CleanAll_WB_Cal_LD, but must do direct reads (cannot use allocate address MCR), and
; 'cleans' to achieve invalidate as side effect (mini cache will be configured writethrough)
;
        Push    "a2-a4, v1, v2, lr"
        LDR     lr, =ZeroPage
        LDR     a1, [lr, #MCache_CleanBaseAddress]
        LDR     a2, =ZeroPage+MCache_CleanNextAddr
        LDR     a3, [lr, #MCache_Size]
        LDRB    a4, [lr, #MCache_LineLen]
        MOV     v2, #0
        SWP     v1, v2, [a2]                        ; read current CleanNextAddr, zero it (semaphore)
        TEQ     v1, #0                              ; but if it is already zero, we have re-entered
        ADDEQ   v1, a1, a3, LSL #1                  ; if re-entered, start clean at Base+2*Cache_Size
        ADDEQ   v2, v1, a3, LSL #1                  ; if re-entered, do a clean of 2*Cache_Size
        ADDNE   v2, v1, a3                          ; if not re-entered, do a clean of Cache_Size
10
        LDR     lr, [v1], a4                        ; read a line of cleaner data
        TEQ     v1, v2
        BNE     %BT10
        ADD     v2, a1, a3, LSL #1                  ; compare end address with Base+2*Size
        CMP     v1, v2
        MOVEQ   v1, a1                              ; if equal, not re-entered and Next wraps back
        STRLS   v1, [a2]                            ; if lower or same, not re-entered, so update Next
        ; note, no drain WBuffer, since we are really only invalidating a writethrough cache
        Pull    "a2-a4, v1, v2, pc"

  ] ; XScaleMiniCache


Cache_CleanInvalidateAll_WB_Cal_LD ROUT
;
; - cleans main cache (and invalidates wrt OS stuff as a side effect)
; - if mini cache in use (will be writethrough), 'cleans' in order to invalidate as side effect
;
        Push    "lr"
        BL      Cache_CleanAll_WB_Cal_LD
  [ XScaleMiniCache
        BL      Cache_MiniInvalidateAll_WB_Cal_LD
  ]
        MCR     p15, 0, a1, c7, c5, 0                ; invalidate ICache and BTB
        CPWAIT
        Pull    "pc"


Cache_InvalidateAll_WB_Cal_LD ROUT
;
; no clean, assume caller knows what's happening
;
        MCR     p15, 0, a1, c7, c7, 0           ; invalidate DCache, (MiniCache), ICache and BTB
        CPWAIT
        MOV     pc, lr


Cache_RangeThreshold_WB_Cal_LD ROUT
        LDR     a1, =ZeroPage
        LDR     a1, [a1, #DCache_RangeThreshold]
        MOV     pc, lr


MMU_ChangingUncached_WB_Cal_LD ROUT
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer (waits, so no need for CPWAIT)
 ]
TLB_InvalidateAll_WB_Cal_LD
        MCR     p15, 0, a1, c8, c7, 0           ; invalidate ITLB and DTLB
        CPWAIT
        MOV     pc, lr


MMU_ChangingUncachedEntry_WB_Cal_LD ROUT
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer (waits, so no need for CPWAIT)
 ]
TLB_InvalidateEntry_WB_Cal_LD
        MCR     p15, 0, a1, c8, c5, 1           ; invalidate ITLB entry
        MCR     p15, 0, a1, c8, c6, 1           ; invalidate DTLB entry
        CPWAIT
        MOV     pc, lr


DSB_ReadWrite_WB_Cal_LD ROUT
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer (waits, so no need for CPWAIT)
        MOV     pc, lr


IMB_Full_WB_Cal_LD
        Push    "lr"
        BL      Cache_CleanAll_WB_Cal_LD             ; clean DCache (wrt to non-interrupt stuff)
        MCR     p15, 0, a1, c7, c5, 0                ; invalidate ICache and BTB
        CPWAIT
        Pull    "pc"


IMB_Range_WB_Cal_LD ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024                     ; arbitrary-ish range threshold
        ADD     a2, a2, a1
        BHS     IMB_Full_WB_Cal_LD
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1           ; clean DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1            ; invalidate ICache entry
 ]
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0            ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6            ; invalidate BTB
 ]
        MCR     p15, 0, a1, c7, c10, 4           ; drain WBuffer (waits, so no need for CPWAIT)
        Pull    "pc"


IMB_List_WB_Cal_LD ROUT
        CMP     a3, #32*1024                     ; arbitrary-ish range threshold
        BHS     IMB_Full_WB_Cal_LD
        Push    "v1-v2,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
05
        LDMIA   a1!, {v1-v2}
10
        MCR     p15, 0, v1, c7, c10, 1           ; clean DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, v1, c7, c5, 1            ; invalidate ICache entry
 ]
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0            ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6            ; invalidate BTB
 ]
        MCR     p15, 0, a1, c7, c10, 4           ; drain WBuffer (waits, so no need for CPWAIT)
        Pull    "v1-v2,pc"


MMU_Changing_WB_Cal_LD ROUT
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer (waits, so no need for CPWAIT)
 ]
        Push    "lr"
        MCR     p15, 0, a1, c8, c7, 0           ; invalidate ITLB and DTLB
        BL      Cache_CleanAll_WB_Cal_LD
        MCR     p15, 0, a1, c7, c5, 0           ; invalidate ICache and BTB
        CPWAIT
        Pull    "pc"

MMU_ChangingEntry_WB_Cal_LD ROUT
;
;there is no clean&invalidate DCache instruction, however we can do clean
;entry followed by invalidate entry without an interrupt hole, because they
;are for the same virtual address (and that virtual address will not be
;involved in interrupts, since it is involved in remapping)
;
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer (waits, so no need for CPWAIT)
 ]
        Push    "a2, lr"
        ADD     a2, a1, #PageSize
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
        MCR     p15, 0, a1, c8, c6, 1           ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1           ; invalidate ITLB entry
10
        MCR     p15, 0, a1, c7, c10, 1          ; clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1           ; invalidate DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1           ; invalidate ICache entry
 ]
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4          ; drain WBuffer
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0           ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6           ; invalidate BTB
 ]
        CPWAIT
        Pull    "a2, pc"


MMU_ChangingEntries_WB_Cal_LD ROUT
;
;same comments as MMU_ChangingEntry_WB_Cal_LD
;
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer (waits, so no need for CPWAIT)
 ]
        Push    "a2, a3, lr"
        MOV     a2, a2, LSL #Log2PageSize
        LDR     lr, =ZeroPage
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        MCR     p15, 0, a1, c8, c6, 1              ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1              ; invalidate ITLB entry
        ADD     a1, a1, #PageSize
        CMP     a1, a2
        BLO     %BT10
        MOV     a1, lr                             ; restore start address
20
        MCR     p15, 0, a1, c7, c10, 1             ; clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1              ; invalidate DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
 ]
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT20
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0              ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6              ; invalidate BTB
 ]
        CPWAIT
        Pull    "a2, a3, pc"
;
30
        MCR     p15, 0, a1, c8, c7, 0              ; invalidate ITLB and DTLB
        BL      Cache_CleanInvalidateAll_WB_Cal_LD
        CPWAIT
        Pull    "a2, a3, pc"


Cache_CleanRange_WB_Cal_LD ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ; clean DCache entry
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer (waits, so no need for CPWAIT)
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanAll_WB_Cal_LD


Cache_InvalidateRange_WB_Cal_LD ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3, LSL #1                     ;assume clean+invalidate slower than just invalidate
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c6, 1              ; invalidate DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
 ]
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0              ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6              ; invalidate BTB
 ]
        CPWAIT
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_Cal_LD


Cache_CleanInvalidateRange_WB_Cal_LD ROUT
;
;same comments as MMU_ChangingEntry_WB_Cal_LD
;
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
        MCR     p15, 0, a1, c7, c10, 1             ; clean DCache entry
        MCR     p15, 0, a1, c7, c6, 1              ; invalidate DCache entry
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
 ]
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0              ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6              ; invalidate BTB
 ]
        CPWAIT
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_Cal_LD

MMU_ChangingUncachedEntries_WB_Cal_LD ROUT
 [ CacheablePageTables
        MCR     p15, 0, a1, c7, c10, 4             ; drain WBuffer (waits, so no need for CPWAIT)
 ]
        CMP     a2, #32                            ; arbitrary-ish threshold
        BHS     %FT20
        Push    "lr"
        MOV     lr, a2
10
        MCR     p15, 0, a1, c8, c6, 1              ; invalidate DTLB entry
        MCR     p15, 0, a1, c8, c5, 1              ; invalidate ITLB entry
        SUBS    lr, lr, #1
        ADD     a1, a1, #PageSize
        BNE     %BT10
        CPWAIT
        Pull    "pc"
;
20
        MCR     p15, 0, a1, c8, c7, 0              ; invalidate ITLB and DTLB
        CPWAIT
        MOV     pc, lr


ICache_InvalidateRange_WB_Cal_LD ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
10
 [ :LNOT:XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 1              ; invalidate ICache entry
 ]
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
 [ XScaleJTAGDebug
        MCR     p15, 0, a1, c7, c5, 0              ; invalidate ICache and BTB
 |
        MCR     p15, 0, a1, c7, c5, 6              ; invalidate BTB
 ]
        CPWAIT
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       ICache_InvalidateAll_WB_Cal_LD


ICache_InvalidateAll_WB_Cal_LD
        MCR     p15, 0, a1, c7, c5, 0                ; invalidate ICache and BTB
        CPWAIT
        MOV     pc, lr

 ] ; MEMM_Type = "ARM600"

 [ MEMM_Type = "VMSAv6" ; Need appropriate myIMB, etc. implementations if this is to be removed

; --------------------------------------------------------------------------
; ----- ARMops for Cortex-A8 and the like ----------------------------------
; --------------------------------------------------------------------------

; WB_CR7_Lx refers to ARMs with writeback data cache, cleaned with
; register 7, and (potentially) multiple cache levels
;
; DCache_LineLen = smallest data/unified cache line length
; ICache_LineLen = smallest instruction cache line length
; DCache_RangeThreshold = clean threshold for data cache
; Cache_Lx_Info = Cache level ID register
; Cache_Lx_DTable = Cache size identification register for all 7 data/unified caches
; Cache_Lx_ITable = Cache size identification register for all 7 instruction caches

; ARMv7 cache maintenance routines are a bit long-winded, so we use this macro
; to reduce the risk of mistakes creeping in due to code duplication
;
; $op: Operation to perform ('clean', 'invalidate', 'cleaninvalidate')
; $levels: Which levels to apply to ('lou', 'loc', 'louis')
; Uses r0-r8 & lr as temp
; Performs the indicated op on the indicated data & unified caches
;
; Code based around the alternate/faster code given in the ARMv7 ARM (section
; B2.2.4, alternate/faster code only in doc revision 9), but tightened up a bit
;
; Note that HAL_InvalidateCache_ARMvF uses its own implementation of this
; algorithm, since it must cope with different temporary registers and it needs
; to read the cache info straight from the CP15 registers
;
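; The set/way operand assembled in r6 below follows the architected
; DCCSW/DCISW/DCCISW layout: cache level in bits [3:1], set index shifted up
; by log2(line length in bytes), and way number in the uppermost bits (the
; shift amount being derived via CLZ from the maximum way number)
;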
        MACRO
        MaintainDataCache_WB_CR7_Lx $op, $levels
        LDR     lr, =ZeroPage
        LDR     r0, [lr, #Cache_Lx_Info]!
        ADD     lr, lr, #Cache_Lx_DTable-Cache_Lx_Info
      [ "$levels"="lou"
        ANDS    r3, r0, #&38000000
        MOV     r3, r3, LSR #26 ; Cache level value (naturally aligned)
      |
      [ "$levels"="loc"
        ANDS    r3, r0, #&07000000
        MOV     r3, r3, LSR #23 ; Cache level value (naturally aligned)
      |
      [ "$levels"="louis"
        ANDS    r3, r0, #&00E00000
        MOV     r3, r3, LSR #20 ; Cache level value (naturally aligned)
      |
        ! 1, "Unrecognised levels"
      ]
      ]
      ]
        BEQ     %FT50
        MOV     r8, #0 ; Current cache level
10 ; Loop1
        ADD     r2, r8, r8, LSR #1 ; Work out 3 x cachelevel
        MOV     r1, r0, LSR r2 ; bottom 3 bits are the Cache type for this level
        AND     r1, r1, #7 ; get those 3 bits alone
        CMP     r1, #2
        BLT     %FT40 ; no cache or only instruction cache at this level
        LDR     r1, [lr, r8, LSL #1] ; read CCSIDR to r1
        AND     r2, r1, #CCSIDR_LineSize_mask ; extract the line length field
        ADD     r2, r2, #4 ; add 4 for the line length offset (log2 16 bytes)
        LDR     r7, =CCSIDR_Associativity_mask:SHR:CCSIDR_Associativity_pos
        AND     r7, r7, r1, LSR #CCSIDR_Associativity_pos ; r7 is the maximum way number (right aligned)
        CLZ     r5, r7 ; r5 is the bit position of the way size increment
        LDR     r4, =CCSIDR_NumSets_mask:SHR:CCSIDR_NumSets_pos
        AND     r4, r4, r1, LSR #CCSIDR_NumSets_pos ; r4 is the max number of the index size (right aligned)
20 ; Loop2
        MOV     r1, r4 ; r1 working copy of the max index size (right aligned)
30 ; Loop3
        ORR     r6, r8, r7, LSL r5 ; factor in the way number and cache number into r6
        ORR     r6, r6, r1, LSL r2 ; factor in the index number
      [ "$op"="clean"
        DCCSW   r6 ; Clean
      |
      [ "$op"="invalidate"
        DCISW   r6 ; Invalidate
      |
      [ "$op"="cleaninvalidate"
        DCCISW  r6 ; Clean & invalidate
      |
        ! 1, "Unrecognised op"
      ]
      ]
      ]
        SUBS    r1, r1, #1 ; decrement the index
        BGE     %BT30
        SUBS    r7, r7, #1 ; decrement the way number
        BGE     %BT20
        DSB                ; Cortex-A7 errata 814220: DSB required when changing cache levels when using set/way operations. This also counts as our end-of-maintenance DSB.
40 ; Skip
        ADD     r8, r8, #2
        CMP     r3, r8
        BGT     %BT10
50 ; Finished
        MEND

Cache_CleanAll_WB_CR7_Lx ROUT
; Clean cache by traversing all sets and ways for all data caches
        Push    "r1-r8,lr"
        MaintainDataCache_WB_CR7_Lx clean, loc
        Pull    "r1-r8,pc"


Cache_CleanInvalidateAll_WB_CR7_Lx ROUT
;
; similar to Cache_CleanAll, but does clean&invalidate of Dcache, and invalidates ICache
;
        Push    "r1-r8,lr"
        MaintainDataCache_WB_CR7_Lx cleaninvalidate, loc
        ICIALLU                       ; invalidate ICache + branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "r1-r8,pc"


Cache_InvalidateAll_WB_CR7_Lx ROUT
;
; no clean, assume caller knows what's happening
;
        Push    "r1-r8,lr"
        MaintainDataCache_WB_CR7_Lx invalidate, loc
        ICIALLU                       ; invalidate ICache + branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "r1-r8,pc"


Cache_RangeThreshold_WB_CR7_Lx ROUT
        LDR     a1, =ZeroPage
        LDR     a1, [a1, #DCache_RangeThreshold]
        MOV     pc, lr


; In:  r1 = cache level (0-based)
; Out: r0 = Flags
;           bits 0-2: cache type:
;              000 -> none
;              001 -> instruction
;              010 -> data
;              011 -> split
;              100 -> unified
;              1xx -> reserved
;           Other bits: reserved
;      r1 = D line length
;      r2 = D size
;      r3 = I line length
;      r4 = I size
;      r0-r4 = zero if cache level not present
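;
; Sizes are derived from the relevant CCSIDR as
;   size = (NumSets+1) * (Associativity+1) * line length
; where line length = 16 << LineSize bytes (the LineSize field holds
; log2(words per line) - 2)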
Cache_Examine_WB_CR7_Lx ROUT
        Entry   "r5"
        LDR     r5, =ZeroPage
        LDR     r0, [r5, #Cache_Lx_Info]!
        ADD     r5, r5, #Cache_Lx_DTable-Cache_Lx_Info
        BIC     r0, r0, #&00E00000
        ; Shift the CLIDR until we hit a zero entry or the desired level
        ; (could shift by exactly the amount we want... but ARM say not to do
        ; that since they may decide to re-use bits)
10
        TEQ     r1, #0
        TSTNE   r0, #7
        SUBNE   r1, r1, #1
        MOVNE   r0, r0, LSR #3
        ADDNE   r5, r5, #4
        BNE     %BT10
        ANDS    r0, r0, #7
        MOV     r1, #0
        MOV     r2, #0
        MOV     r3, #0
        MOV     r4, #0
        EXIT    EQ
        TST     r0, #6 ; Data or unified cache present?
        BEQ     %FT20
        LDR     lr, [r5]
        LDR     r1, =CCSIDR_NumSets_mask:SHR:CCSIDR_NumSets_pos
        LDR     r2, =CCSIDR_Associativity_mask:SHR:CCSIDR_Associativity_pos
        AND     r1, r1, lr, LSR #CCSIDR_NumSets_pos
        AND     r2, r2, lr, LSR #CCSIDR_Associativity_pos
        ADD     r1, r1, #1
        ADD     r2, r2, #1
        MUL     r2, r1, r2
        AND     r1, lr, #CCSIDR_LineSize_mask
        ASSERT  CCSIDR_LineSize_pos = 0
        MOV     lr, #16
        MOV     r1, lr, LSL r1
        MUL     r2, r1, r2
20
        TEQ     r0, #4 ; Unified cache?
        MOVEQ   r3, r1
        MOVEQ   r4, r2
        TST     r0, #1 ; Instruction cache present?
        EXIT    EQ
        LDR     lr, [r5, #Cache_Lx_ITable-Cache_Lx_DTable]
        LDR     r3, =CCSIDR_NumSets_mask:SHR:CCSIDR_NumSets_pos
        LDR     r4, =CCSIDR_Associativity_mask:SHR:CCSIDR_Associativity_pos
        AND     r3, r3, lr, LSR #CCSIDR_NumSets_pos
        AND     r4, r4, lr, LSR #CCSIDR_Associativity_pos
        ADD     r3, r3, #1
        ADD     r4, r4, #1
        MUL     r4, r3, r4
        AND     r3, lr, #CCSIDR_LineSize_mask
        ASSERT  CCSIDR_LineSize_pos = 0
        MOV     lr, #16
        MOV     r3, lr, LSL r3
        MUL     r4, r3, r4
        EXIT


MMU_ChangingUncached_WB_CR7_Lx
        DSB            ; Ensure the page table write has actually completed
        ISB            ; Also required
TLB_InvalidateAll_WB_CR7_Lx ROUT
        TLBIALL                       ; invalidate ITLB and DTLB
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr


; a1 = page affected (page aligned address)
;
MMU_ChangingUncachedEntry_WB_CR7_Lx
        DSB
        ISB
TLB_InvalidateEntry_WB_CR7_Lx ROUT
        TLBIMVA a1                    ; invalidate ITLB & DTLB entry
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr


IMB_Full_WB_CR7_Lx ROUT
;
; do: clean DCache; drain WBuffer, invalidate ICache/branch predictor
; Luckily, we only need to clean as far as the level of unification
;
        Push    "r1-r8,lr"
        MaintainDataCache_WB_CR7_Lx clean, lou
        ICIALLU                       ; invalidate ICache
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "r1-r8,pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
IMB_Range_WB_CR7_Lx ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024 ; Maximum L1 cache size on Cortex-A8 is 32K, use that to guess what approach to take
        ADD     a2, a2, a1
        BHS     IMB_Full_WB_CR7_Lx
        Push    "a1,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        DCCMVAU a1                    ; clean DCache entry by VA to PoU
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        DSB          ; Wait for clean to complete
        Pull    "a1" ; Get start address back
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
10
        ICIMVAU a1                    ; invalidate ICache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "pc"

;  a1 = pointer to list of (start, end) address pairs
;  a2 = pointer to end of list
;  a3 = total amount of memory to be synchronised
;
IMB_List_WB_CR7_Lx ROUT
        CMP     a3, #32*1024 ; Maximum L1 cache size on Cortex-A8 is 32K, use that to guess what approach to take
        BHS     IMB_Full_WB_CR7_Lx
        Push    "a1,v1-v2,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
05
        LDMIA   a1!, {v1-v2}
10
        DCCMVAU v1                    ; clean DCache entry by VA to PoU
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        DSB          ; Wait for clean to complete
        Pull    "a1" ; Get start address back
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
05
        LDMIA   a1!, {v1-v2}
10
        ICIMVAU v1                    ; invalidate ICache entry
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "v1-v2,pc"

MMU_Changing_WB_CR7_Lx ROUT
        DSB                           ; Ensure the page table write has actually completed
        ISB                           ; Also required
        TLBIALL                       ; invalidate ITLB and DTLB
        DSB                           ; Wait for TLB invalidation to complete
        ISB                           ; Ensure that the effects are visible
        B       Cache_CleanInvalidateAll_WB_CR7_Lx

; a1 = page affected (page aligned address)
;
MMU_ChangingEntry_WB_CR7_Lx ROUT
        Push    "a2, lr"
        DSB                           ; Ensure the page table write has actually completed
        ISB                           ; Also required
        TLBIMVA a1                    ; invalidate DTLB and ITLB
        DSB                           ; Wait for TLB invalidation to complete
        ISB                           ; Ensure that the effects are visible
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
        ADD     a2, a1, #PageSize
10
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, lr
        CMP     a1, a2
        BNE     %BT10
        DSB     ; Wait for clean to complete
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
        SUB     a1, a2, #PageSize     ; Get start address back
10
        ICIMVAU a1                    ; invalidate ICache entry to PoU
        ADD     a1, a1, lr
        CMP     a1, a2
        BNE     %BT10
        BPIALL                        ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2, pc"

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_WB_CR7_Lx ROUT
        Push    "a2, a3, lr"
        DSB     ; Ensure the page table write has actually completed
        ISB     ; Also required
        MOV     a2, a2, LSL #Log2PageSize
        LDR     lr, =ZeroPage
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT90
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        TLBIMVA a1                    ; invalidate DTLB & ITLB entry
        ADD     a1, a1, #PageSize
        CMP     a1, a2
        BNE     %BT10
        DSB
        ISB
        MOV     a1, lr                ; Get start address back
20
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT20
        DSB     ; Wait for clean to complete
        LDR     a3, =ZeroPage
        LDRB    a3, [a3, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
        MOV     a1, lr                ; Get start address back
30
        ICIMVAU a1                    ; invalidate ICache entry to PoU
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT30
        BPIALL                        ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2, a3, pc"
;
90
        TLBIALL                       ; invalidate ITLB and DTLB
        DSB                           ; Wait for TLB invalidation to complete
        ISB                           ; Ensure that the effects are visible
        BL      Cache_CleanInvalidateAll_WB_CR7_Lx
        Pull    "a2, a3, pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_CleanRange_WB_CR7_Lx ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        DCCMVAC a1                    ; clean DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT10
        DSB     ; Wait for clean to complete
        ISB
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanAll_WB_CR7_Lx

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_InvalidateRange_WB_CR7_Lx ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3, LSL #1                     ;assume clean+invalidate slower than just invalidate
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        DCIMVAC a1                    ; invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        LDR     a3, =ZeroPage
        LDRB    a3, [a3, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
        MOV     a1, lr ; Get start address back
10
        ICIMVAU a1                    ; invalidate ICache entry to PoU
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        BPIALL                        ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_CR7_Lx

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_CleanInvalidateRange_WB_CR7_Lx ROUT
        Push    "a2, a3, lr"
        LDR     lr, =ZeroPage
        SUB     a2, a2, a1
        LDR     a3, [lr, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     a2, a3
        BHS     %FT30
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        DSB     ; Wait for clean to complete
        LDR     a3, =ZeroPage
        LDRB    a3, [a3, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
        MOV     a1, lr ; Get start address back
10
        ICIMVAU a1                    ; invalidate ICache entry to PoU
        ADD     a1, a1, a3
        CMP     a1, a2
        BLO     %BT10
        BPIALL                        ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2, a3, pc"
;
30
        Pull    "a2, a3, lr"
        B       Cache_CleanInvalidateAll_WB_CR7_Lx

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingUncachedEntries_WB_CR7_Lx ROUT
        Push    "a2,lr"
        DSB     ; Ensure the page table write has actually completed
        ISB     ; Also required
        CMP     a2, #32               ; arbitrary-ish threshold
        BLO     %FT10
        TLBIALL                       ; invalidate ITLB and DTLB
        B       %FT20
10
        TLBIMVA a1                    ; invalidate DTLB & ITLB entry
        ADD     a1, a1, #PageSize
        SUBS    a2, a2, #1
        BNE     %BT10
20
        BPIALL                        ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2,pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
ICache_InvalidateRange_WB_CR7_Lx ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024 ; Maximum L1 cache size on Cortex-A8 is 32K, use that to guess what approach to take
        ADD     a2, a2, a1
        BHS     ICache_InvalidateAll_WB_CR7_Lx
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen]
10
        ICIMVAU a1                    ; invalidate ICache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "pc"

ICache_InvalidateAll_WB_CR7_Lx ROUT
        ICIALLU                       ; invalidate ICache
        BPIALL                        ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr

        LTORG

   [ SMP
; --------------------------------------------------------------------------
; ----- ARMops for ARMv7+ with multiprocessing extensions ------------------
; --------------------------------------------------------------------------

; These are MP-safe versions of the standard ARMv7 ARMops (WB_CR7_Lx).
; Where possible they use maintenance ops that broadcast to the other cores in
; the system.

Cache_CleanAll_ARMv7MP * Cache_CleanAll_WB_CR7_Lx ; DOES NOT BROADCAST
Cache_CleanInvalidateAll_ARMv7MP * Cache_CleanInvalidateAll_WB_CR7_Lx ; DOES NOT BROADCAST
Cache_InvalidateAll_ARMv7MP * Cache_InvalidateAll_WB_CR7_Lx ; DOES NOT BROADCAST
Cache_Examine_ARMv7MP * Cache_Examine_WB_CR7_Lx
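; (The three ops above marked DOES NOT BROADCAST work by set/way; set/way
; maintenance only ever acts on the core that issues it, so there is no
; broadcast form available for them.)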

        ; Call appropriate ranged/full ICache invalidate code
        ; $type = ARMop type suffix
        ; $start = reg containing start addr
        ; a2 = end addr
        ; $pull = stacked registers (except lr, which must be stacked)
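        ; Typical use (as in MMU_ChangingEntries_ARMv7MP below):
        ;   ICacheInvalidate ARMv7MP, lr, "a2,a3"
        ; which performs a ranged invalidate via ICache_InvalidateRange_$type._alt
        ; for ranges under 32K, or pulls the listed registers and tail-calls
        ; ICache_InvalidateAll_$type otherwise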
        MACRO
        ICacheInvalidate $type, $start, $pull
        ASSERT  "$start" <> "a2"
      [ "$start" = "lr"
        MOV     a1, lr
      ]
        SUB     lr, a2, $start
        CMP     lr, #32*1024
      [ "$start" <> "lr" :LAND: "$start" <> "a1"
        MOVLO   a1, $start
      ]
     [ "$pull" <> ""
        ; If $pull contains a1 or a2 then we'll have to postpone pulling them
        ; until after the ranged call. But there's no straightforward way of
        ; checking for that, so we'll take the easy way out and always
        ; postpone pulling for ranged calls.
        Pull    "$pull,lr",HS
        BHS     ICache_InvalidateAll_$type
        Push    "pc"
        B       ICache_InvalidateRange_$type._alt
        Pull    "$pull,pc"
     |
        BLO     ICache_InvalidateRange_$type._alt
        Pull    "lr"
        B       ICache_InvalidateAll_$type
     ]
        MEND

Cache_RangeThreshold_ARMv7MP ROUT
        MVN     a1, #0                ; Claim 4G-1 to discourage global ops (since they don't broadcast)
        MOV     pc, lr

MMU_ChangingUncached_ARMv7MP
        DSB            ; Ensure the page table write has actually completed
        ISB            ; Also required
TLB_InvalidateAll_ARMv7MP ROUT
        TLBIALLIS                     ; invalidate ITLB and DTLB
        BPIALLIS                      ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr

; a1 = page affected (page aligned address)
;
MMU_ChangingUncachedEntry_ARMv7MP
        DSB
        ISB
TLB_InvalidateEntry_ARMv7MP
        TLBIMVAAIS a1                 ; invalidate ITLB & DTLB entry
        BPIALLIS                      ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr


IMB_Full_ARMv7MP ROUT ; Only cleans local DCache
;
; do: clean DCache; drain WBuffer, invalidate ICache/branch predictor
; Luckily, we only need to clean as far as the level of unification
;
        Push    "r1-r8,lr"
        MaintainDataCache_WB_CR7_Lx clean, lou
        ICIALLUIS                     ; invalidate ICache
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "r1-r8,pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
IMB_Range_ARMv7MP ROUT
        Push    "a1,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        DCCMVAU a1                    ; clean DCache entry by VA to PoU
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        DSB          ; Wait for clean to complete
        Pull    "a1" ; Get start address back
        ICacheInvalidate ARMv7MP, a1

;  a1 = pointer to list of (start, end) address pairs
;  a2 = pointer to end of list
;  a3 = total amount of memory to be synchronised
;
IMB_List_ARMv7MP ROUT
        Push    "a1,v1-v2,lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
05
        LDMIA   a1!, {v1-v2}
10
        DCCMVAU v1                    ; clean DCache entry by VA to PoU
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        DSB          ; Wait for clean to complete
        Pull    "a1" ; Get start address back
        CMP     a3, #32*1024          ; see if global ICache invalidate sensible
        Pull    "v1-v2,lr",HS
        BHS     ICache_InvalidateAll_ARMv7MP
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen] ; Use ICache line length, on some CPUs I&D will differ
05
        LDMIA   a1!, {v1-v2}
10
        ICIMVAU v1                    ; invalidate ICache entry
        ADD     v1, v1, lr
        CMP     v1, v2
        BLO     %BT10
        CMP     a1, a2
        BNE     %BT05
        BPIALLIS                      ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "v1-v2,pc"

MMU_Changing_ARMv7MP ROUT ; Only cleans local caches
        DSB                           ; Ensure the page table write has actually completed
        ISB                           ; Also required
        TLBIALLIS                     ; invalidate ITLB and DTLB
        DSB                           ; Wait for TLB invalidation to complete
        ISB                           ; Ensure that the effects are visible
        B       Cache_CleanInvalidateAll_ARMv7MP

; a1 = page affected (page aligned address)
;
MMU_ChangingEntry_ARMv7MP ROUT
        Push    "a2, lr"
        DSB                           ; Ensure the page table write has actually completed
        ISB                           ; Also required
        TLBIMVAAIS a1                 ; invalidate DTLB and ITLB
        DSB                           ; Wait for TLB invalidation to complete
        ISB                           ; Ensure that the effects are visible
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
        ADD     a2, a1, #PageSize
10
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, lr
        CMP     a1, a2
        BNE     %BT10
        DSB     ; Wait for clean to complete
        SUB     a1, a2, #PageSize     ; Get start address back
        Push    "pc"
        B       ICache_InvalidateRange_ARMv7MP_alt ; Skip the range check; we know 4K is small enough to want a ranged invalidate
        Pull    "a2, pc"

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_ARMv7MP ROUT
        Push    "a2, a3, lr"
        DSB     ; Ensure the page table write has actually completed
        ISB     ; Also required
        MOV     a2, a2, LSL #Log2PageSize
        LDR     lr, =ZeroPage
        ADD     a2, a2, a1                         ;clean end address (exclusive)
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        TLBIMVAAIS a1                 ; invalidate DTLB & ITLB entry
        ADD     a1, a1, #PageSize
        CMP     a1, a2
        BNE     %BT10
        DSB
        ISB
        MOV     a1, lr                ; Get start address back
20
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT20
        DSB     ; Wait for clean to complete
        ICacheInvalidate ARMv7MP, lr, "a2,a3"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_CleanRange_ARMv7MP ROUT
        Push    "lr"
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #DCache_LineLen]
10
        DCCMVAC a1                    ; clean DCache entry to PoC
        ADD     a1, a1, lr
        CMP     a1, a2
        BNE     %BT10
        DSB     ; Wait for clean to complete
        ISB
        Pull    "pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_InvalidateRange_ARMv7MP ROUT
        Push    "a3, lr"
        LDR     lr, =ZeroPage
        LDRB    a3, [lr, #DCache_LineLen]
        MOV     lr, a1
10
        DCIMVAC a1                    ; invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT10
        ICacheInvalidate ARMv7MP, lr, "a3"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
Cache_CleanInvalidateRange_ARMv7MP ROUT
        Push    "a3, lr"
        LDR     lr, =ZeroPage
        LDRB    a3, [lr, #DCache_LineLen] ; log2(line len)-2
        MOV     lr, a1
10
        DCCIMVAC a1                   ; clean&invalidate DCache entry to PoC
        ADD     a1, a1, a3
        CMP     a1, a2
        BNE     %BT10
        DSB     ; Wait for clean to complete
        ICacheInvalidate ARMv7MP, lr, "a3"

; a1 = first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingUncachedEntries_ARMv7MP ROUT
        Push    "a2,lr"
        DSB     ; Ensure the page table write has actually completed
        ISB     ; Also required
        CMP     a2, #32               ; arbitrary-ish threshold
        BLO     %FT10
        TLBIALLIS                     ; invalidate ITLB and DTLB
        B       %FT20
10
        TLBIMVAAIS a1                 ; invalidate DTLB & ITLB entry
        ADD     a1, a1, #PageSize
        SUBS    a2, a2, #1
        BNE     %BT10
20
        BPIALLIS                      ; invalidate branch predictors
        DSB
        ISB
        Pull    "a2,pc"

;  a1 = start address (inclusive, cache line aligned)
;  a2 = end address (exclusive, cache line aligned)
;
ICache_InvalidateRange_ARMv7MP ROUT
        SUB     a2, a2, a1
        CMP     a2, #32*1024 ; Maximum L1 cache size on Cortex-A8 is 32K, use that to guess what approach to take
        ADD     a2, a2, a1
        BHS     ICache_InvalidateAll_ARMv7MP
        Push    "lr"
ICache_InvalidateRange_ARMv7MP_alt
        LDR     lr, =ZeroPage
        LDRB    lr, [lr, #ICache_LineLen]
10
        ICIMVAU a1                    ; invalidate ICache entry
        ADD     a1, a1, lr
        CMP     a1, a2
        BLO     %BT10
        BPIALLIS                      ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        Pull    "pc"

ICache_InvalidateAll_ARMv7MP ROUT
        ICIALLUIS                     ; invalidate ICache
        BPIALLIS                      ; invalidate branch predictors
        DSB                           ; Wait for cache/branch invalidation to complete
        ISB                           ; Ensure that the effects of the completed cache/branch invalidation are visible
        MOV     pc, lr

   ] ; SMP

; --------------------------------------------------------------------------
; ----- ARMops for PL310 L2 cache controller--------------------------------
; --------------------------------------------------------------------------

; These are a hybrid of the standard ARMv7 ARMops (WB_CR7_Lx) and the PL310
; cache maintenance ops. Currently they're only used on Cortex-A9 systems, so
; may need modifications to work with other systems.
; Specifically, the code assumes the PL310 is being used in non-exclusive mode.
;
; To make the code fully re-entrant and MP-safe, we avoid using the background
; operations (INV_WAY, CLEAN_WAY, CLEAN_INV_WAY).
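; (Background ops complete asynchronously and would have to be polled for
; completion; the atomic line and index/way ops used here complete as part of
; the issuing store.)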

        MACRO
        PL310Sync $regs, $temp
        ; Errata 753970 requires us to write to a different location when
        ; performing a sync operation for r3p0
        LDR     $temp, [$regs, #PL310_REG0_CACHE_ID]
        AND     $temp, $temp, #&3f
        TEQ     $temp, #PL310_R3P0
        MOV     $temp, #0
        STREQ   $temp, [$regs, #PL310_REG7_CACHE_SYNC_753970]
        STRNE   $temp, [$regs, #PL310_REG7_CACHE_SYNC]
        MEND

      [ :LNOT: SMP
PL310Threshold * 1024*1024 ; Arbitrary threshold for full clean
      ]

Cache_CleanInvalidateAll_PL310 ROUT
        ; Errata 727915 workaround - use CLEAN_INV_INDEX instead of CLEAN_INV_WAY
        ; Also, CLEAN_INV_WAY is a background op, while CLEAN_INV_INDEX is atomic.
        Entry   "a2-a4"
        LDR     a2, =ZeroPage
        LDR     a2, [a2, #Cache_HALDevice]
        LDR     a2, [a2, #HALDevice_Address]
        ; Clean ARM caches
      [ SMP
        BL      Cache_CleanAll_ARMv7MP
      |
        BL      Cache_CleanAll_WB_CR7_Lx
      ]
        ; Determine PL310 way, index count
        LDR     a1, [a2, #PL310_REG1_AUX_CONTROL]
        AND     a3, a1, #1<<16
        AND     a1, a1, #7<<17
        MOV     a3, a3, LSL #15
        MOV     a1, a1, LSR #17
        LDR     a4, =&FF<<5
        ORR     a3, a3, #7<<28          ; a3 = max way number (inclusive)
        ORR     a4, a4, a4, LSL a1      ; a4 = max index number (inclusive)
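        ; The REG7 index/way registers take the way number in the top bits and
        ; the index from bit 5 upwards, which is the layout built up in a3/a4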
10
        ORR     a1, a3, a4
20
        STR     a1, [a2, #PL310_REG7_CLEAN_INV_INDEX]
        SUBS    a1, a1, #1<<28          ; next way
        BCS     %BT20                   ; underflow?
        SUBS    a4, a4, #1<<5           ; next index
        BGE     %BT10
        ; Ensure the ops are actually complete
        DSB
        ; Clean & invalidate ARM caches
        PullEnv
      [ SMP
        B       Cache_CleanInvalidateAll_ARMv7MP
      |
        B       Cache_CleanInvalidateAll_WB_CR7_Lx
      ]

Cache_CleanAll_PL310 ROUT
        Entry   "a2-a4"
        LDR     a2, =ZeroPage
        LDR     a2, [a2, #Cache_HALDevice]
        LDR     a2, [a2, #HALDevice_Address]
        ; Clean ARM caches
      [ SMP
        BL      Cache_CleanAll_ARMv7MP
      |
        BL      Cache_CleanAll_WB_CR7_Lx
      ]
        ; Determine PL310 way, index count
        LDR     a1, [a2, #PL310_REG1_AUX_CONTROL]
        AND     a3, a1, #1<<16
        AND     a1, a1, #7<<17
        MOV     a3, a3, LSL #15
        MOV     a1, a1, LSR #17
        LDR     a4, =&FF<<5
        ORR     a3, a3, #7<<28          ; a3 = max way number (inclusive)
        ORR     a4, a4, a4, LSL a1      ; a4 = max index number (inclusive)
10
        ORR     a1, a3, a4
20
        STR     a1, [a2, #PL310_REG7_CLEAN_INDEX]
        SUBS    a1, a1, #1<<28          ; next way
        BCS     %BT20                   ; underflow?
        SUBS    a4, a4, #1<<5           ; next index
        BGE     %BT10
        ; Ensure the ops are actually complete
        DSB
        EXIT

; This op will be rarely (if ever) used, just implement as clean + invalidate
Cache_InvalidateAll_PL310 * Cache_CleanInvalidateAll_PL310

      [ SMP
Cache_RangeThreshold_PL310 * Cache_RangeThreshold_ARMv7MP
      |
Cache_RangeThreshold_PL310 ROUT
        MOV     a1, #PL310Threshold
        MOV     pc, lr
      ]

Cache_Examine_PL310 ROUT
        ; Assume that the PL310 is the level 2 cache
        CMP     r1, #1
        BLT     Cache_Examine_WB_CR7_Lx
        MOVGT   r0, #0
        MOVGT   r1, #0
        MOVGT   r2, #0
        MOVGT   r3, #0
        MOVGT   r4, #0
        MOVGT   pc, lr
        LDR     r0, =ZeroPage
        LDR     r0, [r0, #Cache_HALDevice]
        LDR     r0, [r0, #HALDevice_Address]
        LDR     r0, [r0, #PL310_REG1_AUX_CONTROL]
        AND     r2, r0, #&E0000 ; Get way size
        TST     r0, #1:SHL:16 ; Check associativity
        MOV     r2, r2, LSR #17
        MOVEQ   r1, #8*1024*8 ; 8KB base way size with 8 way associativity
        MOVNE   r1, #8*1024*16 ; 8KB base way size with 16 way associativity
        MOV     r2, r1, LSL r2
        ; Assume this really is a PL310 (32 byte line size, unified architecture)
        MOV     r0, #4
        MOV     r1, #32
        MOV     r3, #32
        MOV     r4, r2
        MOV     pc, lr

DSB_ReadWrite_PL310 ROUT
        Entry
        LDR     lr, =ZeroPage
        LDR     lr, [lr, #Cache_HALDevice]
        LDR     lr, [lr, #HALDevice_Address]
        ; Drain ARM write buffer
        DSB     SY
        ; Drain PL310 write buffer
        PL310Sync lr, a1
        ; Ensure the PL310 sync is complete
        DSB     SY
        EXIT

DSB_Write_PL310 ROUT
        Entry
        LDR     lr, =ZeroPage
        LDR     lr, [lr, #Cache_HALDevice]
        LDR     lr, [lr, #HALDevice_Address]
        ; Drain ARM write buffer
        DSB     ST
        ; Drain PL310 write buffer
        PL310Sync lr, a1
        ; Ensure the PL310 sync is complete
        DSB     ST
        EXIT

DMB_ReadWrite_PL310 ROUT
        Entry
        LDR     lr, =ZeroPage
        LDR     lr, [lr, #Cache_HALDevice]
        LDR     lr, [lr, #HALDevice_Address]
        ; Drain ARM write buffer
        DMB     SY
        ; Drain PL310 write buffer
        PL310Sync lr, a1
        ; Ensure the PL310 sync is complete
        DMB     SY
        EXIT

DMB_Write_PL310 ROUT
        Entry
        LDR     lr, =ZeroPage
        LDR     lr, [lr, #Cache_HALDevice]
        LDR     lr, [lr, #HALDevice_Address]
        ; Drain ARM write buffer
        DMB     ST
        ; Drain PL310 write buffer
        PL310Sync lr, a1
        ; Ensure the PL310 sync is complete
        DMB     ST
        EXIT

MMU_Changing_PL310 ROUT
        DSB     ; Ensure the page table write has actually completed
        ISB     ; Also required
      [ SMP
        TLBIALLIS ; invalidate ITLB and DTLB
      |
        TLBIALL ; invalidate ITLB and DTLB
      ]
        DSB     ; Wait for TLB invalidation to complete
        ISB     ; Ensure that the effects are visible
        B       Cache_CleanInvalidateAll_PL310

; a1 = virtual address of page affected (page aligned address)
;
MMU_ChangingEntry_PL310 ROUT
        Push    "a1-a2,lr"
        ; Do the TLB maintenance
      [ SMP
        BL      MMU_ChangingUncachedEntry_ARMv7MP
      |
        BL      MMU_ChangingUncachedEntry_WB_CR7_Lx
      ]
        ; Keep the rest simple by just calling through to MMU_ChangingEntries
        MOV     a2, #1
        B       %FT10

; a1 = virtual address of first page affected (page aligned address)
; a2 = number of pages
;
MMU_ChangingEntries_PL310
        Push    "a1-a2,lr"
        ; Do the TLB maintenance
      [ SMP
        BL      MMU_ChangingUncachedEntries_ARMv7MP
      |
        BL      MMU_ChangingUncachedEntries_WB_CR7_Lx
      ]
10      ; Arrive here from MMU_ChangingEntry_PL310
        LDR     a1, [sp]
        ; Do PL310 clean & invalidate
        ADD     a2, a1, a2, LSL #Log2PageSize
        BL      Cache_CleanInvalidateRange_PL310
        Pull    "a1-a2,pc"

; a1 = start address (inclusive, cache line aligned)
; a2 = end address (exclusive, cache line aligned)
;
Cache_CleanInvalidateRange_PL310 ROUT
        Entry   "a2-a4,v1"
        ; For simplicity, align to page boundaries
        LDR     a4, =PageSize-1
        ADD     a2, a2, a4
        BIC     a1, a1, a4
        BIC     a3, a2, a4
        SUB     v1, a3, a1
      [ :LNOT: SMP
        CMP     v1, #PL310Threshold
        BHS     %FT90
      ]
        MOV     a4, a1
        ; Behave in a similar way to the PL310 full clean & invalidate:
        ; * Clean ARM
        ; * Clean & invalidate PL310
        ; * Clean & invalidate ARM

        ; a4 = base virtual address
        ; a3 = end virtual address
        ; v1 = length

        ; Clean ARM
        LDR     a1, =ZeroPage
      [ :LNOT: SMP
        LDR     lr, [a1, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     lr, v1
        ADRLE   lr, %FT30
        BLE     Cache_CleanAll_WB_CR7_Lx
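        ; (BLE is a plain conditional branch, not a branch-with-link; the ADRLE
        ; above sets lr by hand so that, when the global clean is cheaper,
        ; Cache_CleanAll_WB_CR7_Lx returns directly to the PL310 stage at
        ; label 30 below.)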
      ]
        ; Clean each page in turn
        LDRB    a2, [a1, #DCache_LineLen]
20
        DCCMVAC a4                      ; clean DCache entry to PoC
        ADD     a4, a4, a2
        CMP     a4, a3
        BNE     %BT20
        DSB     ; Wait for clean to complete
        SUB     a4, a3, v1

30
        ; Clean & invalidate PL310
        LDR     a1, =ZeroPage
        LDR     a2, [a1, #Cache_HALDevice]
        LDR     a2, [a2, #HALDevice_Address]
        ; Clean & invalidate each line/index of the pages
50
        ; Convert logical addr to physical.
        ; Use the ARMv7 CP15 registers for convenience.
        PHPSEI
        MCR     p15, 0, a4, c7, c8, 0   ; ATS1CPR
        ISB
        MRC     p15, 0, a1, c7, c4, 0   ; Get result
        PLP
        TST     a1, #1
        ADD     a4, a4, #PageSize
        BNE     %FT75                   ; Lookup failed - assume this means that the page doesn't need cleaning from the PL310
        ; Point to last line in page, and mask out attributes returned by the
        ; lookup
        ORR     a1, a1, #&FE0
        BIC     a1, a1, #&01F
60
        STR     a1, [a2, #PL310_REG7_CLEAN_INV_PA]
        TST     a1, #&FE0
        SUB     a1, a1, #1<<5           ; next index
        BNE     %BT60
75
        CMP     a4, a3
        BNE     %BT50
        ; Sync
        DSB
        ; Clean & invalidate ARM
        SUB     a1, a3, v1
        MOV     a2, a3
      [ SMP
        BL      Cache_CleanInvalidateRange_ARMv7MP
      |
        BL      Cache_CleanInvalidateRange_WB_CR7_Lx
      ]
        EXIT
      [ :LNOT: SMP
90
        ; Full clean required
        PullEnv
        B       Cache_CleanInvalidateAll_PL310
      ]

; a1 = start address (inclusive, cache line aligned)
; a2 = end address (exclusive, cache line aligned)
;
Cache_CleanRange_PL310 ROUT
        Entry   "a2-a4,v1"
        ; For simplicity, align to page boundaries
        LDR     a4, =PageSize-1
        ADD     a2, a2, a4
        BIC     a1, a1, a4
        BIC     a3, a2, a4
        SUB     v1, a3, a1
      [ :LNOT: SMP
        CMP     v1, #PL310Threshold
        BHS     %FT90
      ]
        MOV     a4, a1
        ; a4 = base virtual address
        ; a3 = end virtual address
        ; v1 = length

        ; Clean ARM
        LDR     a1, =ZeroPage
      [ :LNOT: SMP
        LDR     lr, [a1, #DCache_RangeThreshold]   ;check whether cheaper to do global clean
        CMP     lr, v1
        ADRLE   lr, %FT30
        BLE     Cache_CleanAll_WB_CR7_Lx
      ]
        ; Clean each page in turn
        LDRB    a2, [a1, #DCache_LineLen]
20
        DCCMVAC a4                      ; clean DCache entry to PoC
        ADD     a4, a4, a2
        CMP     a4, a3
        BNE     %BT20
        DSB     ; Wait for clean to complete
        SUB     a4, a3, v1

30
        ; Clean PL310
        LDR     a1, =ZeroPage
        LDR     a2, [a1, #Cache_HALDevice]
        LDR     a2, [a2, #HALDevice_Address]
        ; Clean each line/index of the pages
50
        ; Convert logical addr to physical.
        ; Use the ARMv7 CP15 registers for convenience.
        PHPSEI
        MCR     p15, 0, a4, c7, c8, 0   ; ATS1CPR
        ISB
        MRC     p15, 0, a1, c7, c4, 0   ; Get result
        PLP
        TST     a1, #1
        ADD     a4, a4, #PageSize
        BNE     %FT75                   ; Lookup failed - assume this means that the page doesn't need cleaning from the PL310
        ; Point to last line in page, and mask out attributes returned by the
        ; lookup
        ORR     a1, a1, #&FE0
        BIC     a1, a1, #&01F
60
        STR     a1, [a2, #PL310_REG7_CLEAN_PA]
        TST     a1, #&FE0
        SUB     a1, a1, #1<<5           ; next index
        BNE     %BT60
75
        CMP     a4, a3
        BNE     %BT50
        ; Sync
        DMB
        EXIT
      [ :LNOT: SMP
90
        ; Full clean required
        PullEnv
        B       Cache_CleanInvalidateAll_PL310
      ]

Cache_InvalidateRange_PL310 * Cache_CleanInvalidateRange_PL310 ; TODO: Need a ranged invalidate implementation that doesn't round to page size

; --------------------------------------------------------------------------
; ----- Generic ARMv6 and ARMv7 barrier operations -------------------------
; --------------------------------------------------------------------------

; Although the ARMv6 barriers are supported on ARMv7, they are deprecated, and
; they give less control than the native ARMv7 barriers. So we prefer to use
; the ARMv7 barriers wherever possible.

DSB_ReadWrite_ARMv6 ROUT
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 4
        MOV     pc, lr

DMB_ReadWrite_ARMv6 ROUT
        MOV     a1, #0
        MCR     p15, 0, a1, c7, c10, 5
        MOV     pc, lr

DSB_ReadWrite_ARMv7 ROUT
        DSB     SY
        MOV     pc, lr

DSB_Write_ARMv7 ROUT
        DSB     ST
        MOV     pc, lr

DMB_ReadWrite_ARMv7 ROUT
        DMB     SY
        MOV     pc, lr

DMB_Write_ARMv7 ROUT
        DMB     ST
        MOV     pc, lr

 ] ; MEMM_Type = "VMSAv6"

        LTORG

; --------------------------------------------------------------------------

LookForHALCacheController ROUT
        Entry   "r0-r3,r8,r12"
        ; Look for any known cache controllers that the HAL has registered, and
        ; replace our ARMop routines with the appropriate routines for that
        ; controller
        LDR     r0, =(0:SHL:16)+HALDeviceType_SysPeri+HALDeviceSysPeri_CacheC
        MOV     r1, #0
        LDR     r12, =ZeroPage
        STR     r1, [r12, #Cache_HALDevice] ; In case none found
10
        MOV     r8, #OSHW_DeviceEnumerate
        SWI     XOS_Hardware
        EXIT    VS
        CMP     r1, #-1
        EXIT    EQ
        ; Do we recognise this controller?
        ASSERT  HALDevice_ID = 2
      [ NoARMv4
        LDR     lr, [r2]
        MOV     lr, lr, LSR #16
      |
        LDRH    lr, [r2, #HALDevice_ID]
      ]
        ADR     r8, KnownHALCaches
20
        LDR     r12, [r8], #8+Proc_MMU_ChangingUncachedEntries-Proc_Cache_CleanInvalidateAll
        CMP     r12, #-1
        BEQ     %BT10
        CMP     lr, r12
        BNE     %BT20
        ; Cache recognised. Disable IRQs for safety, and then try enabling it.
        Push    "r2"
        MOV     r0, r2
        MSR     CPSR_c, #SVC32_mode+I32_bit
        MOV     lr, pc
        LDR     pc, [r2, #HALDevice_Activate]
        CMP     r0, #1
        Pull    "r2"
        MSRNE   CPSR_c, #SVC32_mode
        BNE     %BT10
        ; Cache enabled OK - remember the device pointer and patch our maintenance ops
        LDR     r0, =ZeroPage
        STR     r2, [r0, #Cache_HALDevice]
        ADD     r0, r0, #Proc_Cache_CleanInvalidateAll
        MOV     r1, #Proc_MMU_ChangingUncachedEntries-Proc_Cache_CleanInvalidateAll
30
        LDR     r3, [r8, #-4]!
        TEQ     r3, #0
        STRNE   r3, [r0, r1]
        SUBS    r1, r1, #4
        BGE     %BT30
        ; It's now safe to restore IRQs
        MSR     CPSR_c, #SVC32_mode
        EXIT

KnownHALCaches ROUT
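; Table format: each entry is one word of HAL device ID followed by one word per
; ARMop slot, from Proc_Cache_CleanInvalidateAll up to
; Proc_MMU_ChangingUncachedEntries in workspace order. A zero word leaves the
; existing ARMop in place. The list is terminated by -1.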
      [ MEMM_Type = "VMSAv6"
        DCD     HALDeviceID_CacheC_PL310
01
        DCD     Cache_CleanInvalidateAll_PL310
        DCD     Cache_CleanInvalidateRange_PL310
        DCD     Cache_CleanAll_PL310
        DCD     Cache_CleanRange_PL310
        DCD     Cache_InvalidateAll_PL310
        DCD     Cache_InvalidateRange_PL310
        DCD     Cache_RangeThreshold_PL310
        DCD     Cache_Examine_PL310
        DCD     0 ; ICache_InvalidateAll
        DCD     0 ; ICache_InvalidateRange
        DCD     0 ; TLB_InvalidateAll
        DCD     0 ; TLB_InvalidateEntry
        DCD     DSB_ReadWrite_PL310
        DCD     DSB_Write_PL310
        DCD     0 ; DSB_Read
        DCD     DMB_ReadWrite_PL310
        DCD     DMB_Write_PL310
        DCD     0 ; DMB_Read
        DCD     0 ; IMB_Full
        DCD     0 ; IMB_Range
        DCD     0 ; IMB_List
        DCD     MMU_Changing_PL310
        DCD     MMU_ChangingEntry_PL310
        DCD     0 ; MMU_ChangingUncached
        DCD     0 ; MMU_ChangingUncachedEntry
        DCD     MMU_ChangingEntries_PL310
        DCD     0 ; MMU_ChangingUncachedEntries
        ASSERT  . - %BT01 = 4+Proc_MMU_ChangingUncachedEntries-Proc_Cache_CleanInvalidateAll
      ]
        DCD     -1

; --------------------------------------------------------------------------

        MACRO
        ARMopPtr $op
        ASSERT  . - ARMopPtrTable = ARMop_$op * 4
        DCD     ZeroPage + Proc_$op
        MEND
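; For example, "ARMopPtr Cache_CleanAll" assembles to "DCD ZeroPage + Proc_Cache_CleanAll",
; with the ASSERT checking that the entry lands at index ARMop_Cache_CleanAll in the table.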

; ARMops exposed by OS_MMUControl 2
ARMopPtrTable
        ARMopPtr Cache_CleanInvalidateAll
        ARMopPtr Cache_CleanAll
        ARMopPtr Cache_InvalidateAll
        ARMopPtr Cache_RangeThreshold
        ARMopPtr TLB_InvalidateAll
        ARMopPtr TLB_InvalidateEntry
        ARMopPtr DSB_ReadWrite
        ARMopPtr IMB_Full
        ARMopPtr IMB_Range
        ARMopPtr IMB_List
        ARMopPtr MMU_Changing
        ARMopPtr MMU_ChangingEntry
        ARMopPtr MMU_ChangingUncached
        ARMopPtr MMU_ChangingUncachedEntry
        ARMopPtr MMU_ChangingEntries
        ARMopPtr MMU_ChangingUncachedEntries
        ARMopPtr DSB_Write
        ARMopPtr DSB_Read
        ARMopPtr DMB_ReadWrite
        ARMopPtr DMB_Write
        ARMopPtr DMB_Read
        ARMopPtr Cache_CleanInvalidateRange
 [ {FALSE} ; Not fully tested yet, so keep out of the public API
        ARMopPtr Cache_CleanRange
        ARMopPtr Cache_InvalidateRange
        ARMopPtr ICache_InvalidateAll
        ARMopPtr ICache_InvalidateRange
 ]
ARMopPtrTable_End
        ASSERT ARMopPtrTable_End - ARMopPtrTable = ARMop_Max*4

;        IMPORT  Write0_Translated

ARM_PrintProcessorType
        LDR     a1, =ZeroPage
        LDRB    a1, [a1, #ProcessorType]
        TEQ     a1, #ARMunk
        MOVEQ   pc, lr

        Push    "lr"
        ADR     a2, PNameTable
        LDHA    a1, a2, a1, a3
        ADD     a1, a2, a1
      [ International
        BL      Write0_Translated
      |
        SWI     XOS_Write0
      ]
        SWI     XOS_NewLine
        SWI     XOS_NewLine
        Pull    "pc"

PNameTable
        DCW     PName_ARM600    - PNameTable
        DCW     PName_ARM610    - PNameTable
        DCW     PName_ARM700    - PNameTable
        DCW     PName_ARM710    - PNameTable
        DCW     PName_ARM710a   - PNameTable
        DCW     PName_SA110     - PNameTable      ; pre rev T
        DCW     PName_SA110     - PNameTable      ; rev T or later
        DCW     PName_ARM7500   - PNameTable
        DCW     PName_ARM7500FE - PNameTable
        DCW     PName_SA1100    - PNameTable
        DCW     PName_SA1110    - PNameTable
        DCW     PName_ARM720T   - PNameTable
        DCW     PName_ARM920T   - PNameTable
        DCW     PName_ARM922T   - PNameTable
        DCW     PName_X80200    - PNameTable
        DCW     PName_X80321    - PNameTable
        DCW     PName_ARM1176JZF_S - PNameTable
        DCW     PName_Cortex_A5 - PNameTable
        DCW     PName_Cortex_A7 - PNameTable
        DCW     PName_Cortex_A8 - PNameTable
        DCW     PName_Cortex_A9 - PNameTable
        DCW     PName_Cortex_A17 - PNameTable     ; A12 rebranded as A17
        DCW     PName_Cortex_A15 - PNameTable
        DCW     PName_Cortex_A17 - PNameTable
        DCW     PName_Cortex_A53 - PNameTable
        DCW     PName_Cortex_A57 - PNameTable
        DCW     PName_Cortex_A72 - PNameTable     ; A58 rebranded as A72

PName_ARM600
        =       "600:ARM 600 Processor",0
PName_ARM610
        =       "610:ARM 610 Processor",0
PName_ARM700
        =       "700:ARM 700 Processor",0
PName_ARM710
        =       "710:ARM 710 Processor",0
PName_ARM710a
        =       "710a:ARM 710a Processor",0
PName_SA110
        =       "SA110:SA-110 Processor",0
PName_ARM7500
        =       "7500:ARM 7500 Processor",0
PName_ARM7500FE
        =       "7500FE:ARM 7500FE Processor",0
PName_SA1100
        =       "SA1100:SA-1100 Processor",0
PName_SA1110
        =       "SA1110:SA-1110 Processor",0
PName_ARM720T
        =       "720T:ARM 720T Processor",0
PName_ARM920T
        =       "920T:ARM 920T Processor",0
PName_ARM922T
        =       "922T:ARM 922T Processor",0
PName_X80200
        =       "X80200:80200 Processor",0
PName_X80321
        =       "X80321:80321 Processor",0
PName_ARM1176JZF_S
        =       "ARM1176JZF_S:ARM1176JZF-S Processor",0
PName_Cortex_A5
        =       "CA5:Cortex-A5 Processor",0
PName_Cortex_A7
        =       "CA7:Cortex-A7 Processor",0
PName_Cortex_A8
        =       "CA8:Cortex-A8 Processor",0
PName_Cortex_A9
        =       "CA9:Cortex-A9 Processor",0
PName_Cortex_A15
        =       "CA15:Cortex-A15 Processor",0
PName_Cortex_A17
        =       "CA17:Cortex-A17 Processor",0
PName_Cortex_A53
        =       "CA53:Cortex-A53 Processor",0
PName_Cortex_A57
        =       "CA57:Cortex-A57 Processor",0
PName_Cortex_A72
        =       "CA72:Cortex-A72 Processor",0
        ALIGN


; Lookup tables from DA flags PCB (bits 14:12,5,4, packed down to 4:2,1,0)
; to XCB bits in page table descriptors.
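; For example, a dynamic area with DynAreaFlags_NotCacheable (bit 5) and
; DynAreaFlags_NotBufferable (bit 4) set, and a cache policy of 0, packs down to
; index %00011 (XCB_NC+XCB_NB), i.e. the NCNB column of the default row; the
; policy field (bits 14:12) selects one of the eight rows, starting at XCB_P.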

XCB_CB  *       0:SHL:0
XCB_NB  *       1:SHL:0
XCB_NC  *       1:SHL:1
XCB_P   *       1:SHL:2
 [ MEMM_Type = "VMSAv6"
XCB_TU  *       1:SHL:5 ; For VMSAv6, deal with temp uncacheable via the table
 ]

        ALIGN 32

 [ MEMM_Type = "ARM600"

; WT read-allocate cache (eg ARM720T)
XCBTableWT                                      ; C+B        CNB   NCB         NCNB
        = L2_C+L2_B, L2_C, L2_B, 0              ;        Default
        = L2_C+L2_B, L2_C, L2_B, 0              ; WT,         WT, Non-merging, X
        = L2_C+L2_B, L2_C, L2_B, 0              ; WB/RA,      WB, Merging,     X
        = L2_C+L2_B, L2_C, L2_B, 0              ; WB/WA,      X,  Idempotent,  X
        = L2_C+L2_B, L2_C, L2_B, 0              ; Alt DCache, X,  X,           X
        = L2_C+L2_B, L2_C, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B, L2_C, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B, L2_C, L2_B, 0              ; X,          X,  X,           X

; SA-110 in Risc PC - WB only read-allocate cache, non-merging WB
XCBTableSA110                                   ; C+B        CNB   NCB         NCNB
        = L2_C+L2_B,    0, L2_B, 0              ;        Default
        =      L2_B,    0, L2_B, 0              ; WT,         WT, Non-merging, X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/RA,      WB, Merging,     X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/WA,      X,  Idempotent,  X
        = L2_C+L2_B,    0, L2_B, 0              ; Alt DCache, X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X

; ARMv5 WB/WT read-allocate cache, non-merging WB (eg ARM920T)
XCBTableWBR                                     ; C+B        CNB   NCB         NCNB
        = L2_C+L2_B,    0, L2_B, 0              ;        Default
        = L2_C     ,    0, L2_B, 0              ; WT,         WT, Non-merging, X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/RA,      WB, Merging,     X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/WA,      X,  Idempotent,  X
        = L2_C+L2_B,    0, L2_B, 0              ; Alt DCache, X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X

; SA-1110 - WB only read allocate cache, merging WB, mini D-cache
XCBTableSA1110                                  ; C+B        CNB   NCB         NCNB
        = L2_C+L2_B,    0, L2_B, 0              ;        Default
        =      L2_B,    0,    0, 0              ; WT,         WT, Non-merging, X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/RA,      WB, Merging,     X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/WA,      X,  Idempotent,  X
        = L2_C     ,    0, L2_B, 0              ; Alt DCache, X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X

; XScale - WB/WT read or write-allocate cache, merging WB, mini D-cache
;          defaulting to read-allocate
XCBTableXScaleRA                                ; C+B        CNB   NCB         NCNB
        =      L2_C+L2_B,    0,      L2_B, 0    ;        Default
        =      L2_C     ,    0, L2_X+L2_B, 0    ; WT,         WT, Non-merging, X
        =      L2_C+L2_B,    0,      L2_B, 0    ; WB/RA,      WB, Merging,     X
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ; WB/WA,      X,  Idempotent,  X
        = L2_X+L2_C     ,    0,      L2_B, 0    ; Alt DCache, X,  X,           X
        =      L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X
        =      L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X
        =      L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X

; XScale - WB/WT read or write-allocate cache, merging WB, mini D-cache
;          defaulting to write-allocate
XCBTableXScaleWA                                ; C+B        CNB   NCB         NCNB
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ;        Default
        =      L2_C     ,    0, L2_X+L2_B, 0    ; WT,         WT, Non-merging, X
        =      L2_C+L2_B,    0,      L2_B, 0    ; WB/RA,      WB, Merging,     X
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ; WB/WA,      X,  Idempotent,  X
        = L2_X+L2_C     ,    0,      L2_B, 0    ; Alt DCache, X,  X,           X
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X
        = L2_X+L2_C+L2_B,    0,      L2_B, 0    ; X,          X,  X,           X

; XScale - WB/WT read-allocate cache, merging WB, no mini D-cache/extended pages
XCBTableXScaleNoExt                             ; C+B        CNB   NCB         NCNB
        = L2_C+L2_B,    0, L2_B, 0              ;        Default
        = L2_C     ,    0,    0, 0              ; WT,         WT, Non-merging, X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/RA,      WB, Merging,     X
        = L2_C+L2_B,    0, L2_B, 0              ; WB/WA,      X,  Idempotent,  X
        = L2_C+L2_B,    0, L2_B, 0              ; Alt DCache, X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X
        = L2_C+L2_B,    0, L2_B, 0              ; X,          X,  X,           X

 ] ; MEMM_Type = "ARM600"

 [ MEMM_Type = "VMSAv6"

; VMSAv6/v7 L2 memory attributes (short descriptor format, TEX remap disabled)

L2_SO_S     * 0                             ; Strongly-ordered, shareable
L2_Dev_S    * L2_B                          ; Device, shareable
L2_Dev_nS   * 2:SHL:L2_TEXShift             ; Device, non-shareable

; For Normal memory types, use the form that is explicit about inner and outer
; cacheability. This provides a nice mapping to the way cacheability is
; specified in the TTBR (see SetTTBR)
VMSAv6_Cache_NC * 0
VMSAv6_Cache_WBWA * 1
VMSAv6_Cache_WT * 2
VMSAv6_Cache_WBRA * 3
        ASSERT L2_C = L2_B:SHL:1
        MACRO
        VMSAv6_Nrm_XCB $inner, $outer
L2_Nrm_$inner._$outer * ((4+VMSAv6_Cache_$outer):SHL:L2_TEXShift) + (VMSAv6_Cache_$inner * L2_B)
      [ "$outer" == "$inner"
L2_Nrm_$inner * L2_Nrm_$inner._$outer
      ]
        MEND

        VMSAv6_Nrm_XCB WT, WT               ; Normal, WT/RA, S bit determines shareability
        VMSAv6_Nrm_XCB WBRA, WBRA           ; Normal, WB/RA, S bit determines shareability
        VMSAv6_Nrm_XCB NC, NC               ; Normal, non-cacheable (but bufferable), S bit determines shareability
        VMSAv6_Nrm_XCB WBWA, WBWA           ; Normal, WB/WA, S bit determines shareability
        VMSAv6_Nrm_XCB WT, WBWA             ; Normal, inner WT, outer WB/WA, S bit determines shareability
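; For example, "VMSAv6_Nrm_XCB WBRA, WBRA" defines L2_Nrm_WBRA with TEX = 0b111
; and C=1, B=1: TEX[2] set selects the explicit inner/outer encoding, TEX[1:0]
; gives the outer policy (WB/RA) and C:B gives the inner policy (WB/RA).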

; Generic XCB table for VMSAv6/v7

; * NCNB is roughly equivalent to "strongly ordered".
; * NCB with non-merging write buffer is equivalent to "Device".
; * NCB with merging write buffer is also mapped to "Device". "Normal" is
;   tempting but may result in issues with read-sensitive devices (see below).
; * For NCB with devices which aren't read-sensitive, we introduce a new
;   "Merging write buffer with idempotent memory" policy which maps to the
;   Normal, non-cacheable type. This will degrade nicely on older OSes and CPUs,
;   avoiding some issues if we were to make NCB with merging write buffer default
;   to Normal memory. This policy is also the new default, so that all existing
;   NCB RAM uses it (so unaligned loads, etc. will work). No existing code seems
;   to be using NCB for IO devices (only for IO RAM like VRAM), so this change
;   should be safe (previously, all NCB policies would have mapped to Device
;   memory)
; * CNB has no equivalent - there's no control over whether the write buffer is
;   used for cacheable regions, so we have to downgrade to NCNB.

; The caches should behave sensibly when given unsupported attributes
; (downgrade WB to WT to NC), but we may end up doing more cache maintenance
; than needed if the hardware downgrades some areas to NC.
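; The table below holds 8 rows of 4 halfword entries (C+B, CNB, NCB, NCNB),
; indexed by the packed PCB value; when XCB_TU (bit 5) is set a second block of
; 8 rows is used, so temporarily uncacheable pages keep their memory type.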

XCBTableVMSAv6                                       ; C+B        CNB   NCB         NCNB
        DCW L2_Nrm_WBWA, L2_SO_S, L2_Nrm_NC, L2_SO_S ;        Default
        DCW L2_Nrm_WT,   L2_SO_S, L2_Dev_S,  L2_SO_S ; WT,         WT, Non-merging, X
        DCW L2_Nrm_WBRA, L2_SO_S, L2_Dev_S,  L2_SO_S ; WB/RA,      WB, Merging,     X
        DCW L2_Nrm_WBWA, L2_SO_S, L2_Nrm_NC, L2_SO_S ; WB/WA,      X,  Idempotent,  X
        DCW L2_Nrm_WT_WBWA,L2_SO_S,L2_Nrm_NC,L2_SO_S ; Alt DCache, X,  X,           X
        DCW L2_Nrm_WBWA, L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X
        DCW L2_Nrm_WBWA, L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X
        DCW L2_Nrm_WBWA, L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X
        ; This second set of entries deals with when pages are made
        ; temporarily uncacheable - we need to change the cacheability without
        ; changing the memory type.
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ;        Default
        DCW L2_Nrm_NC,   L2_SO_S, L2_Dev_S,  L2_SO_S ; WT,         WT, Non-merging, X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Dev_S,  L2_SO_S ; WB/RA,      WB, Merging,     X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ; WB/WA,      X,  Idempotent,  X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ; Alt DCache, X,  X,           X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X
        DCW L2_Nrm_NC,   L2_SO_S, L2_Nrm_NC, L2_SO_S ; X,          X,  X,           X

 ] ; MEMM_Type = "VMSAv6"

        END