; Copyright 2009 Castle Technology Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; > VMSAv6

        GBLL    DebugAborts
DebugAborts SETL {FALSE}


; MMU interface file - VMSAv6 version

; Created from s.ARM600 by JL 18-Feb-09


; Make sure we aren't being compiled against a CPU that can't possibly support a VMSAv6 MMU

        ASSERT :LNOT: NoARMv6

        KEEP

        ; Convert given page flags to the equivalent temp uncacheable L2PT flags
        MACRO
        GetTempUncache $out, $pageflags, $pcbtrans, $temp
        ASSERT  $out <> $pageflags
        ASSERT  $out <> $pcbtrans
        ASSERT  $out <> $temp
        ASSERT  $temp <> $pcbtrans
        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
        AND     $out, $pageflags, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
        AND     $temp, $pageflags, #DynAreaFlags_CPBits
        ORR     $out, $out, #XCB_TU<<4                      ; treat as temp uncacheable
        ORR     $out, $out, $temp, LSR #10-4
        LDRB    $out, [$pcbtrans, $out, LSR #4]             ; convert to X, C and B bits for this CPU
        MEND

TempUncache_L2PTMask * L2_B+L2_C+L2_TEX
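
; A typical use (as in BangL2PT below): load the MMU_PCBTrans pointer from the
; zero page, build the replacement TEX/C/B bits with GetTempUncache, then BIC
; TempUncache_L2PTMask out of the existing L2PT entry and ORR the result in.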

; **************** CAM manipulation utility routines ***********************************

; **************************************************************************************
;
;       BangCamUpdate - Update CAM, MMU for page move, coping with page currently mapped in
;
; mjs Oct 2000
; reworked to use generic ARM ops (vectored to appropriate routines during boot)
;
; First look in the CamEntries table to find the logical address L that this physical page is
; currently allocated to. Then check in the Level 2 page tables to see if logical page L is
; currently mapped to physical page R2. If it is, then map page L to be inaccessible, otherwise
; leave page L alone. Then map logical page R3 to physical page R2.
;
; in:   r2 = physical page number
;       r3 = logical address (2nd copy if doubly mapped area)
;       r9 = offset from 1st to 2nd copy of doubly mapped area (either source or dest, but not both)
;       r11 = PPL + CB bits
;
; out:  r0, r1, r4, r6 corrupted
;       r2, r3, r5, r7-r12 preserved
;

BangCamUpdate ROUT
        TST     r11, #DynAreaFlags_DoublyMapped ; if moving page to doubly mapped area
        SUBNE   r3, r3, r9                      ; then CAM soft copy holds ptr to 1st copy

        LDR     r1, =ZeroPage
        LDR     r1, [r1, #CamEntriesPointer]
        ADD     r1, r1, r2, LSL #CAM_EntrySizeLog2 ; point at cam entry (logaddr, PPL)
        ASSERT  CAM_LogAddr=0
        ASSERT  CAM_PageFlags=4
        LDMIA   r1, {r0, r6}                    ; r0 = current logaddress, r6 = current PPL
        BIC     r4, r11, #PageFlags_Unsafe
        STMIA   r1, {r3, r4}                    ; store new address, PPL
        Push    "r0, r6"                        ; save old logical address, PPL
        LDR     r1, =ZeroPage+PhysRamTable      ; go through phys RAM table
        MOV     r6, r2                          ; make copy of r2 (since that must be preserved)
10
        LDMIA   r1!, {r0, r4}                   ; load next address, size
        SUBS    r6, r6, r4, LSR #12             ; subtract off that many pages
        BCS     %BT10                           ; if more than that, go onto next bank

        ADD     r6, r6, r4, LSR #12             ; put back the ones which were too many
        ADD     r0, r0, r6, LSL #12             ; move on address by the number of pages left
        LDR     r6, [sp]                        ; reload old logical address

; now we have r6 = old logical address, r2 = physical page number, r0 = physical address

        TEQ     r6, r3                          ; TMD 19-Jan-94: if old logaddr = new logaddr, then
        BEQ     %FT20                           ; don't remove page from where it is, to avoid window
                                                ; where page is nowhere.
        LDR     r1, =L2PT
        ADD     r6, r1, r6, LSR #10             ; r6 -> L2PT entry for old log.addr
        MOV     r4, r6, LSR #12                 ; r4 = word offset into L2 for address r6
        LDR     r4, [r1, r4, LSL #2]            ; r4 = L2PT entry for L2PT entry for old log.addr
        TST     r4, #3                          ; if page not there
        BEQ     %FT20                           ; then no point in trying to remove it

        LDR     r4, [r6]                        ; r4 = L2PT entry for old log.addr
        MOV     r4, r4, LSR #12                 ; r4 = physical address for old log.addr
        TEQ     r4, r0, LSR #12                 ; if equal to physical address of page being moved
        BNE     %FT20                           ; if not there, then just put in new page

        AND     r4, r11, #PageFlags_Unsafe
        Push    "r0, r3, r11, r14"              ; save phys.addr, new log.addr, new PPL, lr
        ADD     r3, sp, #4*4
        LDMIA   r3, {r3, r11}                   ; reload old logical address, old PPL
        LDR     r0, =DuffEntry                  ; Nothing to do if wasn't mapped in
        ORR     r11, r11, r4
        TEQ     r3, r0
        MOV     r0, #0                          ; cause translation fault
        BLNE    BangL2PT                        ; map page out
        Pull    "r0, r3, r11, r14"
20
        ADD     sp, sp, #8                      ; junk old logical address, PPL
        B       BangCamAltEntry                 ; and branch into BangCam code

; **************************************************************************************
;
;       BangCam - Update CAM, MMU for page move, assuming page currently mapped out
;
; This routine maps a physical page to a given logical address
; It is assumed that the physical page is currently not mapped anywhere else
;
; in:   r2 = physical page number
;       r3 = logical address (2nd copy if doubly mapped)
;       r9 = offset from 1st to 2nd copy of doubly mapped area (either source or dest, but not both)
;       r11 = PPL
;
; out:  r0, r1, r4, r6 corrupted
;       r2, r3, r5, r7-r12 preserved
;
; NB The physical page number MUST be in range.

BangCam ROUT
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        SUBNE   r3, r3, r9              ; then move ptr to 1st copy

        LDR     r1, =ZeroPage+PhysRamTable ; go through phys RAM table
        MOV     r6, r2                  ; make copy of r2 (since that must be preserved)
10
        LDMIA   r1!, {r0, r4}           ; load next address, size
        SUBS    r6, r6, r4, LSR #12     ; subtract off that many pages
        BCS     %BT10                   ; if more than that, go onto next bank

        ADD     r6, r6, r4, LSR #12     ; put back the ones which were too many
        ADD     r0, r0, r6, LSL #12     ; move on address by the number of pages left
BangCamAltEntry
        LDR     r4, =DuffEntry          ; check for requests to map a page to nowhere
        TEQ     r4, r3                  ; don't actually map anything to nowhere
        MOVEQ   pc, lr
        GetPTE  r0, 4K, r0, r11

        LDR     r1, =L2PT               ; point to level 2 page tables

        ;fall through to BangL2PT

;internal entry point for updating L2PT entry
;
; entry: r0 = new L2PT value, r1 -> L2PT, r3 = logical address (4k aligned), r11 = PPL
;
; exit: r0,r1,r4,r6 corrupted
;
BangL2PT                                        ; internal entry point used only by BangCamUpdate
        Push    "lr"
        MOV     r6, r0

        TST     r11, #PageFlags_Unsafe
        BNE     BangL2PT_unsafe

        TST     r11, #DynAreaFlags_DoublyMapped
        BNE     BangL2PT_sledgehammer           ;if doubly mapped, don't try to be clever

        ;In order to safely map out a cacheable page and remove it from the
        ;cache, we need to perform the following process:
        ;* Make the page uncacheable
        ;* Flush TLB
        ;* Clean+invalidate cache
        ;* Write new mapping (r6)
        ;* Flush TLB
        ;For uncacheable pages we can just do the last two steps
        ;
        TEQ     r6, #0                          ;EQ if mapping out
        TSTEQ   r11, #DynAreaFlags_NotCacheable ;EQ if also cacheable (overcautious for temp uncache+illegal PCB combos)
        LDR     r4, =ZeroPage
        BNE     %FT20
        ; Potentially we could just map as strongly-ordered + XN here
        ; But for safety just go for temp uncacheable (will retain memory type + shareability)
        LDR     lr, [r4, #MMU_PCBTrans]
        GetTempUncache r0, r11, lr, r4
        LDR     lr, [r1, r3, LSR #10]           ;get current L2PT entry
        LDR     r4, =TempUncache_L2PTMask
        BIC     lr, lr, r4                      ;remove current attributes
        ORR     lr, lr, r0
        STR     lr, [r1, r3, LSR #10]           ;Make uncacheable
        LDR     r4, =ZeroPage
        MOV     r0, r3
        ARMop   MMU_ChangingUncachedEntry,,, r4 ; TLB flush
        MOV     r0, r3
        ADD     r1, r3, #4096
        ARMop   Cache_CleanInvalidateRange,,, r4 ; Cache flush
        LDR     r1, =L2PT

20      STR     r6, [r1, r3, LSR #10]           ;update L2PT entry
        Pull    "lr"
        MOV     r0, r3
        ARMop   MMU_ChangingUncachedEntry,,tailcall,r4

BangL2PT_sledgehammer

        ;sledgehammer is super cautious and does cache/TLB coherency on a global basis
        ;should only be used for awkward cases
        ;
        TEQ     r6, #0                          ;EQ if mapping out
        TSTEQ   r11, #DynAreaFlags_NotCacheable ;EQ if also cacheable (overcautious for temp uncache+illegal PCB combos)
        ADR     lr, %FT30
        LDR     r4, =ZeroPage
        ARMop   MMU_Changing, EQ, tailcall, r4
        ARMop   MMU_ChangingUncached, NE, tailcall, r4

30      STR     r6, [r1, r3, LSR #10]!          ; update level 2 page table (and update pointer so we can use bank-to-bank offset)
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        STRNE   r6, [r1, r9, LSR #10]           ; then store entry for 2nd copy as well
        ADDNE   r3, r3, r9                      ; and point logical address back at 2nd copy
        ; In order to guarantee that the result of a page table write is
        ; visible, the ARMv6+ memory order model requires us to perform TLB
        ; maintenance (equivalent to the MMU_ChangingUncached ARMop) after we've
        ; performed the write. Performing the maintenance beforehand (as we've
        ; done traditionally) will work most of the time, but not always.
        Pull    "lr"
        ARMop   MMU_ChangingUncached,,tailcall,r4

BangL2PT_unsafe
        STR     r6, [r1, r3, LSR #10]!          ; update level 2 page table (and update pointer so we can use bank-to-bank offset)
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        STRNE   r6, [r1, r9, LSR #10]           ; then store entry for 2nd copy as well
        ADDNE   r3, r3, r9                      ; and point logical address back at 2nd copy
        Pull    "pc"


PPLTransNonShareable                                          ; EL1 EL0
        DCW     (AP_Full * L2_APMult)+L2_SmallPage            ; RWX RWX
        DCW     (AP_Read * L2_APMult)+L2_SmallPage            ; RWX R X
        DCW     (AP_None * L2_APMult)+L2_SmallPage            ; RWX
        DCW     (AP_ROM  * L2_APMult)+L2_SmallPage            ; R X R X
        DCW     (AP_PROM * L2_APMult)+L2_SmallPage            ; R X
        DCW     (AP_Full * L2_APMult)+L2_SmallPage+L2_XN      ; RW  RW
        DCW     (AP_Read * L2_APMult)+L2_SmallPage+L2_XN      ; RW  R
        DCW     (AP_None * L2_APMult)+L2_SmallPage+L2_XN      ; RW
        DCW     (AP_ROM  * L2_APMult)+L2_SmallPage+L2_XN      ; R   R
        DCW     (AP_PROM * L2_APMult)+L2_SmallPage+L2_XN      ; R

PPLTransShareable                                             ; EL1 EL0
        DCW     (AP_Full * L2_APMult)+L2_SmallPage      +L2_S ; RWX RWX
        DCW     (AP_Read * L2_APMult)+L2_SmallPage      +L2_S ; RWX R X
        DCW     (AP_None * L2_APMult)+L2_SmallPage      +L2_S ; RWX
        DCW     (AP_ROM  * L2_APMult)+L2_SmallPage      +L2_S ; R X R X
        DCW     (AP_PROM * L2_APMult)+L2_SmallPage      +L2_S ; R X
        DCW     (AP_Full * L2_APMult)+L2_SmallPage+L2_XN+L2_S ; RW  RW
        DCW     (AP_Read * L2_APMult)+L2_SmallPage+L2_XN+L2_S ; RW  R
        DCW     (AP_None * L2_APMult)+L2_SmallPage+L2_XN+L2_S ; RW
        DCW     (AP_ROM  * L2_APMult)+L2_SmallPage+L2_XN+L2_S ; R   R
        DCW     (AP_PROM * L2_APMult)+L2_SmallPage+L2_XN+L2_S ; R
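
; Each PPLTrans table is an array of halfword entries indexed by the page's
; AP flags (DynAreaFlags_APBits) * 2 - see Get4KPTE/Get64KPTE/Get1MPTE below,
; which look up an entry and merge the AP/XN/S bits into the page table entry.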

PPLAccess            ; EL1EL0
                     ; RWXRWX
        GenPPLAccess 2_111111
        GenPPLAccess 2_111101
        GenPPLAccess 2_111000
        GenPPLAccess 2_101101
        GenPPLAccess 2_101000
        GenPPLAccess 2_110110
        GenPPLAccess 2_110100
        GenPPLAccess 2_110000
        GenPPLAccess 2_100100
        GenPPLAccess 2_100000
        DCD     -1

PageShifts
        =       12, 13, 0, 14           ; 1 2 3 4
        =       0,  0,  0, 15           ; 5 6 7 8
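
; PageShifts appears to be indexed by the page size in 4K units (1, 2, 4 or 8),
; giving log2 of the page size in bytes (4K, 8K, 16K or 32K); the zero entries
; mark sizes with no corresponding page size.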

        LTORG

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
; "VMSAv6"-specific OS_MMUControl code
;

        ; Make current stack page(s) temporarily uncacheable to make cache disable operations safer
        ; In: R0 = OS_Memory 0 flags
ModifyStackCacheability
        Entry   "r1-r2", 24             ; Make up to two pages uncacheable
        ADD     lr, sp, #24+12          ; Get original SP
        STR     lr, [sp, #4]            ; Make current page uncacheable
        ASSERT  (SVCStackAddress :AND: ((1<<20)-1)) = 0 ; Assume MB aligned stack
        TST     lr, #(1<<20)-4096       ; Zero if this is the last stack page
        SUBNE   lr, lr, #4096
        STRNE   lr, [sp, #12+4]         ; Make next page uncacheable
        MOVNE   r2, #2
        MOV     r1, sp
        MOVEQ   r2, #1
        BL      MemoryConvertNoFIQCheck ; Bypass FIQ disable logic within OS_Memory (we've already claimed the FIQ vector)
        EXIT
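
        ; Callers pass R0 = (1<<9)+(2<<14) to make the stack pages temporarily
        ; uncacheable and R0 = (1<<9)+(3<<14) to restore cacheability afterwards
        ; (OS_Memory 0 flag values - see the OS_Memory implementation for the
        ; exact bit meanings).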

; in:   r0 = 0 (reason code 0, for modify control register)
;       r1 = EOR mask
;       r2 = AND mask
;
;       new control = ((old control AND r2) EOR r1)
;
; out:  r1 = old value
;       r2 = new value
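;
; e.g.  r1=0,   r2=&FFFFFFFF  - read control register, no change (read-only case)
;       r1=bit, r2=:NOT: bit  - set a bit
;       r1=0,   r2=:NOT: bit  - clear a bit
;       r1=bit, r2=&FFFFFFFF  - toggle a bit
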
MMUControl_ModifyControl ROUT
        Push    "r0,r3,r4,r5"
        CMP     r1,#0
        CMPEQ   r2,#&FFFFFFFF
        BEQ     MMUC_modcon_readonly
        MOV     r3, r1
        MOV     r1, #Service_ClaimFIQ
        SWI     XOS_ServiceCall         ; stop FIQs for safety
        MOV     r1, r3
        LDR     r3,=ZeroPage
        MRS     r4, CPSR
        CPSID   if                      ; disable IRQs while we modify soft copy (and possibly switch caches off/on)

        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control lr
        AND     r2, r2, lr
        EOR     r2, r2, r1
        MOV     r1, lr

        ; On some CPUs LDREX/STREX only work on cacheable memory. Allowing the
        ; D-cache to be disabled in this situation is likely to result in near-
        ; instant failure of the OS.
        LDR     r5, [r3, #ProcessorFlags]
        TST     r5, #CPUFlag_NoDCacheDisable
        ORRNE   r2, r2, #MMUC_C

        ; If we have multiple cache levels, assume it's split caches on top of a
        ; unified cache. In that case, having mismatched I+D cache settings can
        ; be pretty dangerous due to the IMB ARMops assuming that cleaning to
        ; PoU is sufficient (D-cache on but I-cache off will fail due to the
        ; instruction fetches bypassing the unified cache, D-cache off but
        ; I-cache on will fail because the I-cache will pull code into the
        ; unified cache which an IMB won't clean)
        ; If we have the ability to disable the L2 cache then this would be OK,
        ; but we can't guarantee that ability
        Push    "r1-r4"
        MOV     r1, #1
        ARMop   Cache_Examine,,,r3
        CMP     r0, #0
        Pull    "r1-r4"
        BEQ     %FT04
        LDR     lr, =MMUC_C+MMUC_I
        TST     r2, lr
        ORRNE   r2, r2, lr              ; If one cache is on, force both on

04
        STR     r2, [r3, #MMUControlSoftCopy]
        BIC     lr, r2, r1              ; lr = bits going from 0->1
        TST     lr, #MMUC_C             ; if cache turning on then flush cache before we do it
        BEQ     %FT05

        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turning on, I-cache invalidate is either necessary (both turning on) or a safe side-effect
        B       %FT10

05
        TST     lr, #MMUC_I
        ARMop   IMB_Full,NE,,r3         ; I-cache turning on, Cache_InvalidateAll could be unsafe

10
        ; If I+D are currently enabled, and at least one is turning off, turn
        ; off the HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BEQ     %FT11
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BNE     %FT11
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT11
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Deactivate]
        Pull    "r1-r3,r12"
11
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then clean data cache first
        BEQ     %FT15
        ; When disabling the data cache we have the problem that modern ARMs
        ; generally ignore unexpected cache hits, so any stack usage between us
        ; disabling the cache and finishing the clean + invalidate is very unsafe.
        ; Solve this problem by making the current pages of the SVC stack
        ; temporarily uncacheable for the duration of the dangerous bit.
        ; (n.b. making the current stack page uncacheable has the same problems as
        ; turning off the cache globally, but OS_Memory 0 has its own workaround
        ; for that)
        MOV     r0, #(1<<9)+(2<<14)
        BL      ModifyStackCacheability
        ARMop   Cache_CleanAll,,,r3
15
        ARM_write_control r2
        myISB   ,lr ; Must be running on >=ARMv6, so perform ISB to ensure CP15 write is complete
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then flush cache afterwards
        BEQ     %FT17
        LDR     r3,=ZeroPage
        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turned off, can safely invalidate I+D
        B       %FT19
17
        TST     lr, #MMUC_I
        BEQ     %FT20
        LDR     r3,=ZeroPage
        ARMop   IMB_Full,,,r3           ; Only I-cache which turned off, clean D-cache & invalidate I-cache
19
        ; Undo any stack uncaching we performed above
        BIC     lr, r1, r2
        TST     lr, #MMUC_C
        MOVNE   r0, #(1<<9)+(3<<14)
        BLNE    ModifyStackCacheability
20
        ; If either of I+D was disabled, and both are now turned on, turn on the
        ; HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BNE     %FT30
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BEQ     %FT30
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT30
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Activate]
        Pull    "r1-r3,r12"
30
        MSR     CPSR_c, r4              ; restore IRQ state
        MOV     r3, r1
        MOV     r1, #Service_ReleaseFIQ
        SWI     XOS_ServiceCall         ; allow FIQs again
        MOV     r1, r3
        CLRV
        Pull    "r0,r3,r4,r5,pc"

MMUC_modcon_readonly
        LDR     r3, =ZeroPage
        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control r1
        STR     r1, [r3, #MMUControlSoftCopy]
        MOV     r2, r1
        Pull    "r0,r3,r4,r5,pc"

; PPLTrans should contain L2_AP + L2_XN + L2_S + L2_SmallPage
; PCBTrans should contain L2_C + L2_B + L2_TEX

; In:
; r0 = phys addr (aligned)
; r1 = page flags:
;      APBits
;      NotBufferable
;      NotCacheable
;      CPBits
;      PageFlags_TempUncacheableBits
; r2 -> PPLTrans
; r3 -> PCBTrans
; Out:
; r0 = PTE for 4K page ("small page")
Get4KPTE ROUT
        Entry   "r4"
        AND     lr, r1, #DynAreaFlags_APBits
        MOV     lr, lr, LSL #1
        LDRH    lr, [r2, lr]
        ; Insert AP bits, page type/size, etc.
        ORR     r0, r0, lr
        ; Insert CB+TEX bits
        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
        TST     r1, #PageFlags_TempUncacheableBits
        AND     r4, r1, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
        AND     lr, r1, #DynAreaFlags_CPBits
        ORRNE   r4, r4, #XCB_TU<<4                      ; if temp uncache, set TU bit
        ORR     r4, r4, lr, LSR #10-4
        LDRB    r4, [r3, r4, LSR #4]                    ; convert to TEX, C and B bits for this CPU
        ORR     r0, r0, r4
        EXIT

; In:
; As per Get4KPTE
; Out:
; r0 = PTE for 64K page ("large page")
Get64KPTE ROUT
        Entry   "r4"
        AND     lr, r1, #DynAreaFlags_APBits
        MOV     lr, lr, LSL #1
        LDRH    lr, [r2, lr]
        ; Remap XN bit, page type
        AND     r4, lr, #L2_XN
        BIC     lr, lr, #3
        ORR     r0, r0, #L2_LargePage
        ASSERT  L2L_XN = L2_XN :SHL: 15
        ORR     r0, r0, r4, LSL #15
        ; Insert AP, S bits
        ORR     r0, r0, lr
50
        ; Insert CB+TEX bits
        ; Shared with Get1MPTE
        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
        TST     r1, #PageFlags_TempUncacheableBits
        AND     r4, r1, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
        AND     lr, r1, #DynAreaFlags_CPBits
        ORRNE   r4, r4, #XCB_TU<<4                      ; if temp uncache, set TU bit
        ORR     r4, r4, lr, LSR #10-4
        LDRB    r4, [r3, r4, LSR #4]                    ; convert to TEX, C and B bits for this CPU
        ; Move TEX field up
        ORR     r4, r4, r4, LSL #L2L_TEXShift-L2_TEXShift
        BIC     r4, r4, #L2_TEX :OR: ((L2_C+L2_B) :SHL: (L2L_TEXShift-L2_TEXShift))
        ORR     r0, r0, r4
        EXIT

; In:
; As per Get4KPTE
; Out:
; r0 = PTE for 1M page ("section")
Get1MPTE
        ALTENTRY
        AND     lr, r1, #DynAreaFlags_APBits
        MOV     lr, lr, LSL #1
        LDRH    lr, [r2, lr]
        ; Remap XN bit, page type
        AND     r4, lr, #L2_XN
        AND     lr, lr, #L2_AP + L2_S
        ORR     r0, r0, #L1_Section
        ASSERT  L1_XN = L2_XN :SHL: 4
        ORR     r0, r0, r4, LSL #4
        ; Insert AP, S bits
        ASSERT  L1_APShift-L2_APShift=6
        ASSERT  L1_S = L2_S :SHL: 6
        ORR     r0, r0, lr, LSL #6
        ; Insert CB+TEX bits
        ASSERT  L1_C = L2_C
        ASSERT  L1_B = L2_B
        ASSERT  L1_TEXShift = L2L_TEXShift
        B       %BT50

; In:
; r0 = L2PT entry
; Out:
; r0 = phys addr
; r1 = page flags
;      or -1 if fault
; r2 = page size (bytes)
DecodeL2Entry   ROUT
        TST     r0, #3
        MOVEQ   r1, #-1
        MOVEQ   pc, lr
        Entry   "r3-r5"
        ; Find entry in PPL table
        LDR     r3, =ZeroPage
        LDR     r2, =L2_AP+L2_XN ; L2_S ignored, pages should either be all shareable or all not shareable
        LDR     r3, [r3, #MMU_PPLTrans]
        AND     r4, r2, r0
        ; Get XN
        ASSERT  L2_XN = 1
        ASSERT  L2_SmallPage = 2
        ASSERT  L2_LargePage = 1
        TST     r0, #L2_SmallPage ; EQ if LargePage
        TSTEQ   r0, #L2L_XN
        BICEQ   r4, r4, #L2_XN ; Large page with no XN, so clear the fake XN flag we picked up earlier
        MOV     r1, #0
10
        LDRH    r5, [r3, r1]
        AND     r5, r5, r2
        CMP     r5, r4
        ADDNE   r1, r1, #2
        BNE     %BT10
        ; Remap TEX+CB so that they're in the same position as a small page entry
        TST     r0, #L2_SmallPage ; EQ if LargePage
        MOV     r4, #L2_C+L2_B
        ORRNE   r4, r4, #L2_TEX
        AND     r4, r0, r4
        ANDEQ   lr, r0, #L2L_TEX
        ORREQ   r4, r4, lr, LSR #L2L_TEXShift-L2_TEXShift
        ; Align phys addr to page size and set up R2
        MOV     r0, r0, LSR #12
        BICEQ   r0, r0, #15
        MOV     r0, r0, LSL #12
        MOVEQ   r2, #65536
        MOVNE   r2, #4096
20
        ; Search through PCBTrans for a match on TEX+CB (shared with L1 decoding)
        ; Funny order is used so that NCNB is preferred over other variants (since NCNB is the common fallback)
        LDR     r3, =ZeroPage
        MOV     r1, r1, LSR #1
        LDR     r3, [r3, #MMU_PCBTrans]
        MOV     lr, #3
30
        LDRB    r5, [r3, lr]
        CMP     r5, r4
        BEQ     %FT40
        TST     lr, #2_11
        SUBNE   lr, lr, #1                      ; loop goes 3,2,1,0,7,6,5,4,...,31,30,29,28
        ADDEQ   lr, lr, #7
        TEQ     lr, #35
        BNE     %BT30                           ; Give up if end of table reached
40
        ; Decode index back into page flags
        ; n.b. temp uncache is ignored (no way we can differentiate it from genuinely uncached pages)
        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
        AND     r4, lr, #XCB_NC+XCB_NB
        AND     lr, lr, #7*XCB_P
        ORR     r1, r1, r4, LSL #4
        ORR     r1, r1, lr, LSL #10
        EXIT

; In:
; r0 = L1PT entry
; Out:
; r0 = phys addr
; r1 = page flags if 1MB page
;      or -1 if fault
;      or -2 if page table ptr
DecodeL1Entry
        ALTENTRY
        AND     r1, r0, #3
        ASSERT  L1_Fault < L1_Page
        ASSERT  L1_Page < L1_Section
        CMP     r1, #L1_Page
        BGT     %FT50
        MOVLT   r1, #-1
        MOVEQ   r1, #-2
        MOVEQ   r0, r0, LSR #10
        MOVEQ   r0, r0, LSL #10
        EXIT
50
        ; Find entry in PPL table
        LDR     r3, =ZeroPage
        LDR     lr, =L2_AP
        LDR     r3, [r3, #MMU_PPLTrans]
        ASSERT  L1_APShift = L2_APShift+6
        AND     r4, lr, r0, LSR #6
        TST     r0, #L1_XN
        ORRNE   r4, r4, #L2_XN
        ORR     lr, lr, #L2_XN
        MOV     r1, #0
60
        LDRH    r5, [r3, r1]
        AND     r5, r5, lr
        CMP     r5, r4
        ADDNE   r1, r1, #2
        BNE     %BT60
        ; Remap TEX+CB so that they're in the same position as a small page entry
        ASSERT  L1_C = L2_C
        ASSERT  L1_B = L2_B
        AND     r4, r0, #L1_C+L1_B
        AND     lr, r0, #L1_TEX
        ORR     r4, r4, lr, LSR #L1_TEXShift-L2_TEXShift
        ; Align phys addr to page size
        MOV     r0, r0, LSR #20
        MOV     r0, r0, LSL #20
        ; Now search through PCBTrans for a match
        B       %BT20

        END