; Copyright 2009 Castle Technology Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; > VMSAv6

; MMU interface file - VMSAv6 version

; Created from s.ARM600 by JL 18-Feb-09


; Make sure we aren't being compiled against a CPU that can't possibly support a VMSAv6 MMU

        ASSERT :LNOT: NoARMv6

        KEEP

        =       0                       ; So PageShifts-1 is word aligned
PageShifts
        =       12, 13, 0, 14           ; 1 2 3 4
        =       0,  0,  0, 15           ; 5 6 7 8
        ALIGN

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
; "VMSAv6"-specific OS_MMUControl code
;

        ; Make current stack page(s) temporarily uncacheable to make cache disable operations safer
        ; In: R0 = OS_Memory 0 flags
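        ; (For reference: the call sites below pass (1<<9)+(2<<14) to make the
        ; stack pages temporarily uncacheable, and (1<<9)+(3<<14) to make them
        ; cacheable again afterwards)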
ModifyStackCacheability
        Entry   "r1-r2", 24             ; Make up to two pages uncacheable
        ADD     lr, sp, #24+12          ; Get original SP
        STR     lr, [sp, #4]            ; Make current page uncacheable
        ASSERT  (SVCStackAddress :AND: ((1<<20)-1)) = 0 ; Assume MB aligned stack
        TST     lr, #(1<<20)-4096       ; Zero if this is the last stack page
        SUBNE   lr, lr, #4096
        STRNE   lr, [sp, #12+4]         ; Make next page uncacheable
        MOVNE   r2, #2
        MOV     r1, sp
        MOVEQ   r2, #1
        BL      MemoryConvertNoFIQCheck ; Bypass FIQ disable logic within OS_Memory (we've already claimed the FIQ vector)
        EXIT

; in:   r0 = 0 (reason code 0, for modify control register)
;       r1 = EOR mask
;       r2 = AND mask
;
;       new control = ((old control AND r2) EOR r1)
;
; out:  r1 = old value
;       r2 = new value
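;
; Worked example (illustrative only, not part of the interface): to force the
; I-cache enable bit on regardless of its old value, a caller could pass
;       r1 = MMUC_I        (EOR mask sets the bit after it has been cleared)
;       r2 = :NOT: MMUC_I  (AND mask clears the old bit, leaves the rest alone)
; whereas r1 = 0, r2 = &FFFFFFFF leaves the register unchanged and simply
; reads back the current value (the read-only path below).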
MMUControl_ModifyControl ROUT
        Push    "r0,r3,r4,r5"
        CMP     r1,#0
        CMPEQ   r2,#&FFFFFFFF
        BEQ     MMUC_modcon_readonly
        MOV     r3, r1
        MOV     r1, #Service_ClaimFIQ
        SWI     XOS_ServiceCall         ; stop FIQs for safety
        MOV     r1, r3
        LDR     r3,=ZeroPage
        MRS     r4, CPSR
        CPSID   if                      ; disable IRQs while we modify soft copy (and possibly switch caches off/on)

        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control lr
        AND     r2, r2, lr
        EOR     r2, r2, r1
        MOV     r1, lr

        ; On some CPUs LDREX/STREX only work on cacheable memory. Allowing the
        ; D-cache to be disabled in this situation is likely to result in near-
        ; instant failure of the OS.
        LDR     r5, [r3, #ProcessorFlags]
        TST     r5, #CPUFlag_NoDCacheDisable
        ORRNE   r2, r2, #MMUC_C

        ; If we have multiple cache levels, assume it's split caches on top of a
        ; unified cache. In which case, having mismatched I+D cache settings can
        ; be pretty dangerous due to the IMB ARMops assuming that cleaning to
        ; PoU is sufficient (D-cache on but I-cache off will fail due to the
        ; instruction fetches bypassing the unified cache, D-cache off but
        ; I-cache on will fail because the I-cache will pull code into the
        ; unified cache which an IMB won't clean)
        ; If we have the ability to disable the L2 cache then this would be OK,
        ; but we can't guarantee that ability
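        ; Check whether a second cache level is reported (Cache_Examine with
        ; r1 = 1); if not, mismatched I/D settings are tolerable and the
        ; requested values are left alone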
        Push    "r1-r4"
        MOV     r1, #1
        ARMop   Cache_Examine,,,r3
        CMP     r0, #0
        Pull    "r1-r4"
        BEQ     %FT04
        LDR     lr, =MMUC_C+MMUC_I
        TST     r2, lr
        ORRNE   r2, r2, lr              ; If one cache is on, force both on

04
        STR     r2, [r3, #MMUControlSoftCopy]
        BIC     lr, r2, r1              ; lr = bits going from 0->1
        TST     lr, #MMUC_C             ; if cache turning on then flush cache before we do it
        BEQ     %FT05

        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turning on, I-cache invalidate is either necessary (both turning on) or a safe side-effect
        B       %FT10

05
        TST     lr, #MMUC_I
        ARMop   IMB_Full,NE,,r3         ; I-cache turning on, Cache_InvalidateAll could be unsafe

10
        ; If both the I and D caches were enabled, and at least one is turning
        ; off, turn off the HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BEQ     %FT11
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BNE     %FT11
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT11
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Deactivate]
        Pull    "r1-r3,r12"
11
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then clean data cache first
        BEQ     %FT15
        ; When disabling the data cache we have the problem that modern ARMs
        ; generally ignore unexpected cache hits, so any stack usage between us
        ; disabling the cache and finishing the clean + invalidate is very
        ; unsafe. Solve this problem by making the current pages of the SVC
        ; stack temporarily uncacheable for the duration of the dangerous bit.
        ; (N.B. making the current stack page uncacheable has the same problems
        ; as turning off the cache globally, but OS_Memory 0 has its own
        ; workaround for that)
        MOV     r0, #(1<<9)+(2<<14)
        BL      ModifyStackCacheability
        ARMop   Cache_CleanAll,,,r3
15
        ARM_write_control r2
        myISB   ,lr ; Must be running on >=ARMv6, so perform ISB to ensure CP15 write is complete
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then flush cache afterwards
        BEQ     %FT17
        LDR     r3,=ZeroPage
        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turned off, can safely invalidate I+D
        B       %FT19
17
        TST     lr, #MMUC_I
        BEQ     %FT20
        LDR     r3,=ZeroPage
        ARMop   IMB_Full,,,r3           ; Only the I-cache turned off, so clean D-cache & invalidate I-cache
19
        ; Undo any stack uncaching we performed above
        BIC     lr, r1, r2
        TST     lr, #MMUC_C
        MOVNE   r0, #(1<<9)+(3<<14)
        BLNE    ModifyStackCacheability
20
        ; If either the I or D cache was disabled, and both are now enabled,
        ; turn on the HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BNE     %FT30
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BEQ     %FT30
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT30
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Activate]
        Pull    "r1-r3,r12"
30
        MSR     CPSR_c, r4              ; restore IRQ state
        MOV     r3, r1
        MOV     r1, #Service_ReleaseFIQ
        SWI     XOS_ServiceCall         ; allow FIQs again
        MOV     r1, r3
        CLRV
        Pull    "r0,r3,r4,r5,pc"

MMUC_modcon_readonly
        LDR     r3, =ZeroPage
        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control r1
        STR     r1, [r3, #MMUControlSoftCopy]
        MOV     r2, r1
        Pull    "r0,r3,r4,r5,pc"

 [ CacheablePageTables
; Out: R0 = desired page flags for the page tables
GetPageFlagsForCacheablePageTables ROUT
        ; The ID_MMFR3 register indicates whether the MMU can read from the L1
        ; data cache.
        ; If it can, it means we can use an inner & outer write-back policy.
        ; If it can't, it means the best we can do is inner write-through and
        ; outer write-back (without performing extra cache maintenance, at
        ; least)
        ARM_read_ID r0
        AND     r0, r0, #&F<<16
        CMP     r0, #ARMvF<<16                  ; Check that feature registers are implemented
        BNE     %FT90
        MRC     p15, 0, r0, c0, c1, 7           ; ID_MMFR3
        TST     r0, #&F<<20                     ; coherent walk field (can table walks hit the D-cache?)
        BEQ     %FT90
        ; MMU can read from the L1 cache, so go for default cache policy
        LDR     r0, =AreaFlags_PageTablesAccess
        MOV     pc, lr
90
        ; MMU can't read from the L1 cache, so use inner write-through, outer write-back
        LDR     r0, =AreaFlags_PageTablesAccess :OR: (CP_CB_AlternativeDCache :SHL: DynAreaFlags_CPShift)
        MOV     pc, lr
 ]

      [ LongDesc
        GET     s.VMSAv6Long
        GET     s.LongDesc
      ]
      [ ShortDesc
        GET     s.VMSAv6Short
        GET     s.ShortDesc
      ]

        END