; Copyright 2009 Castle Technology Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; > VMSAv6

; MMU interface file - VMSAv6 version

; Created from s.ARM600 by JL 18-Feb-09


; Make sure we aren't being compiled against a CPU that can't possibly support a VMSAv6 MMU

        ASSERT :LNOT: NoARMv6

        KEEP

        =       0                       ; So PageShifts-1 is word aligned
PageShifts
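        ; log2 of the page size, indexed by page size in units of 4K
        ; (1 -> 4K, 2 -> 8K, 4 -> 16K, 8 -> 32K; other entries unused)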
        =       12, 13, 0, 14           ; 1 2 3 4
        =       0,  0,  0, 15           ; 5 6 7 8
        ALIGN

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
; "VMSAv6"-specific OS_MMUControl code
;

        ; Make current stack page(s) temporarily uncacheable to make cache disable operations safer
        ; In: R0 = OS_Memory 0 flags
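        ; Builds a one- or two-entry OS_Memory 0 page block in the 24-byte stack
        ; frame (3 words per entry, logical address in the second word of each),
        ; then passes it to MemoryConvertNoFIQCheck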
ModifyStackCacheability
        Entry   "r1-r2", 24             ; Make up to two pages uncacheable
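        ; sp + 24 (local frame) + 12 (r1, r2, lr pushed by Entry) = SP on entry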
        ADD     lr, sp, #24+12          ; Get original SP
        STR     lr, [sp, #4]            ; Make current page uncacheable
        ASSERT  (SVCStackAddress :AND: ((1<<20)-1)) = 0 ; Assume MB aligned stack
        TST     lr, #(1<<20)-4096       ; Zero if this is the last stack page
        SUBNE   lr, lr, #4096
        STRNE   lr, [sp, #12+4]         ; Make next page uncacheable
        MOVNE   r2, #2
        MOV     r1, sp
        MOVEQ   r2, #1
        BL      MemoryConvertNoFIQCheck ; Bypass FIQ disable logic within OS_Memory (we've already claimed the FIQ vector)
        EXIT

; in:   r0 = 0 (reason code 0, for modify control register)
;       r1 = EOR mask
;       r2 = AND mask
;
;       new control = ((old control AND r2) EOR r1)
;
; out:  r1 = old value
;       r2 = new value
MMUControl_ModifyControl ROUT
        Push    "r0,r3,r4,r5"
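        ; r1 = 0 and r2 = &FFFFFFFF means "no change requested": just read the current value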
        CMP     r1,#0
        CMPEQ   r2,#&FFFFFFFF
        BEQ     MMUC_modcon_readonly
        MOV     r3, r1
        MOV     r1, #Service_ClaimFIQ
        SWI     XOS_ServiceCall         ; stop FIQs for safety
        MOV     r1, r3
        LDR     r3,=ZeroPage
        MRS     r4, CPSR
        CPSID   if                      ; disable IRQs while we modify soft copy (and possibly switch caches off/on)

        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control lr
        AND     r2, r2, lr
        EOR     r2, r2, r1
        MOV     r1, lr

        ; On some CPUs LDREX/STREX only work on cacheable memory. Allowing the
        ; D-cache to be disabled in this situation is likely to result in near-
        ; instant failure of the OS.
        LDR     r5, [r3, #ProcessorFlags]
        TST     r5, #CPUFlag_NoDCacheDisable
        ORRNE   r2, r2, #MMUC_C

        ; If we have multiple cache levels, assume it's split caches on top of a
        ; unified cache. In that case, mismatched I+D cache settings can be
        ; pretty dangerous, because the IMB ARMops assume that cleaning to the
        ; PoU is sufficient (D-cache on but I-cache off will fail because
        ; instruction fetches bypass the unified cache; D-cache off but I-cache
        ; on will fail because the I-cache will pull code into the unified
        ; cache, which an IMB won't clean).
        ; If we had the ability to disable the L2 cache this would be OK, but
        ; we can't guarantee that ability.
        Push    "r1-r4"
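        ; Probe the second cache level (Cache_Examine takes a 0-based level in
        ; r1); if nothing is reported there, skip the I+D coupling below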
        MOV     r1, #1
        ARMop   Cache_Examine,,,r3
        CMP     r0, #0
        Pull    "r1-r4"
        BEQ     %FT04
        LDR     lr, =MMUC_C+MMUC_I
        TST     r2, lr
        ORRNE   r2, r2, lr              ; If one cache is on, force both on

04
        STR     r2, [r3, #MMUControlSoftCopy]
        BIC     lr, r2, r1              ; lr = bits going from 0->1
        TST     lr, #MMUC_C             ; if cache turning on then flush cache before we do it
        BEQ     %FT05

        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turning on, I-cache invalidate is either necessary (both turning on) or a safe side-effect
        B       %FT10

05
        TST     lr, #MMUC_I
        ARMop   IMB_Full,NE,,r3         ; I-cache turning on, Cache_InvalidateAll could be unsafe

10
        ; If both the I and D caches are currently enabled, and at least one is
        ; turning off, turn off the HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BEQ     %FT11
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BNE     %FT11
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT11
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Deactivate]
        Pull    "r1-r3,r12"
11
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then clean data cache first
        BEQ     %FT15
        ; When disabling the data cache we have the problem that modern ARMs
        ; generally ignore unexpected cache hits, so any stack usage between us
        ; disabling the cache and finishing the clean + invalidate is very unsafe.
        ; Solve this problem by making the current pages of the SVC stack
        ; temporarily uncacheable for the duration of the dangerous bit.
        ; (N.B. making the current stack page uncacheable has the same problems
        ; as turning off the cache globally, but OS_Memory 0 has its own
        ; workaround for that)
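        ; OS_Memory 0 flags: bit 9 = logical addresses provided,
        ; cacheability field (bits 14-15) = 2 = make uncacheable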
        MOV     r0, #(1<<9)+(2<<14)
        BL      ModifyStackCacheability
        ARMop   Cache_CleanAll,,,r3
15
        ARM_write_control r2
        myISB   ,lr ; Must be running on >=ARMv6, so perform ISB to ensure CP15 write is complete
        BIC     lr, r1, r2              ; lr = bits going from 1->0
        TST     lr, #MMUC_C             ; if cache turning off then flush cache afterwards
        BEQ     %FT17
        LDR     r3,=ZeroPage
        ARMop   Cache_InvalidateAll,,,r3 ; D-cache turned off, can safely invalidate I+D
        B       %FT19
17
        TST     lr, #MMUC_I
        BEQ     %FT20
        LDR     r3,=ZeroPage
        ARMop   IMB_Full,,,r3           ; Only the I-cache turned off: clean D-cache & invalidate I-cache
19
        ; Undo any stack uncaching we performed above
        BIC     lr, r1, r2
        TST     lr, #MMUC_C
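        ; cacheability field (bits 14-15) = 3 = make cacheable again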
        MOVNE   r0, #(1<<9)+(3<<14)
        BLNE    ModifyStackCacheability
20
        ; If either the I or D cache was disabled, and both are now enabled,
        ; turn on the HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BNE     %FT30
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BEQ     %FT30
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT30
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Activate]
        Pull    "r1-r3,r12"
30
        MSR     CPSR_c, r4              ; restore IRQ state
        MOV     r3, r1
        MOV     r1, #Service_ReleaseFIQ
        SWI     XOS_ServiceCall         ; allow FIQs again
        MOV     r1, r3
        CLRV
        Pull    "r0,r3,r4,r5,pc"

MMUC_modcon_readonly
        LDR     r3, =ZeroPage
        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control r1
        STR     r1, [r3, #MMUControlSoftCopy]
        MOV     r2, r1
        Pull    "r0,r3,r4,r5,pc"

 [ CacheablePageTables
; Out: R0 = desired page flags for the page tables
GetPageFlagsForCacheablePageTables ROUT
        ; The ID_MMFR3 register indicates whether the MMU can read from the L1
        ; data cache.
        ; If it can, it means we can use an inner & outer write-back policy.
        ; If it can't, it means the best we can do is inner write-through and
        ; outer write-back (without performing extra cache maintenance, at
        ; least)
        ARM_read_ID r0
        AND     r0, r0, #&F<<16
        CMP     r0, #ARMvF<<16                  ; Check that feature registers are implemented
        BNE     %FT90
        MRC     p15, 0, r0, c0, c1, 7           ; ID_MMFR3
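        ; Coherent walk field (bits 23:20): non-zero if table walks can read
        ; from the data cache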
        TST     r0, #&F<<20
        BEQ     %FT90
        ; MMU can read from the L1 cache, so go for default cache policy
        LDR     r0, =AreaFlags_PageTablesAccess
        MOV     pc, lr
90
        ; MMU can't read from the L1 cache, so use inner write-through, outer write-back
        LDR     r0, =AreaFlags_PageTablesAccess :OR: (CP_CB_AlternativeDCache :SHL: DynAreaFlags_CPShift)
        MOV     pc, lr
 ]

      [ LongDesc
        GET     s.VMSAv6Long
        GET     s.LongDesc
      ]
      [ ShortDesc
        GET     s.VMSAv6Short
        GET     s.ShortDesc
      ]

        END