; Copyright 2009 Castle Technology Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
;       > VMSAv6

        GBLL    DebugAborts
DebugAborts SETL {FALSE}

; MMU interface file - VMSAv6 version
; Created from s.ARM600 by JL 18-Feb-09

; Make sure we aren't being compiled against a CPU that can't possibly support a VMSAv6 MMU
        ASSERT  :LNOT: NoARMv6

        KEEP

; Convert given page flags to the equivalent temp uncacheable L2PT flags
        MACRO
        GetTempUncache $out, $pageflags, $pcbtrans, $temp
        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
        AND     $out, $pageflags, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
        AND     $temp, $pageflags, #DynAreaFlags_CPBits
        ORR     $out, $out, #XCB_TU<<4          ; treat as temp uncacheable
        ORR     $out, $out, $temp, LSR #10-4
        LDRB    $out, [$pcbtrans, $out, LSR #4] ; convert to X, C and B bits for this CPU
        MEND

TempUncache_L2PTMask * L2_B+L2_C+L2_TEX
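
; Illustrative use of GetTempUncache, mirroring the call made in BangL2PT
; below (a sketch only - the register choice is up to the caller):
;
;       LDR     lr, [r4, #MMU_PCBTrans]         ; r4 -> ZeroPage; lr -> PCB translation table
;       GetTempUncache r0, r11, lr, r4          ; r11 = page flags; result in r0, r4 corrupted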

; ****************  CAM manipulation utility routines  ***********************************

; **************************************************************************************
;
;       BangCamUpdate - Update CAM, MMU for page move, coping with page currently mapped in
;
; mjs Oct 2000
; reworked to use generic ARM ops (vectored to appropriate routines during boot)
;
; First look in the CamEntries table to find the logical address L this physical page is
; currently allocated to. Then check in the Level 2 page tables to see if page L is currently
; at page R2. If it is, then map page L to be inaccessible, otherwise leave page L alone.
; Then map logical page R3 to physical page R2.
;
; in:   r2 = physical page number
;       r3 = logical address (2nd copy if doubly mapped area)
;       r9 = offset from 1st to 2nd copy of doubly mapped area (either source or dest, but not both)
;       r11 = PPL + CB bits
;
; out:  r0, r1, r4, r6 corrupted
;       r2, r3, r5, r7-r12 preserved
;
; NB Use of stack is allowed in this routine

BangCamUpdate ROUT
        TST     r11, #DynAreaFlags_DoublyMapped ; if moving page to doubly mapped area
        SUBNE   r3, r3, r9                      ; then CAM soft copy holds ptr to 1st copy

        LDR     r1, =ZeroPage
        LDR     r1, [r1, #CamEntriesPointer]
        ADD     r1, r1, r2, LSL #CAM_EntrySizeLog2 ; point at cam entry (logaddr, PPL)
        ASSERT  CAM_LogAddr=0
        ASSERT  CAM_PageFlags=4
        LDMIA   r1, {r0, r6}                    ; r0 = current logaddress, r6 = current PPL
        BIC     r4, r11, #PageFlags_Unsafe
        STMIA   r1, {r3, r4}                    ; store new address, PPL
        Push    "r0, r6"                        ; save old logical address, PPL
        LDR     r1, =ZeroPage+PhysRamTable      ; go through phys RAM table
        MOV     r6, r2                          ; make copy of r2 (since that must be preserved)
10      LDMIA   r1!, {r0, r4}                   ; load next address, size
        SUBS    r6, r6, r4, LSR #12             ; subtract off that many pages
        BCS     %BT10                           ; if more than that, go onto next bank

        ADD     r6, r6, r4, LSR #12             ; put back the ones which were too many
        ADD     r0, r0, r6, LSL #12             ; move on address by the number of pages left
        LDR     r6, [sp]                        ; reload old logical address

; now we have r6 = old logical address, r2 = physical page number, r0 = physical address

        TEQ     r6, r3                          ; TMD 19-Jan-94: if old logaddr = new logaddr, then
        BEQ     %FT20                           ; don't remove page from where it is, to avoid window
                                                ; where page is nowhere.
        LDR     r1, =L2PT
        ADD     r6, r1, r6, LSR #10             ; r6 -> L2PT entry for old log.addr
        MOV     r4, r6, LSR #12                 ; r4 = word offset into L2 for address r6
        LDR     r4, [r1, r4, LSL #2]            ; r4 = L2PT entry for L2PT entry for old log.addr
        TST     r4, #3                          ; if page not there
        BEQ     %FT20                           ; then no point in trying to remove it

        LDR     r4, [r6]                        ; r4 = L2PT entry for old log.addr
        MOV     r4, r4, LSR #12                 ; r4 = physical address for old log.addr
        TEQ     r4, r0, LSR #12                 ; if equal to physical address of page being moved
        BNE     %FT20                           ; if not there, then just put in new page

        AND     r4, r11, #PageFlags_Unsafe
        Push    "r0, r3, r11, r14"              ; save phys.addr, new log.addr, new PPL, lr
        ADD     r3, sp, #4*4
        LDMIA   r3, {r3, r11}                   ; reload old logical address, old PPL
        LDR     r0, =DuffEntry                  ; Nothing to do if wasn't mapped in
        ORR     r11, r11, r4
        TEQ     r3, r0
        MOV     r0, #0                          ; cause translation fault
        BLNE    BangL2PT                        ; map page out
        Pull    "r0, r3, r11, r14"
20      ADD     sp, sp, #8                      ; junk old logical address, PPL
        B       BangCamAltEntry                 ; and branch into BangCam code
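
; Note: the PhysRamTable walks in BangCamUpdate (above) and BangCam (below)
; convert a physical page number into a physical address by stepping through
; (start address, size) pairs; sizes are in bytes, hence the LSR #12 to work
; in 4K pages.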

; **************************************************************************************
;
;       BangCam - Update CAM, MMU for page move, assuming page currently mapped out
;
;       This routine maps a physical page to a given logical address
;       It is assumed that the physical page is currently not mapped anywhere else
;
; in:   r2 = physical page number
;       r3 = logical address (2nd copy if doubly mapped)
;       r9 = offset from 1st to 2nd copy of doubly mapped area (either source or dest, but not both)
;       r11 = PPL
;
; out:  r0, r1, r4, r6 corrupted
;       r2, r3, r5, r7-r12 preserved
;
; NB Can't use stack - there might not be one!
;
; NB Also - the physical page number MUST be in range.
;
; This routine must work in 32-bit mode

BangCam ROUT
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        SUBNE   r3, r3, r9                      ; then move ptr to 1st copy
        LDR     r1, =ZeroPage+PhysRamTable      ; go through phys RAM table
        MOV     r6, r2                          ; make copy of r2 (since that must be preserved)
10      LDMIA   r1!, {r0, r4}                   ; load next address, size
        SUBS    r6, r6, r4, LSR #12             ; subtract off that many pages
        BCS     %BT10                           ; if more than that, go onto next bank

        ADD     r6, r6, r4, LSR #12             ; put back the ones which were too many
        ADD     r0, r0, r6, LSL #12             ; move on address by the number of pages left

BangCamAltEntry
        LDR     r4, =DuffEntry                  ; check for requests to map a page to nowhere
        ADR     r1, PPLTrans
        TEQ     r4, r3                          ; don't actually map anything to nowhere
        MOVEQ   pc, lr
        AND     r4, r11, #3                     ; first use PPL bits
        LDR     r1, [r1, r4, LSL #2]            ; get PPL bits and SmallPage indicator

        ASSERT  DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
        ASSERT  DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
        ASSERT  DynAreaFlags_NotBufferable = XCB_NB :SHL: 4

        ORR     r0, r0, r1
        LDR     r6, =ZeroPage
        LDR     r6, [r6, #MMU_PCBTrans]
        TST     r11, #PageFlags_TempUncacheableBits
        AND     r1, r11, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
        AND     r4, r11, #DynAreaFlags_CPBits
        ORRNE   r1, r1, #XCB_TU<<4              ; if temp uncache, set TU bit
        ORR     r1, r1, r4, LSR #10-4
        LDRB    r1, [r6, r1, LSR #4]            ; convert to X, C and B bits for this CPU
        ORR     r0, r0, r1

        LDR     r1, =L2PT                       ; point to level 2 page tables
        ; fall through to BangL2PT

; internal entry point for updating L2PT entry
;
; entry: r0 = new L2PT value, r1 -> L2PT, r3 = logical address (4k aligned), r11 = PPL
;
; exit: r0,r1,r4,r6 corrupted
;
BangL2PT                                        ; internal entry point used only by BangCamUpdate
        Push    "lr"
        MOV     r6, r0

        TST     r11, #PageFlags_Unsafe
        BNE     BangL2PT_unsafe

        TST     r11, #DynAreaFlags_DoublyMapped
        BNE     BangL2PT_sledgehammer           ; if doubly mapped, don't try to be clever

        ; In order to safely map out a cacheable page and remove it from the
        ; cache, we need to perform the following process:
        ; * Make the page uncacheable
        ; * Flush TLB
        ; * Clean+invalidate cache
        ; * Write new mapping (r6)
        ; * Flush TLB
        ; For uncacheable pages we can just do the last two steps
        ;
        TEQ     r6, #0                          ; EQ if mapping out
        TSTEQ   r11, #DynAreaFlags_NotCacheable ; EQ if also cacheable (overcautious for temp uncache+illegal PCB combos)
        LDR     r4, =ZeroPage
        BNE     %FT20
        ; Potentially we could just map as strongly-ordered + XN here
        ; But for safety just go for temp uncacheable (will retain memory type + shareability)
        LDR     lr, [r4, #MMU_PCBTrans]
        GetTempUncache r0, r11, lr, r4
        LDR     lr, [r1, r3, LSR #10]           ; get current L2PT entry
        LDR     r4, =TempUncache_L2PTMask
        BIC     lr, lr, r4                      ; remove current attributes
        ORR     lr, lr, r0
        STR     lr, [r1, r3, LSR #10]           ; make uncacheable
        LDR     r4, =ZeroPage
        MOV     r0, r3
        ARMop   MMU_ChangingUncachedEntry,,, r4 ; TLB flush
        MOV     r0, r3
        ADD     r1, r3, #4096
        ARMop   Cache_CleanInvalidateRange,,, r4 ; cache flush
        LDR     r1, =L2PT
20      STR     r6, [r1, r3, LSR #10]           ; update L2PT entry
        Pull    "lr"
        MOV     r0, r3
        ARMop   MMU_ChangingUncachedEntry,,tailcall,r4
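
; Note: L2PT is the flat array of level 2 page table entries, one word per 4K
; page, so "address LSR #10" (= (address/4096)*4) is the byte offset of the
; L2PT entry describing that address - this is why the loads and stores above
; and below all index L2PT with "LSR #10".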

BangL2PT_sledgehammer
        ; sledgehammer is super cautious and does cache/TLB coherency on a
        ; global basis; should only be used for awkward cases
        ;
        TEQ     r6, #0                          ; EQ if mapping out
        TSTEQ   r11, #DynAreaFlags_NotCacheable ; EQ if also cacheable (overcautious for temp uncache+illegal PCB combos)
        ADR     lr, %FT30
        LDR     r4, =ZeroPage
        ARMop   MMU_Changing, EQ, tailcall, r4
        ARMop   MMU_ChangingUncached, NE, tailcall, r4

30      STR     r6, [r1, r3, LSR #10]!          ; update level 2 page table (and update pointer so we can use bank-to-bank offset)
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        STRNE   r6, [r1, r9, LSR #10]           ; then store entry for 2nd copy as well
        ADDNE   r3, r3, r9                      ; and point logical address back at 2nd copy

        ; In order to guarantee that the result of a page table write is
        ; visible, the ARMv6+ memory order model requires us to perform TLB
        ; maintenance (equivalent to the MMU_ChangingUncached ARMop) after
        ; we've performed the write. Performing the maintenance beforehand
        ; (as we've done traditionally) will work most of the time, but not
        ; always.
        Pull    "lr"
        ARMop   MMU_ChangingUncached,,tailcall,r4

BangL2PT_unsafe
        STR     r6, [r1, r3, LSR #10]!          ; update level 2 page table (and update pointer so we can use bank-to-bank offset)
        TST     r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
        STRNE   r6, [r1, r9, LSR #10]           ; then store entry for 2nd copy as well
        ADDNE   r3, r3, r9                      ; and point logical address back at 2nd copy
        Pull    "pc"

PPLTransL1
        &       (AP_Full * L1_APMult) + L1_Section      ; R any W any
        &       (AP_Read * L1_APMult) + L1_Section      ; R any W sup
        &       (AP_None * L1_APMult) + L1_Section      ; R sup W sup
        &       (AP_ROM  * L1_APMult) + L1_Section      ; R any W none

PPLTrans
        &       (AP_Full * L2X_APMult) + L2_ExtPage     ; R any W any
        &       (AP_Read * L2X_APMult) + L2_ExtPage     ; R any W sup
        &       (AP_None * L2X_APMult) + L2_ExtPage     ; R sup W sup
        &       (AP_ROM  * L2X_APMult) + L2_ExtPage     ; R any W none

PPLTransLarge
        &       (AP_Full * L2_APMult) + L2_LargePage    ; R any W any
        &       (AP_Read * L2_APMult) + L2_LargePage    ; R any W sup
        &       (AP_None * L2_APMult) + L2_LargePage    ; R sup W sup
        &       (AP_ROM  * L2_APMult) + L2_LargePage    ; R any W none

PageShifts
        =       12, 13, 0, 14                   ; 1 2 3 4
        =       0,  0,  0, 15                   ; 5 6 7 8
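
; (The PPLTrans* tables above are indexed by the bottom two bits of a page's
;  PPL - see "AND r4, r11, #3" in BangCamAltEntry - with each entry supplying
;  the access permission bits plus the section/page type for the relevant
;  page table entry format.)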

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

; SWI OS_UpdateMEMC: Read/write MEMC1 control register

SSETMEMC ROUT
        AND     r10, r0, r1
        LDR     r12, =ZeroPage
        WritePSRc SVC_mode+I_bit+F_bit, r0
        LDR     r0, [r12, #MEMC_CR_SoftCopy]    ; return old value
        BIC     r11, r0, r1
        ORR     r11, r11, r10
        BIC     r11, r11, #&FF000000
        BIC     r11, r11, #&00F00000
        ORR     r11, r11, #MEMCADR
        STR     r11, [r12, #MEMC_CR_SoftCopy]

; mjs Oct 2000 kernel/HAL split
;
; The kernel itself should now never call this SWI, but grudgingly has
; to maintain at least bit 10 of soft copy
;
; Here, we only mimic action of bit 10 to control video/cursor DMA (eg. for ADFS)
; The whole OS_UpdateMEMC thing would ideally be withdrawn as archaic, but
; unfortunately has not even been deprecated up to now

; for reference, the bits of the MEMC1 control register are:
;
;   bits 0,1  => unused
;   bits 2,3  => page size, irrelevant since always 4K
;   bits 4,5  => low ROM access time (mostly irrelevant but set it up anyway)
;   bits 6,7  => hi ROM access time (definitely irrelevant but set it up anyway)
;   bits 8,9  => DRAM refresh control
;   bit 10    => Video/cursor DMA enable
;   bit 11    => Sound DMA enable
;   bit 12    => OS mode

        Push    "r0,r1,r4, r14"
        TST     r11, #(1 :SHL: 10)
        MOVEQ   r0, #1                          ; blank (video DMA disable)
        MOVNE   r0, #0                          ; unblank (video DMA enable)
        MOV     r1, #0                          ; no funny business with DPMS
        ADD     r4, r12, #VduDriverWorkSpace
        LDR     r4, [r4, #CurrentGraphicsVDriver]
        MOV     r4, r4, LSL #24
        ORR     r4, r4, #GraphicsV_SetBlank
        BL      CallGraphicsV
        Pull    "r0,r1,r4, r14"
        WritePSRc SVC_mode+I_bit, r11
        ExitSWIHandler

        LTORG

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
;       SWI OS_MMUControl
;
; in:   r0 = 0 (reason code 0, for modify control register)
;       r1 = EOR mask
;       r2 = AND mask
;
;       new control = ((old control AND r2) EOR r1)
;
; out:  r1 = old value
;       r2 = new value
;
; in:   r0 bits 1 to 28 = 0, bit 0 = 1 (reason code 1, for flush request)
;       r0 bit 31 set if cache(s) to be flushed
;       r0 bit 30 set if TLB(s) to be flushed
;       r0 bit 29 set if flush of entry only (else whole flush)
;       r0 bit 28 set if write buffer to be flushed (implied by bit 31)
;       r1 = entry specifier, if r0 bit 29 set
;       (currently, flushing by entry is ignored, and just does full flush)
;
; in:   r0 bits 0-7 = 2: reason code 2, read ARMop
;       r0 bits 15-8 = ARMop index
;
; out:  r0 = ARMop function ptr
;
MMUControlSWI Entry
        BL      MMUControlSub
        PullEnv
        ORRVS   lr, lr, #V_bit
        ExitSWIHandler

MMUControlSub
        Push    lr
        AND     lr, r0, #&FF
        CMP     lr, #MMUCReason_Unknown
        ADDCC   pc, pc, lr, LSL #2
        B       MMUControl_Unknown
        B       MMUControl_ModifyControl
        B       MMUControl_Flush
        B       MMUControl_GetARMop

MMUControl_Unknown
        ADRL    r0, ErrorBlock_HeapBadReason
 [ International
        BL      TranslateError
 |
        SETV
 ]
        Pull    "pc"

; Make current stack page(s) temporarily uncacheable to make cache disable operations safer
; In: R0 = OS_Memory 0 flags
ModifyStackCacheability Entry "r1-r2", 24
        ; Make up to two pages uncacheable
        ADD     lr, sp, #24+12                  ; Get original SP
        STR     lr, [sp, #4]                    ; Make current page uncacheable
        ASSERT  (SVCStackAddress :AND: ((1<<20)-1)) = 0 ; Assume MB aligned stack
        TST     lr, #(1<<20)-4096               ; Zero if this is the last stack page
        SUBNE   lr, lr, #4096
        STRNE   lr, [sp, #12+4]                 ; Make next page uncacheable
        MOVNE   r2, #2
        MOV     r1, sp
        MOVEQ   r2, #1
        BL      MemoryConvertNoFIQCheck         ; Bypass FIQ disable logic within OS_Memory (we've already claimed the FIQ vector)
        EXIT
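
; (For reference: ModifyStackCacheability builds an OS_Memory 0 page block in
;  the 24 bytes of frame claimed by Entry - three words per page, with the
;  logical address in the second word of each entry, hence the stores to
;  [sp, #4] and [sp, #12+4] above - then passes r1 -> block, r2 = page count.)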

MMUControl_ModifyControl ROUT
        Push    "r0,r3,r4,r5"
        CMP     r1, #0
        CMPEQ   r2, #&FFFFFFFF
        BEQ     MMUC_modcon_readonly
        MOV     r3, r1
        MOV     r1, #Service_ClaimFIQ
        SWI     XOS_ServiceCall                 ; stop FIQs for safety
        MOV     r1, r3
        LDR     r3, =ZeroPage
        MRS     r4, CPSR
        CPSID   if                              ; disable IRQs while we modify soft copy (and possibly switch caches off/on)
        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control lr
        AND     r2, r2, lr
        EOR     r2, r2, r1
        MOV     r1, lr
        LDR     r5, [r3, #ProcessorFlags]
        TST     r5, #CPUFlag_SplitCache
        BEQ     %FT05
05
        STR     r2, [r3, #MMUControlSoftCopy]
        BIC     lr, r2, r1                      ; lr = bits going from 0->1
        TST     lr, #MMUC_C                     ; if cache turning on then flush cache before we do it
        TSTEQ   lr, #MMUC_I
        BEQ     %FT10
        ARMop   Cache_InvalidateAll,,,r3
10
        ; If I+D currently enabled, and at least one is turning off, turn off
        ; HAL L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BEQ     %FT11
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BNE     %FT11
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT11
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Deactivate]
        Pull    "r1-r3,r12"
11
        BIC     lr, r1, r2                      ; lr = bits going from 1->0
        TST     lr, #MMUC_C                     ; if cache turning off then clean data cache first
        BEQ     %FT15
        ; When disabling the data cache we have the problem that modern ARMs
        ; generally ignore unexpected cache hits, so any stack usage between
        ; us disabling the cache and finishing the clean + invalidate is very
        ; unsafe. Solve this problem by making the current pages of the SVC
        ; stack temporarily uncacheable for the duration of the dangerous bit.
        ; (n.b. making the current stack page uncacheable has the same
        ; problems as turning off the cache globally, but OS_Memory 0 has its
        ; own workaround for that)
        MOV     r0, #(1<<9)+(2<<14)
        BL      ModifyStackCacheability
        ARMop   Cache_CleanAll,,,r3
15
        ARM_write_control r2
        myISB   ,lr                             ; Must be running on >=ARMv6, so perform ISB to ensure CP15 write is complete
        BIC     lr, r1, r2                      ; lr = bits going from 1->0
        TST     lr, #MMUC_C                     ; if cache turning off then flush cache afterwards
        TSTNE   lr, #MMUC_I
        BEQ     %FT20
        LDR     r3, =ZeroPage
        ARMop   Cache_InvalidateAll,,,r3
        ; Undo any stack uncaching we performed above
        BIC     lr, r1, r2
        TST     lr, #MMUC_C
        MOVNE   r0, #(1<<9)+(3<<14)
        BLNE    ModifyStackCacheability
20
        ; If either I+D was disabled, and now both are turned on, turn on HAL
        ; L2 cache
        TST     r1, #MMUC_C
        TSTNE   r1, #MMUC_I
        BNE     %FT30
        TST     r2, #MMUC_C
        TSTNE   r2, #MMUC_I
        BEQ     %FT30
        LDR     r0, [r3, #Cache_HALDevice]
        TEQ     r0, #0
        BEQ     %FT30
        Push    "r1-r3,r12"
        MOV     lr, pc
        LDR     pc, [r0, #HALDevice_Activate]
        Pull    "r1-r3,r12"
30
        MSR     CPSR_c, r4                      ; restore IRQ state
        MOV     r3, r1
        MOV     r1, #Service_ReleaseFIQ
        SWI     XOS_ServiceCall                 ; allow FIQs again
        MOV     r1, r3
        CLRV
        Pull    "r0,r3,r4,r5,pc"

MMUC_modcon_readonly
        LDR     r3, =ZeroPage
        ; We're ARMv6+, just read the real control reg and ignore the soft copy
        ARM_read_control r1
        STR     r1, [r3, #MMUControlSoftCopy]
        MOV     r2, r1
        Pull    "r0,r3,r4,r5,pc"

MMUControl_Flush
        MOVS    r10, r0
        LDR     r12, =ZeroPage
        ARMop   Cache_CleanInvalidateAll,MI,,r12
        TST     r10, #&40000000
        ARMop   TLB_InvalidateAll,NE,,r12
        TST     r10, #&10000000
        ARMop   DSB_ReadWrite,NE,,r12
        ADDS    r0, r10, #0
        Pull    "pc"

MMUControl_GetARMop
        AND     r0, r0, #&FF00
        CMP     r0, #(ARMopPtrTable_End-ARMopPtrTable):SHL:6
        BHS     MMUControl_Unknown
        ADRL    lr, ARMopPtrTable
        LDR     r0, [lr, r0, LSR #6]
        LDR     r0, [r0]
        Pull    "pc"

; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
;       Exception veneers

 [ ChocolateAMB
;
; Instruction fetch abort pre-veneer, just to field possible lazy AMB aborts
;
PAbPreVeneer ROUT
        Push    "r0-r7, lr"                     ; wahey, we have an abort stack
        SUB     r0, lr_abort, #4                ; aborting address
        MOV     r2, #1
        BL      AMB_LazyFixUp                   ; can trash r0-r7, returns NE status if claimed and fixed up
        ; DSB + ISB required to ensure effect of page table write is fully
        ; visible (after overwriting a faulting entry)
        myDSB   NE, r0
        myISB   NE, r0,, y
        Pull    "r0-r7, lr", NE                 ; restore regs and
        SUBNES  pc, lr_abort, #4                ; restart aborting instruction if fixed up
        LDR     lr, [sp, #8*4]                  ; (not a lazy abort) restore lr
        LDR     r0, =ZeroPage+PAbHan            ; we want to jump to PAb handler, in abort mode
        LDR     r0, [r0]
        STR     r0, [sp, #8*4]
        Pull    "r0-r7, pc"
 ]

; Preliminary layout of abort indirection nodes

                ^       0
AI_Link         #       4
AI_Low          #       4
AI_High         #       4
AI_WS           #       4
AI_Addr         #       4

        EXPORT  DAbPreVeneer
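
; DAbPreVeneer builds a 17-word frame on the abort stack: r0-r14 at word
; offsets 0-14, the aborting PC at offset 15, and (once chosen) the handler
; address at offset 16 - hence the "#17*4" and "#16*4" adjustments below.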

DAbPreVeneer ROUT
        SUB     r13_abort, r13_abort, #17*4     ; we use stacks, dontcherknow
        STMIA   r13_abort, {r0-r7}              ; save unbanked registers anyway
        STR     lr_abort, [r13_abort, #15*4]    ; save old PC, ie instruction address
        MyCLREX r0, r1                          ; Exclusive monitor is in unpredictable state "after taking a data abort", clear it here

        ; Fixup code for MVA-based cache/TLB ops, which can abort on ARMv7 if the specified MVA doesn't have a mapping.
        ; Must come before AMBControl, else things can go very wrong during OS_ChangeDynamicArea.
        ; MVA cache ops have the form coproc=p15, CRn=c7, opc1=0, opc2=1
        ; MVA TLB ops have the form coproc=p15, CRn=c8, opc1=0, opc2=1
        ; Note that some non-MVA ops also follow the above rules - at the moment we make no attempt to filter those false-positives out.
        ; This code is also written from the perspective of running on an ARMv7 CPU - behaviour under ARMv6 hasn't been checked!
        ; Also, as wrong as it seems, attempting to load the aborting instruction could trigger an abort (something wrong with the prefetch handler? or cache flushes not being done properly?)
        ; So this code must protect DFAR, DFSR, spsr_abort, and lr_abort from being clobbered.
        ; We also need to be careful about how AMBControl will react to the abort:
        ; If DFAR and lr_abort both point to the same page, when we try loading the instruction (and it triggers an abort), AMBControl will map in the page.
        ; So when control returns to us (and we determine that it wasn't an MVA op) AMBControl will be called again (for DFAR), see that the page is mapped in, and claim that it's a real abort instead of the lazy fixup that it really is.
        ; (The workaround for that issue probably makes the DFAR, DFSR, spsr_abort, lr_abort saving irrelevant, but it's better to be safe than sorry)
        CMP     lr, #AplWorkMaxSize             ; Assume that MVA ops won't come from application space (cheap workaround for above-mentioned AMBControl issue)
        BLO     %FT10
        MRS     r1, SPSR
        TST     r1, #T32_bit
        BNE     %FT10                           ; We don't cope with Thumb ATM. Should really check for Jazelle too!
        MOV     r2, lr                          ; LR is already saved on the stack, but we can't load from it because any recursive abort won't have a clue what address we're trying to access
        ; Protect DFAR, DFSR
        ARM_read_FAR r3
        ARM_read_FSR r4
        LDR     r0, [r2, #-8]                   ; Get aborting instruction
        MSR     SPSR_cxsf, r1                   ; un-clobber SPSR, FAR, FSR
        ARM_write_FAR r3
        ARM_write_FSR r4
        CMP     r0, #&F0000000
        BHS     %FT10                           ; Ignore cc=NV, which is MCR2 encoding
        BIC     r0, r0, #&F000000F              ; Mask out the uninteresting bits
        BIC     r0, r0, #&0000F000
        EOR     r0, r0, #&0E000000              ; Desired value, minus CRn
        EOR     r0, r0, #&00000F30
        CMP     r0, #&00070000                  ; CRn=c7?
        CMPNE   r0, #&00080000                  ; CRn=c8?
        BNE     %FT10                           ; It's not an MVA-based op
        MOV     lr_abort, r2                    ; un-clobber LR (doesn't need un-clobbering if it wasn't an MVA op)
        LDMIA   r13_abort, {r0-r4}              ; Restore the regs we intentionally clobbered
        ADD     r13_abort, r13_abort, #17*4
        SUBS    pc, lr_abort, #4                ; Resume execution at the next instruction
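
; (For reference, the instruction decode above works as follows: the BICs
;  strip the condition code, Rd and CRm fields, then the EORs cancel the
;  fixed parts of an MCR p15 encoding - &E in bits 27-24, coprocessor 15 in
;  bits 11-8, opc2=1 in bits 7-5, and bit 4 set. Whatever survives is
;  opc1/L/CRn, so &00070000 matches "MCR p15, 0, Rd, c7, cX, 1" and
;  &00080000 the equivalent c8 TLB op.)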
10
 [ ChocolateAMB
        ARM_read_FAR r0                         ; aborting address
        MOV     r2, #0
        BL      AMB_LazyFixUp                   ; can trash r0-r7, returns NE status if claimed and fixed up
        ; DSB + ISB required to ensure effect of page table write is fully
        ; visible (after overwriting a faulting entry)
        myDSB   NE, r0
        myISB   NE, r0,, y
        LDR     lr_abort, [r13_abort, #15*4]    ; restore lr_abort
        LDMIA   r13_abort, {r0-r7}              ; restore regs
        ADDNE   r13_abort, r13_abort, #17*4     ; if fixed up, restore r13_abort
        SUBNES  pc, lr_abort, #8                ; and restart aborting instruction
 ]
        MRS     r0, SPSR                        ; r0 = PSR when we aborted
        MRS     r1, CPSR                        ; r1 = CPSR
        ADD     r2, r13_abort, #8*4             ; r2 -> saved register bank for r8 onwards
        LDR     r4, =ZeroPage+Abort32_dumparea+3*4 ; use temp area (avoid overwriting main area for expected aborts)
        ARM_read_FAR r3
        STMIA   r4, {r0, r3, lr_abort}          ; dump 32-bit PSR, fault address, 32-bit PC
        MOV     r4, lr_abort                    ; move address of aborting instruction into an unbanked register
        BIC     r1, r1, #&1F                    ; knock out current mode bits
        ANDS    r3, r0, #&1F                    ; extract old mode bits (and test for USR26_mode (=0))
        TEQNE   r3, #USR32_mode                 ; if usr26 or usr32 then use ^ to store registers
        STMEQIA r2, {r8-r14}^
        BEQ     %FT05
        ORR     r3, r3, r1                      ; and put in user's mode bits
        MSR     CPSR_c, r3                      ; switch to user's mode
        STMIA   r2, {r8-r12}                    ; save the banked registers
        STR     r13, [r2, #5*4]
        STR     r14, [r2, #6*4]
        MRS     r5, SPSR                        ; get the SPSR for the aborter's mode
        STR     r5, [r2, #8*4]                  ; and store away in the spare slot on the end
                                                ; (this is needed for LDM with PC and ^)
        ORR     r1, r1, #ABT32_mode
        MSR     CPSR_c, r1                      ; back to abort mode for the rest of this
05
        Push    "r0"                            ; save SPSR_abort
        SUB     sp, sp, #8*4                    ; make room for r8_usr to r14_usr and PC
        STMIA   sp, {r8-r15}^                   ; save USR bank in case STM ^, and also so we can corrupt them
        SUB     r11, r2, #8*4                   ; r11 -> register bank
        STR     r4, [sp, #7*4]                  ; store aborter's PC in user register bank

; Call normal exception handler

90
        ; copy temp area to real area (we believe this is an unexpected data abort now)
        LDR     r0, =ZeroPage+Abort32_dumparea
        LDR     r1, [r0, #3*4]
        STR     r1, [r0]
        LDR     r1, [r0, #4*4]
        STR     r1, [r0, #4]
        LDR     r1, [r0, #5*4]
        STR     r1, [r0, #2*4]
        LDR     r0, =ZeroPage                   ; we're going to call abort handler
 [ ZeroPage = 0
        STR     r0, [r0, #CDASemaphore]         ; so allow recovery if we were in CDA
 |
        MOV     r2, #0
        STR     r2, [r0, #CDASemaphore]         ; so allow recovery if we were in CDA
 ]
        LDR     r0, [r0, #DAbHan]               ; get address of data abort handler
 [ DebugAborts
        DREG    r0, "Handler address = "
 ]
        ADD     r2, r11, #8*4                   ; point r2 at 2nd half of main register bank
        LDMIA   sp, {r8-r14}^                   ; reload user bank registers
        NOP                                     ; don't access banked registers after LDM^
        ADD     sp, sp, #9*4                    ; junk user bank stack frame + saved SPSR
        MRS     r1, CPSR
        MRS     r6, SPSR                        ; get original SPSR, with aborter's original mode
        AND     r7, r6, #&0F
        TEQ     r7, #USR26_mode                 ; also matches USR32
        LDMEQIA r2, {r8-r14}^                   ; if user mode then just use ^ to reload registers
        NOP
        BEQ     %FT80
        ORR     r6, r6, #I32_bit                ; use aborter's flags and mode but set I
        BIC     r6, r6, #T32_bit                ; and don't set Thumb
        MSR     CPSR_c, r6                      ; switch to aborter's mode
        LDMIA   r2, {r8-r12}                    ; reload banked registers
        LDR     r13, [r2, #5*4]
        LDR     r14, [r2, #6*4]
        MSR     CPSR_c, r1                      ; switch back to ABT32
80
        STR     r0, [r13_abort, #16*4]          ; save handler address at top of stack
        LDR     lr_abort, [r13_abort, #15*4]    ; get abort address back in R14
        LDMIA   r13_abort, {r0-r7}              ; reload r0-r7
        ADD     r13_abort, r13_abort, #16*4     ; we use stacks, dontcherknow
        Pull    pc

; ---------------- XOS_SynchroniseCodeAreas implementation ---------------
;
; this SWI effectively implements IMB and IMBrange (Instruction Memory Barrier)
; for newer ARMs
;
; entry:
;    R0 = flags
;       bit 0 set -> R1,R2 specify virtual address range to synchronise
;         R1 = start address (word aligned, inclusive)
;         R2 = end address (word aligned, inclusive)
;       bit 0 clear -> synchronise entire virtual space
;       bits 1..31 reserved
;
; exit:
;    R0-R2 preserved
;
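; Illustrative caller of this SWI (a sketch only, not part of this file) -
; synchronising a freshly written code buffer, both addresses inclusive:
;
;       MOV     r0, #1                          ; bit 0 set: R1/R2 give a range
;       ADR     r1, buffer_start                ; hypothetical labels
;       ADR     r2, buffer_end
;       SWI     XOS_SynchroniseCodeAreas
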
SyncCodeAreasSWI ROUT
        Push    "lr"
        BL      SyncCodeAreas
        Pull    "lr"                            ; no error return possible
        B       SLVK

SyncCodeAreas
        TST     r0, #1                          ; range variant of SWI?
        BEQ     SyncCodeAreasFull

SyncCodeAreasRange
        Push    "r0-r2, lr"
        MOV     r0, r1
        ADD     r1, r2, #4                      ; exclusive end address
        LDR     r2, =ZeroPage
        LDRB    lr, [r2, #Cache_Type]
        CMP     lr, #CT_ctype_WB_CR7_Lx         ; DCache_LineLen lin or log?
        LDRB    lr, [r2, #DCache_LineLen]
        MOVEQ   r2, #4
        MOVEQ   lr, r2, LSL lr
        LDREQ   r2, =ZeroPage
        SUB     lr, lr, #1
        ADD     r1, r1, lr                      ; rounding up end address
        MVN     lr, lr
        AND     r0, r0, lr                      ; cache line aligned
        AND     r1, r1, lr                      ; cache line aligned
        ARMop   IMB_Range,,,r2
        Pull    "r0-r2, pc"

SyncCodeAreasFull
        Push    "r0, lr"
        LDR     r0, =ZeroPage
        ARMop   IMB_Full,,,r0
        Pull    "r0, pc"

        LTORG

 [ DebugAborts
        InsertDebugRoutines
 ]

        END