diff --git a/s/ARM600 b/s/ARM600
index 2853c9ab4dc06cc89d8e9c132f5b582bb4966811..63e46b8209a81e1f7e9057a682bee5e824cf07c4 100644
--- a/s/ARM600
+++ b/s/ARM600
@@ -437,13 +437,15 @@ Get1MPTE
         B       %BT50
 
 ; In:
-; r0 = L2PT entry
+; r0 = page-aligned logical addr
 ; Out:
 ; r0,r1 = phys addr
 ; r2 = page flags
 ;      or -1 if fault
 ; r3 = page size (bytes)
-DecodeL2Entry ROUT
+LoadAndDecodeL2Entry ROUT
+        LDR     r1, =L2PT
+        LDR     r0, [r1, r0, LSR #10]
         ANDS    r3, r0, #3
         MOVEQ   r2, #-1
         MOVEQ   pc, lr
@@ -468,7 +470,7 @@ DecodeL2Entry ROUT
         MOVLT   r3, #65536
         MOVGE   r3, #4096
 20
-        ; Common code shared with DecodeL1Entry
+        ; Common code shared with LoadAndDecodeL1Entry
         ; Only four PPL possibilities, so just directly decode it
         ; ARM access goes 0 => all R/O, 1 => user none, 2 => user R/O, 3 => user R/W
         ; PPL access goes 0 => user R/W, 1 => user R/O, 2 => user none, 3 => all R/0
@@ -502,15 +504,17 @@ DecodeL2Entry ROUT
         EXIT
 
 ; In:
-; r0 = L1PT entry
+; r0 = MB-aligned logical addr
 ; Out:
 ; r0,r1 = phys addr of section or L2PT entry
 ; r2 = page flags if 1MB page
 ;      or -1 if fault
 ;      or -2 if page table ptr
 ; r3 = section size (bytes) if section-mapped
-DecodeL1Entry
+LoadAndDecodeL1Entry
         ALTENTRY
+        LDR     r1, =L1PT
+        LDR     r0, [r1, r0, LSR #20-2]
         AND     r2, r0, #3
         ASSERT  L1_Fault < L1_Page
         ASSERT  L1_Page < L1_Section
diff --git a/s/HAL b/s/HAL
index c594a909e449d0746bd4611b0158eae105c76dc0..e76d873f3bc512a7858331484c45e0101d88c0ec 100644
--- a/s/HAL
+++ b/s/HAL
@@ -1488,18 +1488,16 @@ ConstructCAMfromPageTables
         BHI     %BT10
 
         MOV     v2, #0                          ; v2 = logical address
-        LDR     v3, =L1PT                       ; v3 -> L1PT (not used much)
-        LDR     v4, =L2PT                       ; v4 -> L2PT
-30      LDR     a1, [v3, v2, LSR #18]           ; a1 = first level descriptor
-        BL      DecodeL1Entry                   ; a1,a2 = phys addr, a3 = page flags/type, a4 = page size (bytes)
+30      MOV     a1, v2
+        BL      LoadAndDecodeL1Entry            ; a1,a2 = phys addr, a3 = page flags/type, a4 = page size (bytes)
         CMP     a3, #-2                         ; Only care about page table pointers
         BEQ     %FT40
         ADDS    v2, v2, #&00100000
         BCC     %BT30
         Pull    "v1-v8, pc"
 
-40      LDR     a1, [v4, v2, LSR #10]           ; a1 = second level descriptor
-        BL      DecodeL2Entry                   ; a1,a2 = phys addr, a3 = flags (-1 if fault), a4 = page size (bytes)
+40      MOV     a1, v2
+        BL      LoadAndDecodeL2Entry            ; a1,a2 = phys addr, a3 = flags (-1 if fault), a4 = page size (bytes)
         CMP     a3, #-1                         ; move to next page if fault
         BEQ     %FT80
         SUBS    a4, a4, #4096                   ; large pages get bits 12-15 from the virtual address
diff --git a/s/MemInfo b/s/MemInfo
index a2de7965663219479159f7dbbcc3d2df35feabcf..3e77b14b5948122a495c196e09ef9d1ee49aa075 100644
--- a/s/MemInfo
+++ b/s/MemInfo
@@ -2034,12 +2034,11 @@ CheckMemoryAccess ROUT
         LDR     r4, [r10, #IOAllocPtr]
         MOV     r3, r3, LSL #20                 ; Get MB-aligned addr of first entry to check
         CMP     r3, r4
-        LDR     r7, =L1PT
         MOVLO   r3, r4                          ; Skip all the unallocated regions
 31
         Push    "r0-r3"
-        LDR     r0, [r7, r3, LSR #20-2]
-        BL      DecodeL1Entry                   ; TODO bit wasteful. We only care about access privileges, but this call gives us cache info too.
+        MOV     r0, r3
+        BL      LoadAndDecodeL1Entry            ; TODO bit wasteful. We only care about access privileges, but this call gives us cache info too.
         LDR     r5, [r10, #MMU_PPLAccess]
         AND     lr, r2, #DynAreaFlags_APBits
         LDR     r5, [r5, lr, LSL #2]
@@ -2052,21 +2051,22 @@ CheckMemoryAccess ROUT
         BNE     %BT31
 40
         ; Everything else!
-        LDR     r3, =L1PT + (PhysicalAccess:SHR:18)
-        LDR     r3, [r3]
-        TEQ     r3, #0
-        BEQ     %FT50
-        TST     r3, #L1_SS
+        ASSERT  PhysicalAccess >= IO
+        CMP     r1, #HALWorkspace
+        BHS     %FT50
+        Push    "r0-r2"
+        LDR     r0, =PhysicalAccess
+        BL      LoadAndDecodeL1Entry
+        CMP     r2, #-2
+        AND     lr, r2, #DynAreaFlags_APBits
+        Pull    "r0-r2"
+        BHS     %FT50
+        ADD     r4, r3, #PhysicalAccess
+        LDR     r5, [r10, #MMU_PPLAccess]
         LDR     r3, =PhysicalAccess
-        LDREQ   r4, =&100000                    ; section mapped
-        LDRNE   r4, =&1000000                   ; supersection mapped
-        ; Assume IO memory mapped there
-      [ MEMM_Type = "VMSAv6"
-        LDR     r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_Phys
-      |
-        LDR     r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_PrivXN+CMA_Partially_Phys
-      ]
-        BL      CMA_AddRange
+        LDR     r5, [r5, lr, LSL #2]
+        ORR     r5, r5, #CMA_Partially_Phys
+        BL      CMA_AddRange2
 50
         ASSERT  HALWorkspace > PhysicalAccess
         LDR     r3, =HALWorkspace
@@ -2259,9 +2259,8 @@ CMA_Done
         BCS     %FT45
         ; Get the L2PT entry and decode the flags
         Push    "r0-r3"
-        LDR     r8, =L2PT
-        LDR     r0, [r8, r4, LSR #10]
-        BL      DecodeL2Entry                   ; TODO bit wasteful. We only care about access privileges, but this call gives us cache info too. Also, if we know the L2PT backing exists (it should do) we could skip the logical_to_physical call
+        MOV     r0, r4
+        BL      LoadAndDecodeL2Entry            ; TODO bit wasteful. We only care about access privileges, but this call gives us cache info too. Also, if we know the L2PT backing exists (it should do) we could skip the logical_to_physical call
         ; r2 = DA flags
         ; Extract and decode AP
         LDR     r0, =ZeroPage
diff --git a/s/VMSAv6 b/s/VMSAv6
index c1d91a6010eae2b32e4317e896a3ef8a12dd2c8e..771547c3c203ed3167fec671e9e1bbb49bf4bb44 100644
--- a/s/VMSAv6
+++ b/s/VMSAv6
@@ -541,13 +541,15 @@ Get1MPTE
         B       %BT50
 
 ; In:
-; r0 = L2PT entry
+; r0 = page-aligned logical addr
 ; Out:
 ; r0,r1 = phys addr
 ; r2 = page flags
 ;      or -1 if fault
 ; r3 = page size (bytes)
-DecodeL2Entry ROUT
+LoadAndDecodeL2Entry ROUT
+        LDR     r1, =L2PT
+        LDR     r0, [r1, r0, LSR #10]
         TST     r0, #3
         MOVEQ   r2, #-1
         MOVEQ   pc, lr
@@ -614,15 +616,17 @@ DecodeL2Entry ROUT
         EXIT
 
 ; In:
-; r0 = L1PT entry
+; r0 = MB-aligned logical addr
 ; Out:
 ; r0,r1 = phys addr of section or L2PT entry
 ; r2 = page flags if 1MB page
 ;      or -1 if fault
 ;      or -2 if page table ptr
 ; r3 = section size (bytes) if section-mapped
-DecodeL1Entry
+LoadAndDecodeL1Entry
         ALTENTRY
+        LDR     r1, =L1PT
+        LDR     r0, [r1, r0, LSR #20-2]
         AND     r2, r0, #3
         ASSERT  L1_Fault < L1_Page
         ASSERT  L1_Page < L1_Section