Commit 96913c1f authored by Ben Avison, committed by ROOL

Support temporary mapping of IO above 4GB using supersections

Add a new reason code, OS_Memory 22, equivalent to OS_Memory 14, but
accepting a 64-bit physical address in r1/r2. Current ARM architectures can
only express 40-bit or 32-bit physical addresses in their page tables
(depending on whether they feature the LPAE extension or not) so unlike
OS_Memory 14, OS_Memory 22 can return an error if an invalid physical
address has been supplied. OS_Memory 15 should still be used to release a
temporary mapping, whether you claimed it using OS_Memory 14 or OS_Memory 22.

The logical memory map has had to change to accommodate supersection mapping
of the physical access window, which needs to be 16MB wide and aligned to a
16MB boundary. This results in there being 16MB less logical address space
available for dynamic areas on all platforms (sorry) and there is now a 1MB
hole spare in the system address range (above IO).

The internal function RISCOS_AccessPhysicalAddress has been changed to
accept a 64-bit physical address. This function has been a candidate for
adding to the kernel entry points from the HAL for a long time - enough that
it features in the original HAL documentation - but has not been so added
(at least not yet) so there are no API compatibility issues there.

Requires RiscOS/Sources/Programmer/HdrSrc!2
parent 9024d1f6
......@@ -276,7 +276,7 @@ Usage:
void *RISCOS_AccessPhysicalAddress(unsigned int flags, void *phys, void **oldp)
void *RISCOS_AccessPhysicalAddress(unsigned int flags, uint64_t phys, void **oldp)
flags: bit 2 => make memory bufferable
other bits must be zero
......@@ -294,7 +294,7 @@ On exit:
Usage:
Arranges for the physical address phys to be mapped in to logical memory.
In fact, the whole megabyte containing "phys" is mapped in (ie if phys =
In fact, at least the whole megabyte containing "phys" is mapped in (ie if phys =
&12345678, then &12300000 to &123FFFFF become available). The memory is
supervisor access only, non-cacheable, non-bufferable by default, and will
remain available until the next call to RISCOS_Release/AccessPhysicalAddress
......@@ -321,14 +321,16 @@ Usage:
Example:
void *old;
unsigned int *addr = (unsigned int *) 0x80005000;
unsigned int *addr2 = (unsigned int *) 0x90005000;
uint64_t addr_physical = (uint64_t) 0x80005000;
uint64_t addr2_physical = (uint64_t) 0x90005000;
uint32_t *addr_logical;
uint32_t *addr2_logical;
addr = (unsigned int *) RISCOS_AccessPhysicalAddress(addr, &old);
addr[0] = 3; addr[1] = 5;
addr_logical = (unsigned int *) RISCOS_AccessPhysicalAddress(0, addr_physical, &old);
addr_logical[0] = 3; addr_logical[1] = 5;
addr2 = (unsigned int *) RISCOS_AccessPhysicalAddress(addr2, NULL);
*addr2 = 7;
addr2_logical = (unsigned int *) RISCOS_AccessPhysicalAddress(0, addr2_physical, NULL);
*addr2_logical = 7;
RISCOS_ReleasePhysicalAddress(old);
......
......@@ -1044,7 +1044,7 @@ if the virtual address space is exhausted.
As for RISCOS_MapInIO, but accepting a 64-bit physical address argument.
-- void *RISCOS_AccessPhysicalAddress(unsigned int flags, void *phys, void **oldp)
-- void *RISCOS_AccessPhysicalAddress(unsigned int flags, uint64_t phys, void **oldp)
flags: bit 2 => make memory bufferable
other bits must be zero
......@@ -1061,7 +1061,7 @@ On exit:
Returns logical address corresponding to phys
Arranges for the physical address phys to be mapped in to logical memory. In
fact, the whole megabyte containing "phys" is mapped in (ie if phys =
fact, at least the whole megabyte containing "phys" is mapped in (ie if phys =
&12345678, then &12300000 to &123FFFFF become available). The memory is
supervisor access only, non-cacheable, non-bufferable by default, and will
remain available until the next call to RISCOS_Release/AccessPhysicalAddress
......@@ -1085,13 +1085,15 @@ Usage:
Example:
void *old;
unsigned int *addr = (unsigned int *) 0x80005000;
unsigned int *addr2 = (unsigned int *) 0x90005000;
uint64_t addr_physical = (uint64_t) 0x80005000;
uint64_t addr2_physical = (uint64_t) 0x90005000;
uint32_t *addr_logical;
uint32_t *addr2_logical;
addr = (unsigned int *) RISCOS_AccessPhysicalAddress(addr, &old);
addr[0] = 3; addr[1] = 5;
addr_logical = (uint32_t *) RISCOS_AccessPhysicalAddress(0, addr_physical, &old);
addr_logical[0] = 3; addr_logical[1] = 5;
addr2 = (unsigned int *) RISCOS_AccessPhysicalAddress(addr2, NULL);
*addr2 = 7;
addr2_logical = (uint32_t *) RISCOS_AccessPhysicalAddress(0, addr2_physical, NULL);
*addr2_logical = 7;
RISCOS_ReleasePhysicalAddress(old);
......@@ -272,15 +272,16 @@ SysHeapAddress * SysHeapChunkAddress
SysHeapMaxSize * 32:SHL:20
FreePoolAddress * 0
IOLimit * &BA000000 ; initial lower limit on room for IO space (DA creation may move limit up)
IO * &FA000000 ; works downwards
IOLimit * &B9000000 ; initial lower limit on room for IO space (DA creation may move limit up)
IO * &F9000000 ; works downwards
PhysicalAccess * &F9000000 ; 16MB allocation to permit supersection mapping
HALWorkspace * &FA000000
HALWorkspaceSize * &00100000
IRQStackAddress * &FA100000
SVCStackAddress * &FA200000
ABTStackAddress * &FA300000
UNDStackAddress * &FA400000
PhysicalAccess * &FA500000
 ; &FA500000 spare since PhysicalAccess moved
DCacheCleanAddress * &FA600000 ; eg. for StrongARM, 256k of space, up to FA640000
KbuffsBaseAddress * &FA640000 ; kernel buffers for long command lines, size KbuffsMaxSize
HALWorkspaceNCNB * &FA6E8000 ; 32K of uncacheable HAL workspace (if requested)
......
......@@ -112,6 +112,7 @@ OSMemReason_FindAccessPrivilege * 18 ; Find best AP number from given permiss
OSMemReason_DMAPrep * 19 ; Convert PA <-> LA, perform cache maintenance required for DMA
OSMemReason_Compatibility * 20 ; Get/set compatibility settings
OSMemReason_MapIO64Permanent * 21 ; Map in IO area from 64-bit space
OSMemReason_AccessPhysAddr64 * 22 ; Temporarily map in 64-bit phys addr
OSMemReason_CheckMemoryAccess * 24 ; Return attributes/permissions for a logical address range
; OS_Memory 17/18 permission flags
......
......@@ -5704,7 +5704,8 @@ ReplacePage_DestUnmapped
FastCDA_ProfStart AccessPhysical, r2, r1, lr
MOV r0, #L1_B
LDR r1, [r8, #PageBlockSize+8] ; r1 = physical address of dest for copy
ADD r2, sp, #4
MOV r2, #0
ADD r3, sp, #4
BL RISCOS_AccessPhysicalAddress
; r0 = logical address of dest for copy
FastCDA_ProfEnd AccessPhysical, r2, r1, lr
......@@ -5723,7 +5724,8 @@ ReplacePage_BothUnmapped
FastCDA_ProfStart AccessPhysical, r0, r1, lr
MOV r0, #L1_B
LDR r1, [r8, #8] ; r1 = physical address of src for copy
ADD r2, sp, #8
MOV r2, #0
ADD r3, sp, #8
BL RISCOS_AccessPhysicalAddress
MOV r6, r0 ; r6 = logical address of src for copy
FastCDA_ProfEnd AccessPhysical, r0, r1, lr
......@@ -5745,7 +5747,8 @@ ReplacePage_BothUnmapped
; Now map in dest
MOV r0, #L1_B
LDR r1, [r8, #PageBlockSize+8] ; r1 = physical address of src for copy
MOV r2, #0 ; no oldp needed
MOV r2, #0
MOV r3, #0 ; no oldp needed
BL RISCOS_AccessPhysicalAddress
; r0 = logical address of dest for copy
MOV r6, sp
......@@ -5770,7 +5773,8 @@ ReplacePage_SrcUnmapped
FastCDA_ProfStart AccessPhysical, r0, r1, lr
MOV r0, #L1_B
LDR r1, [r8, #8] ; r1 = physical address of src for copy
ADD r2, sp, #8
MOV r2, #0
ADD r3, sp, #8
BL RISCOS_AccessPhysicalAddress
MOV r6, r0 ; r6 = logical address of src for copy
FastCDA_ProfEnd AccessPhysical, r0, r1, lr
......
......@@ -1811,7 +1811,8 @@ AllocateL2PT ROUT
MOVEQ a1, #L1_B ; if so, map in v4
MOVEQ a2, v4
SUBEQ sp, sp, #4
MOVEQ a3, sp
MOVEQ a3, #0
MOVEQ a4, sp
BLEQ RISCOS_AccessPhysicalAddress
MOV a2, #0
......@@ -1892,7 +1893,7 @@ $lab ANDS $tmp, $flags, #L1_SS
MEND
; void *RISCOS_AccessPhysicalAddress(unsigned int flags, void *addr, void **oldp)
; void *RISCOS_AccessPhysicalAddress(unsigned int flags, uint64_t addr, void **oldp)
RISCOS_AccessPhysicalAddress ROUT
; Only flag user can ask for is bufferable
; Convert to appropriate DA flags
......@@ -1901,30 +1902,62 @@ RISCOS_AccessPhysicalAddress ROUT
LDR a1, =OSAP_None + DynAreaFlags_NotCacheable ; SVC RW, USR none
ORREQ a1, a1, #DynAreaFlags_NotBufferable
RISCOS_AccessPhysicalAddressUnchecked ; well OK then, I trust you know what you're doing
; Check physical address is valid on current CPU
LDR ip, =ZeroPage
Push "a1,v3,lr"
LDR v3, [ip, #PhysIllegalMask]
TEQ v3, #0
BLEQ DeterminePhysIllegalMask
TST a3, v3
BNE %FT90
; Use Get1MPTE to convert DA flags into L1PT section-mapping flags
MOV ip, #0
GetPTE a1, 1M, ip, a1
; Mapping size (section or supersection) depends on address
MOV lr, a2, LSR #20
ORR lr, lr, a3, LSL #12 ; lr = physical megabyte number
TEQ a3, #0
ORRNE a1, a1, #L1_SS ; need to use supersection for such addresses
BIC a2, a2, #&FF000000 ; at most, bits 0-23 are used as offsets into section/supersection
BICNE lr, lr, #&F ; if address >4GB, round mapped address to 16MB (supersection)
BICEQ a2, a2, #&00F00000 ; else no further rounding needed (section) and bits 20-23 are not used as an offset either
ConstructIOPTE v3, lr, a1, ip
LDR ip, =L1PT + (PhysicalAccess:SHR:18) ; ip -> L1PT entry
MOV a4, a2, LSR #20 ; rounded to section
MOV a4, a4, LSL #20
GetPTE a1, 1M, a4, a1 ; a1 = complete descriptor
[ MEMM_Type = "VMSAv6"
ORR a1, a1, #L1_XN ; force non-executable to prevent speculative instruction fetches
ORR v3, v3, #L1_XN ; force non-executable to prevent speculative instruction fetches
]
TEQ a3, #0
LDRNE a4, [ip] ; read old value (if necessary)
STR a1, [ip] ; store new one
STRNE a4, [a3] ; put old one in [oldp]
TEQ a4, #0
LDRNE lr, [ip] ; read old value (if necessary)
STRNE lr, [a4] ; put old one in [oldp]
MOV a4, #15
STR v3, [ip], #4 ; store first of 16 new L1PT entries
TST v3, #L1_SS
MOVEQ v3, #0 ; if supersection mapped then use 16 duplicate entries, else remaining entries unmapped
10 SUBS a4, a4, #1
STR v3, [ip], #4
BNE %BT10
LDR a1, =PhysicalAccess
MOV a3, a2, LSL #12 ; take bottom 20 bits of address
ORR a3, a1, a3, LSR #12 ; and make an offset within PhysicalAccess
Push "a3,lr"
ORR a1, a1, a2
STR a1, [sp]
ARMop MMU_ChangingUncached ; sufficient, cause not cacheable
Pull "a1,pc"
Pull "a1,v3,pc"
90 ; Invalid physical address
ADD sp, sp, #1*4
MOV a1, #0
Pull "v3,pc"
; void RISCOS_ReleasePhysicalAddress(void *old)
RISCOS_ReleasePhysicalAddress
LDR ip, =L1PT + (PhysicalAccess:SHR:18) ; ip -> L1PT entry
STR a1, [ip]
LDR a1, =PhysicalAccess
MOV a4, #15
STR a1, [ip], #4 ; restore first of 16 L1PT entries
TST a1, #L1_SS
MOVEQ a1, #0 ; if supersection mapped then use 16 duplicate entries, else remaining entries unmapped
10 SUBS a4, a4, #1
STR a1, [ip], #4
BNE %BT10
ARMop MMU_ChangingUncached,,tailcall ; sufficient, cause not cacheable
......@@ -2117,6 +2150,7 @@ ClearFreePoolSection ROUT
]
MOV a2, r5
MOV a3, #0
MOV a4, #0
BL RISCOS_AccessPhysicalAddressUnchecked
MOV r4, #0 ; clear to this value
......@@ -2186,6 +2220,7 @@ ClearFreePoolSection ROUT
MOV r0, #L1_B
MOV r1, r10
MOV r2, #0
MOV r3, #0
BL RISCOS_AccessPhysicalAddress
MOV r0, r4
......
......@@ -72,7 +72,7 @@ MemReturn
B DMAPrep ; 19
B ChangeCompatibility ; 20
B MapIO64permanent ; 21
B %BT20 ; 22 reserved for us
B AccessPhysAddr64 ; 22
B %BT20 ; 23 reserved for us
B CheckMemoryAccess ; 24
; 25+ reserved for ROL
......@@ -1011,16 +1011,49 @@ MapIO64permanent
; access calls can be ignored.)
;
AccessPhysAddr ROUT
Push "r0,r1,r12,lr"
TST r0, #&100 ;test bufferable bit
Push "r0-r3,r12,lr"
MOV r2, #0
B %FT10
;----------------------------------------------------------------------------------------
;AccessPhysAddr64 - claim temporary access to given 64-bit physical address (in fact,
; controls access to the 1MB- or 16MB-aligned space containing the address
; The access remains until the next AccessPhysAddr or until a
; ReleasePhysAddr (although interrupts or subroutines may temporarily
; make their own claims, but restore on Release before returning)
;
; In: r0 bits 0..7 = 22 (reason code 22)
; r0 bit 8 = 1 to map bufferable space, 0 for unbufferable
; r0 bits 9..31 = 0 (reserved flags)
; r1,r2 = physical address
;
; Out: r2 = logical address corresponding to phys address r1
; r3 = old state (for ReleasePhysAddr)
;
; Use of multiple accesses: it is fine to make several Access calls, and
; clean up with a single Release at the end. In this case, it is the old state
; (r3) of the *first* Access call that should be passed to Release in order to
; restore the state before any of your accesses. (The r3 values of the other
; access calls can be ignored.)
;
AccessPhysAddr64
Push "r0-r3,r12,lr"
10 TST r0, #&100 ;test bufferable bit
MOVNE r0, #L1_B
MOVEQ r0, #0
SUB sp, sp, #4 ; word for old state
MOV r2, sp ; pointer to word
MOV r3, sp ; pointer to word
BL RISCOS_AccessPhysicalAddress
MOV r2, r0
Pull r3 ; old state
Pull "r0,r1,r12,pc"
MOVS r2, r0 ; null pointer means invalid physical address
LDMIB sp, {r0,r1}
BEQ %FT90
LDR r3, [sp], #5*4 ; load old state, and skip stacked r0-r3
Pull "r12,pc"
90 ADRL r0, ErrorBlock_CantGetPhysMem
SETV
ADD sp, sp, #2*4
Pull "r1-r3,r12,pc"
;----------------------------------------------------------------------------------------
;ReleasePhysAddr - release temporary access that was claimed by AccessPhysAddr
......@@ -2004,6 +2037,23 @@ CheckMemoryAccess ROUT
BNE %BT31
40
; Everything else!
LDR r3, =L1PT + (PhysicalAccess:SHR:18)
LDR r3, [r3]
TEQ r3, #0
BEQ %FT50
TST r3, #L1_SS
LDR r3, =PhysicalAccess
LDREQ r4, =&100000 ; section mapped
LDRNE r4, =&1000000 ; supersection mapped
; Assume IO memory mapped there
[ MEMM_Type = "VMSAv6"
LDR r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_Phys
|
LDR r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_PrivXN+CMA_Partially_Phys
]
BL CMA_AddRange
50
ASSERT HALWorkspace > PhysicalAccess
LDR r3, =HALWorkspace
LDR r4, [r10, #HAL_WsSize]
LDR r5, =CMA_HALWorkspace
......@@ -2028,22 +2078,7 @@ CheckMemoryAccess ROUT
LDR r4, =UNDStackSize
LDR r5, =CMA_UNDStack
BL CMA_AddRange
ASSERT PhysicalAccess > UNDStackAddress
LDR r3, =L1PT + (PhysicalAccess:SHR:18)
LDR r3, [r3]
TEQ r3, #0
BEQ %FT50
LDR r3, =PhysicalAccess
LDR r4, =&100000
; Assume IO memory mapped there
[ MEMM_Type = "VMSAv6"
LDR r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_Phys
|
LDR r5, =CMA_Partially_PrivR+CMA_Partially_PrivW+CMA_Partially_PrivXN+CMA_Partially_Phys
]
BL CMA_AddRange
50
ASSERT DCacheCleanAddress > PhysicalAccess
ASSERT DCacheCleanAddress > UNDStackAddress
LDR r4, =DCacheCleanAddress+DCacheCleanSize
CMP r1, r4
BHS %FT60
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment