Commit 03d3b37a authored by Jeffrey Lee's avatar Jeffrey Lee
Browse files

Add OS_Memory 24 implementation. Change OS_ValidateAddress to use it. Fix...

Add OS_Memory 24 implementation. Change OS_ValidateAddress to use it. Fix kernel leaving the physical access MB in a messy state. Try to protect against infinite abort loops caused by bad environment handlers.

  s/MemInfo - Added an implementation of ROL's OS_Memory 24 call. Unlike the old OS_ValidateAddress call, this call should successfully report the presence of all memory areas known to the kernel. It should also correctly indicate which parts of a sparse DA are mapped in, unlike the old OS_ValidateAddress implementation.
  s/ChangeDyn - Update dynamic area handling to construct a lookup table for mapping logical addresses to dynamic areas; this is used by OS_Memory 24 to quickly locate which DA(s) hit a given region
  s/AMBControl/main - Make sure lazy task swapping is marked as disabled when AMB_LazyMapIn is {FALSE} - required so that OS_Memory 24 will give application space the correct flags
  s/ArthurSWIs - Switch OS_ValidateAddress over to using OS_Memory 24, as per ROL. For compatibility, Service_ValidateAddress is still issued for any areas which the kernel doesn't recognise (currently, OS_Memory 24 doesn't issue any service calls itself)
  s/Convrsions - ADR -> ADRL to keep things happy
  s/HAL - Fix L2PT page allocation and RAM clear to release the physical access region once they're done with it
  s/Kernel - Make the error dispatcher validate the error handler code ptr & error buffer using OS_Memory 24 before attempting to use them. If they look bad, reset to default. Should prevent getting stuck in an infinite abort loop in some situations (e.g. as was the case with ticket 279). The system might not fully recover, but it's better than a hard crash.
  s/Middle - Rework data/prefetch/etc. abort handlers so that DumpyTheRegisters can validate the exception dump area via OS_Memory 24 before anything gets written to it. Should also help to prevent some infinite abort loops. Strip 26bit/pre-HAL code to make things a bit more readable.
  hdr/KernelWS - Update comment
  Tested on BB-xM, Raspberry Pi

Version 5.35, Tagged as 'Kernel-5_35-4_79_2_222'
parent af2d7844
......@@ -13,11 +13,11 @@
GBLS Module_ComponentPath
Module_MajorVersion SETS "5.35"
Module_Version SETA 535
Module_MinorVersion SETS ""
Module_Date SETS "19 Apr 2014"
Module_ApplicationDate SETS "19-Apr-14"
Module_MinorVersion SETS ""
Module_Date SETS "20 Apr 2014"
Module_ApplicationDate SETS "20-Apr-14"
Module_ComponentName SETS "Kernel"
Module_ComponentPath SETS "castle/RiscOS/Sources/Kernel"
Module_FullVersion SETS "5.35 ("
Module_HelpVersion SETS "5.35 (19 Apr 2014)"
Module_FullVersion SETS "5.35 ("
Module_HelpVersion SETS "5.35 (20 Apr 2014)"
......@@ -5,19 +5,19 @@
#define Module_MajorVersion_CMHG 5.35
#define Module_MinorVersion_CMHG
#define Module_Date_CMHG 19 Apr 2014
#define Module_MinorVersion_CMHG
#define Module_Date_CMHG 20 Apr 2014
#define Module_MajorVersion "5.35"
#define Module_Version 535
#define Module_MinorVersion ""
#define Module_Date "19 Apr 2014"
#define Module_MinorVersion ""
#define Module_Date "20 Apr 2014"
#define Module_ApplicationDate "19-Apr-14"
#define Module_ApplicationDate "20-Apr-14"
#define Module_ComponentName "Kernel"
#define Module_ComponentPath "castle/RiscOS/Sources/Kernel"
#define Module_FullVersion "5.35 ("
#define Module_HelpVersion "5.35 (19 Apr 2014)"
#define Module_FullVersion "5.35 ("
#define Module_HelpVersion "5.35 (20 Apr 2014)"
#define Module_LibraryVersionInfo "5:35"
......@@ -214,7 +214,7 @@ OldOpt SETA {OPT}
; Dynamic area node format
^ 0
DANode_Link # 4 ; points to next node
DANode_Link # 4 ; points to next node (in address order)
DANode_Number # 4 ; number of this area
DANode_Base # 4 ; base address of area (points in middle of doubly-mapped areas)
DANode_Flags # 4 ; various flags
......@@ -98,6 +98,9 @@ AMBControl_Init
TST R0,#CPUFlag_AbortRestartBroken ;but wait! can't use for bugged chips (eg. pre rev T StrongARM)
MOVNE R1,#AMBFlag_LazyMapIn_disable
MOV R1,#AMBFlag_LazyMapIn_disable
LDR R0,=ZeroPage+AMBControl_ws
STR R12,[R0] ;now initialisation is complete
......@@ -1283,109 +1283,57 @@ FreeSysHeapNode Entry
; return CC for OK, CS for naff
ValidateAddress_Code ROUT
Push "R1, lr"
CMP R0, R1
SUBNE R1, R1, #1 ; cope with zero length range sensibly
LDR R10, =ZeroPage
[ ZeroPage = 0
MOV R11, #0
MOV R11, #ScratchSpace
LDR R12, [R10, #AplWorkSize]
BL RangeCheck
LDR r11, =SVCStackAddress
ADD r12, r11, #SVCStackSize
BL RangeCheck
[ IRQStackAddress <> CursorChunkAddress
LDR r11, =IRQStackAddress
ADD r12, r11, #IRQStackSize
BL RangeCheck
LDR r11, =UNDStackAddress
ADD r12, r11, #UNDStackSize
BL RangeCheck
LDR r11, =ABTStackAddress
ADD r12, r11, #ABTStackSize
BL RangeCheck
! 0, "ValidateAddress - what about CAM and page tables? - strictly should be included"
LDR R11, =CursorChunkAddress
ADD R12, R11, #32*1024
BL RangeCheck
VDWS R12 ; in case of external framestore
LDR R11, [R12, #ScreenEndAddr]
LDR R12, [R12, #TotalScreenSize]
SUB R11, R11, R12
ADD R12, R11, R12, LSL #1 ; doubly-mapped friggage
BL RangeCheck
[ ZeroPage <> 0
MOV r11, r10
ADD r12, r11, #16*1024
BL RangeCheck
LDR r11, =DebuggerSpace
ADD r12, r11, #DebuggerSpace_Size
BL RangeCheck
; not in one of those ranges, so check against dynamic area list
LDR r10, =ZeroPage+DAList
LDR r10, [r10, #DANode_Link]
TEQ r10, #0 ; end of list
LDR r11, [r10, #DANode_Base]
LDR r12, [r10, #DANode_Flags]
TST r12, #DynAreaFlags_DoublyMapped
LDR r12, [r10, #DANode_Size]
SUBNE r11, r11, r12 ; if doubly mapped, move base back by size
MOVNE r12, r12, LSL #1 ; and double size
ADD r12, r12, r11 ; make r12 point at end (exclusive)
CMP r0, r12 ; if start >= end (excl)
BCS %BT10 ; then go onto next node
CMP r0, r11 ; if range starts below this area
BCC %FT20 ; then not totally within this area
CMP r1, r12 ; else if range ends before end+1 of this area
BCC AddressIsValid ; then it's valid
; not in one of those ranges, so issue service so modules can add other valid areas
Push "R2, R3"
MOV R2, R0 ; pass parameters to service in R2 and R3
LDR R3, [stack, #2*4] ; reload stacked R1 into R3
MOV R1, #Service_ValidateAddress
Push "r0-r3, lr"
MOV r2, r1
MOV r1, r0
MOV r0, #24
SWI XOS_Memory
; Pre-RISC OS 3.5, OS_ValidateAddress would return OK if the region was:
; (a) valid RAM in logical address space
; (b) the 2nd mapping of screen memory at the start of physical address space
; (c) anything claimed by Service_ValidateAddress
; Post-RISC OS 3.5, OS_ValidateAddress would return OK if the region was:
; (a) a dynamic area
; (b) screen memory
; (c) most special areas
; (d) anything claimed by Service_ValidateAddress
; RISC OS Select docs suggest that valid regions for their version are:
; (a) dynamic areas, including special areas which have been turned into DAs (e.g. ROM)
; (b) some special areas (e.g. zero page)
; (c) screen memory
; (d) anything claimed by Service_ValidateAddress (example given of sparse DA which uses OS_AbortTrap to map pages on demand)
; (e) NOT physically mapped areas (unless screen memory)
; Taking the above into account, our version will behave as follows:
; (a) anything completely accessible in any mode, which isn't physically mapped - dynamic areas, special areas, ROM, zero page, etc.
; (b) anything completely R/W in user mode, which is completely physically mapped (i.e. screen memory; this check should suffice until we decide on a better way of flagging screen memory/"IO RAM" as valid)
; (c) anything claimed by Service_ValidateAddress
TST r1, #CMA_Partially_Phys
MOVEQ r2, #1
ANDEQ r1, r1, #CMA_Completely_UserR+CMA_Completely_UserW+CMA_Completely_PrivR+CMA_Completely_PrivW
LDRNE r2, =CMA_Completely_UserR+CMA_Completely_UserW+CMA_Completely_Phys
ANDNE r1, r1, r2
CMP r1, r2
BHS AddressIsValid ; EQ case: At least one completely flag set
; NE case: Flags match required value
; OS_Memory check failed, try the service call
LDMIA sp, {r2-r3}
MOV r1, #Service_ValidateAddress
BL Issue_Service
TEQ R1, #0 ; EQ => service claimed, so OK
Pull "R2, R3"
Pull "R1, lr"
TEQ r1, #0 ; EQ => service claimed, so OK
Pull "r0-r3,lr"
ORRNE lr, lr, #C_bit ; return CS if invalid
BICEQ lr, lr, #C_bit ; return CC if valid
RangeCheck ; check R0 - R1 lies totally within R11 - (r12-1)
SUB R12, R12, #1
CMP R0, R11
MOVCC PC, lr ; failed
Pull "R1, lr"
Pull "r0-r3,lr"
BIC lr, lr, #C_bit
......@@ -123,7 +123,6 @@ AP_RAMDisc_SA * 2 ; user none,
AP_Duff * 2 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable ; user none, ~C~B
AP_FreePool * 2 :OR: DynAreaFlags_NotCacheable ; user none, ~CB
AP_CursorChunk * 1 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable :OR: PageFlags_Unavailable
AP_PageZero * 0
AP_L2PT * 2 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable ; user none, ~C~B
AP_L1PT * AP_L2PT :OR: PageFlags_Unavailable
......@@ -196,6 +195,9 @@ DynArea_NullNamePtrMeansHexString SETL {TRUE} :LAND: DynArea_QuickHandles
[ DynArea_QuickHandles
DynArea_MaxNameLength * 31 ;maximum length of DA name, excluding terminator (multiple of 4, -1)
DynArea_NumQHandles * 256 ;maximum no. of non-system quick handles available simultaneously
DynArea_AddrLookupBits * 8 ;LUT covers entire 4G logical space, so 4G>>8 = 16M granularity
DynArea_AddrLookupSize * 1<<(32-DynArea_AddrLookupBits) ; Address space covered by each entry
DynArea_AddrLookupMask * &FFFFFFFF-(DynArea_AddrLookupSize-1)
^ 0,R11
DynArea_TreacleGuess # 4 ;guess for next non-quick handle to allocate, if needed, is TreacleGuess+1
......@@ -227,6 +229,7 @@ DynArea_FreeQHandles # 4 ;index of first free quick h
DynArea_QHandleArray # 4*DynArea_NumQHandles ;1 word per quick handle
; - if free, word = index of next free quick handle (or 0 if none)
; - if used, word = ptr to DANode (must be > DynArea_NumQHandles)
DynArea_AddrLookup # 4<<DynArea_AddrLookupBits ; Lookup table for fast logaddr -> dynarea lookup
DynArea_ws_size * :INDEX:@ ;must be multiple of 4
......@@ -1171,6 +1174,7 @@ DAC_roundup
[ DynArea_QuickHandles
LDR r11, =ZeroPage
LDR r11, [r11, #DynArea_ws]
BL AddDAToAddrLookupTable
;so XOS_ChangeDynamicArea can pick up the node we are still creating
STR r1, DynArea_CreatingHandle
STR r2, DynArea_CreatingPtr
......@@ -1323,6 +1327,39 @@ StringNodeClaimFailed
; Add a dynamic area to the quick address lookup table
; In:
; R2 = DANode ptr
; R11 = DynArea_ws
Entry "r0-r1,r3,r6"
ADRL r0, DynArea_AddrLookup
LDR r1, [r2, #DANode_Flags]
LDR r3, [r2, #DANode_MaxSize]
LDR r6, [r2, #DANode_Base]
TST r1, #DynAreaFlags_DoublyMapped
SUBNE r6, r6, r3 ; Get true start address
MOVNE r3, r3, LSL #1
AND r1, r6, #DynArea_AddrLookupMask ; Round down start address
ADD lr, r6, r3
AND r3, lr, #DynArea_AddrLookupMask
TEQ lr, r3
ADDNE r3, r3, #DynArea_AddrLookupSize ; Round up end address
SUB r3, r3, r1
ADD r0, r0, r1, LSR #30-DynArea_AddrLookupBits
LDR lr, [r0], #4
TEQ lr, #0
STREQ r2, [r0, #-4]
LDR lr, [lr, #DANode_Base]
CMP lr, r6
STRHI r2, [r0, #-4] ; Update LUT if current entry starts after us
SUBS r3, r3, #DynArea_AddrLookupSize
; DynArea_Remove - Remove a dynamic area
......@@ -1387,7 +1424,7 @@ DAR_delink
; delink from sorted list
Push "r7,r8,r11"
Push "r0-r4,r7,r8,r11"
LDR r11, =ZeroPage
LDR r11, [r11, #DynArea_ws]
ADR r8, DynArea_SortedList - DANode_SortLink ;so that [r8, #DANode_SortLink] addresses list header)
......@@ -1401,7 +1438,44 @@ DAR_sdloop
LDR r8, [r8, #DANode_SortLink]
STR r8, [r7, #DANode_SortLink]
Pull "r7,r8,r11"
; Delink from address lookup table
ADRL r0, DynArea_AddrLookup
LDR r1, [r10, #DANode_Flags]
LDR r3, [r10, #DANode_MaxSize]
LDR r2, [r10, #DANode_Base]
TST r1, #DynAreaFlags_DoublyMapped
SUBNE r2, r2, r3 ; Get true start address
MOVNE r3, r3, LSL #1
AND r1, r2, #DynArea_AddrLookupMask ; Round down start address
ADD lr, r2, r3
AND r3, lr, #DynArea_AddrLookupMask
TEQ lr, r3
ADDNE r3, r3, #DynArea_AddrLookupSize ; Round up end address
SUB r3, r3, r1
ADD r0, r0, r1, LSR #30-DynArea_AddrLookupBits
LDR lr, [r0], #4
TEQ lr, r10
BNE DAR_adnext
; Update to point to next DA, or null if next is outside this chunk
LDR lr, [lr, #DANode_Link]
TEQ lr, #0
STREQ lr, [r0, #-4]
BEQ DAR_adnext
LDR r4, [lr, #DANode_Flags]
LDR r2, [lr, #DANode_Base]
TST r4, #DynAreaFlags_DoublyMapped
LDRNE r4, [lr, #DANode_MaxSize]
SUBNE r2, r2, r4
AND r2, r2, #DynArea_AddrLookupMask
TEQ r2, r1
MOVNE lr, #0
STR lr, [r0, #-4]
SUBS r3, r3, #DynArea_AddrLookupSize
ADD r1, r1, #DynArea_AddrLookupSize
BNE DAR_adloop
Pull "r0-r4,r7,r8,r11"
] ;DynArea_QuickHandles
......@@ -3010,6 +3084,16 @@ DynArea_QHinit_loop2
LDR r2, =ZeroPage+FreePoolDANode
STR r2, [r1, #ChangeDyn_FreePool:SHL:2]
; Initialise the address lookup table for the current DA's
; Assumes we have at least one DA to start with!
LDR r2, =ZeroPage+DAList
LDR r2, [r2]
BL AddDAToAddrLookupTable
LDR r2, [r2, #DANode_Link]
TEQ r2, #0
BNE DynArea_AddrLookup_loop
ASSERT ZeroPage = 0
MOV r0, #0
......@@ -579,7 +579,7 @@ BinaryToDecimal_Code ROUT
; now do digits.
01 RSB R0, R0, #0 ; get negative so minint works.
ADR R3, TenTimesTable
ADRL R3, TenTimesTable
MOV R10, #9 ; max entry 10^9
MOV R4, #0 ; non-0 had flag
02 LDR R11, [R3, R10, LSL #2]
......@@ -2012,13 +2012,18 @@ AllocateL2PT
MOVNE a1, v4 ; if not, just access v4
MOVEQ a1, #L1_B ; if so, map in v4
MOVEQ a2, v4
MOVEQ a3, #0
SUBEQ sp, sp, #4
MOVEQ a3, sp
BLEQ RISCOS_AccessPhysicalAddress
MOV a2, #0
MOV a3, #4*1024
BL memset
TEQ v3, #0
LDREQ a1, [sp], #4
BLEQ RISCOS_ReleasePhysicalAddress
MOV a1, v4 ; Map in the L2PT page itself
LDR a2, =L2PT ; (can't recurse, because L2PT
ADD a2, a2, v8, LSL #10 ; backing for L2PT is preallocated)
......@@ -2263,6 +2268,9 @@ ClearPhysRAM ROUT
MOVEQS r11, r11, LSR #12 ; anything left to do?
MOV a1, #L1_Fault
BL RISCOS_ReleasePhysicalAddress ; reset to default
LDR r0, =ZeroPage+InitClearRamWs
LDMIA r0, {r4-r11,r14} ;restore
......@@ -842,6 +842,30 @@ ErrHandler ROUT
BL OscliTidy ; close redirection, restore curr FS
LDR r10, =ZeroPage
Push "r0-r2"
LDR r1, [r10, #ErrHan]
; Check that the error handler points somewhere sensible
; Can be ROM or RAM or pretty much anywhere, but must be user-read
; Also require it to be word aligned, since we don't really support thumb
MOV r0, #24
ADD r2, r1, #4
SWI XOS_Memory
TST r1, #CMA_Completely_UserR
TSTEQ r2, #3
LDR r1, [r10, #ErrBuf]
MOV r0, #24
ADD r2, r1, #256+4
; Must be SVC-writable, user+SVC readable, word aligned
SWI XOS_Memory
AND r1, r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TEQ r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TSTEQ r2, #3
BL DEFHAN ; Restore default error (+escape) handler if the ones we've been given are obviously duff
Pull "r0-r2"
LDR r11, [r10, #ErrBuf] ; Get pointer to error buffer
[ No26bitCode
......@@ -67,6 +67,15 @@ MemReturn
B AccessPhysAddr ; 14
B ReleasePhysAddr ; 15
B MemoryAreaInfo ; 16
B %BT20 ; 17 |
B %BT20 ; 18 |
B %BT20 ; 19 |
B %BT20 ; 20 | Reserved for us
B %BT20 ; 21 |
B %BT20 ; 22 |
B %BT20 ; 23 |
B CheckMemoryAccess ; 24
; 25+ reserved for ROL
......@@ -1198,4 +1207,376 @@ MAI_Kbuffs
LDR r3, =(KbuffsSize + &FFF) :AND: :NOT: &FFF
; In: r0 = flags
; bit meaning
; 0-7 24 (reason code)
; 8-31 reserved (set to 0)
; r1 = low address (inclusive)
; r2 = high address (exclusive)
; Out: r1 = access flags:
; bit 0: completely readable in user mode
; bit 1: completely writable in user mode
; bit 2: completely readable in privileged modes
; bit 3: completely writable in privileged modes
; bit 4: partially readable in user mode
; bit 5: partially writable in user mode
; bit 6: partially readable in privileged modes
; bit 7: partially writable in privileged modes
; bit 8: completely physically mapped (i.e. IO memory)
; bit 9: completely abortable (i.e. custom data abort handler)
; bits 10,11: reserved
; bit 12: partially physically mapped
; bit 13: partially abortable
; bits 14+: reserved
; Return various attributes for the given memory region
CMA_Completely_UserR * 1<<0
CMA_Completely_UserW * 1<<1
CMA_Completely_PrivR * 1<<2
CMA_Completely_PrivW * 1<<3
CMA_Partially_UserR * 1<<4
CMA_Partially_UserW * 1<<5
CMA_Partially_PrivR * 1<<6
CMA_Partially_PrivW * 1<<7
CMA_Completely_Phys * 1<<8
CMA_Completely_Abort * 1<<9
CMA_Partially_Phys * 1<<12
CMA_Partially_Abort * 1<<13
CMA_CheckL2PT * 1<<31 ; Pseudo flag used internally for checking sparse areas
; AP_ equivalents
CMA_ROM * CMA_Partially_UserR+CMA_Partially_PrivR
CMA_Read * CMA_ROM+CMA_Partially_PrivW
CMA_Full * CMA_Read+CMA_Partially_UserW
CMA_None * CMA_Partially_PrivR+CMA_Partially_PrivW
CheckMemoryAccess ROUT
Entry "r0,r2-r10"
CMP r0, #24
LDR r10, =ZeroPage
; Set all the 'completely' flags, we'll clear them as we go along
LDR r0, =&0F0F0F0F
; Make end address inclusive so we don't have to worry so much about
; wrap around at 4G
TEQ r1, r2
SUBNE r2, r2, #1
; Split memory up into five main regions:
; * scratchspace/zeropage
; * application space
; * dynamic areas
; * IO memory
; * special areas (stacks, ROM, HAL workspace, system heap, etc.)
; All ranges are checked in increasing address order, so the
; completeness flags are returned correctly if we happen to cross from
; one range into another
; Note that application space can't currently be checked in DA block as
; (a) it's not linked to DAList/DynArea_AddrLookup
; (b) we need to manually add the abortable flag
CMP r1, #32*1024
; Check zero page
[ ZeroPage = 0
MOV r3, #0
MOV r4, #16*1024
MOV r5, #CMA_Read
BL CMA_AddRange
; DebuggerSpace
ASSERT DebuggerSpace < ScratchSpace
LDR r3, =DebuggerSpace
LDR r4, =(DebuggerSpace_Size + &FFF) :AND: &FFFFF000
MOV r5, #CMA_Read
BL CMA_AddRange
; Scratch space
LDR r3, =ScratchSpace
MOV r4, #16*1024
MOV r5, #CMA_Read
BL CMA_AddRange
; Application space
; Note - checking AplWorkSize as opposed to AplWorkMaxSize to cope with
; software which creates DAs within application space (e.g. Aemulor)
LDR r4, [r10, #AplWorkSize]
CMP r1, r4
LDR r3, [r10, #AMBControl_ws]
LDR r3, [r3, #:INDEX:AMBFlags]
MOV r5, #CMA_Full
TST r3, #AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
MOV r3, #32*1024
ORREQ r5, r5, #CMA_Partially_Abort
BL CMA_AddRange2
; Dynamic areas
LDR r7, [r10, #IOAllocLimit]
CMP r1, r7
; Look through the quick lookup table until we find a valid DANode ptr
LDR r6, [r10, #DynArea_ws]
MOV r3, r1
ADD r6, r6, #(:INDEX:DynArea_AddrLookup) :AND: &00FF
ADD r6, r6, #(:INDEX:DynArea_AddrLookup) :AND: &FF00
AND r8, r3, #DynArea_AddrLookupMask
LDR r9, [r6, r8, LSR #30-DynArea_AddrLookupBits]
TEQ r9, #0
; Nothing here, skip ahead to next block
ADD r3, r8, #DynArea_AddrLookupSize
CMP r3, r2
BHI %FT90 ; Hit end of search area
CMP r3, r7
; Hit end of DA area and wandered into IO area
B %FT30
; Now that we've found a DA to start from, walk through and process all
; the entries until we hit the system heap (or any other DAs above
; IOAllocLimit)
LDR r3, [r9, #DANode_Base]
LDR r6, [r9, #DANode_Flags]
CMP r3, r7
; Decode AP flags
ANDS lr, r6, #3
MOVEQ r5, #CMA_Full
TEQ lr, #1
MOVEQ r5, #CMA_Read
CMP lr, #2
MOVEQ r5, #CMA_None
TST r6, #DynAreaFlags_SparseMap
LDREQ lr, [r9, #DANode_Size]
LDRNE lr, [r9, #DANode_SparseHWM] ; Use HWM as bounds when checking sparse areas
ORRNE r5, r5, #CMA_CheckL2PT ; ... and request L2PT check
TST r6, #DynAreaFlags_DoublyMapped
ADD r4, r3, lr
SUBNE r3, r3, lr
BL CMA_AddRange2
LDR r9, [r9, #DANode_Link]
TEQ r9, #0
; Hit the end of the list. This shouldn't happen with the current heap setup!
; IO memory
CMP r1, #IO
MOV r3, r1, LSR #20
LDR r4, [r10, #IOAllocPtr]
MOV r3, r3, LSL #20 ; Get MB-aligned addr of first entry to check
CMP r3, r4
LDR r7, =L1PT
MOVLO r3, r4 ; Skip all the unallocated regions
LDR r4, [r7, r3, LSR #20-2]
AND r4, r4, #L1_AP