Commit 03d3b37a authored by Jeffrey Lee's avatar Jeffrey Lee
Browse files

Add OS_Memory 24 implementation. Change OS_ValidateAddress to use it. Fix...

Add OS_Memory 24 implementation. Change OS_ValidateAddress to use it. Fix kernel leaving the physical access MB in a messy state. Try and protect against infinite abort loops caused by bad environment handlers.

Detail:
  s/MemInfo - Added an implementation of ROL's OS_Memory 24 call. Unlike the old OS_ValidateAddress call, this call should successfully report the presence of all memory areas known to the kernel. It should also correctly indicate which parts of a sparse DA are mapped in, unlike the old OS_ValidateAddress implementation.
  s/ChangeDyn - Update dynamic area handling to construct a lookup table for mapping logical addresses to dynamic areas; this is used by OS_Memory 24 to quickly locate which DA(s) hit a given region
  s/AMBControl/main - Make sure lazy task swapping is marked as disabled when AMB_LazyMapIn is {FALSE} - required so that OS_Memory 24 will give application space the correct flags
  s/ArthurSWIs - Switch OS_ValidateAddress over to using OS_Memory 24, as per ROL. For compatibility...
parent af2d7844
......@@ -13,11 +13,11 @@
GBLS Module_ComponentPath
Module_MajorVersion SETS "5.35"
Module_Version SETA 535
Module_MinorVersion SETS "4.79.2.221"
Module_Date SETS "19 Apr 2014"
Module_ApplicationDate SETS "19-Apr-14"
Module_MinorVersion SETS "4.79.2.222"
Module_Date SETS "20 Apr 2014"
Module_ApplicationDate SETS "20-Apr-14"
Module_ComponentName SETS "Kernel"
Module_ComponentPath SETS "castle/RiscOS/Sources/Kernel"
Module_FullVersion SETS "5.35 (4.79.2.221)"
Module_HelpVersion SETS "5.35 (19 Apr 2014) 4.79.2.221"
Module_FullVersion SETS "5.35 (4.79.2.222)"
Module_HelpVersion SETS "5.35 (20 Apr 2014) 4.79.2.222"
END
......@@ -5,19 +5,19 @@
*
*/
#define Module_MajorVersion_CMHG 5.35
#define Module_MinorVersion_CMHG 4.79.2.221
#define Module_Date_CMHG 19 Apr 2014
#define Module_MinorVersion_CMHG 4.79.2.222
#define Module_Date_CMHG 20 Apr 2014
#define Module_MajorVersion "5.35"
#define Module_Version 535
#define Module_MinorVersion "4.79.2.221"
#define Module_Date "19 Apr 2014"
#define Module_MinorVersion "4.79.2.222"
#define Module_Date "20 Apr 2014"
#define Module_ApplicationDate "19-Apr-14"
#define Module_ApplicationDate "20-Apr-14"
#define Module_ComponentName "Kernel"
#define Module_ComponentPath "castle/RiscOS/Sources/Kernel"
#define Module_FullVersion "5.35 (4.79.2.221)"
#define Module_HelpVersion "5.35 (19 Apr 2014) 4.79.2.221"
#define Module_FullVersion "5.35 (4.79.2.222)"
#define Module_HelpVersion "5.35 (20 Apr 2014) 4.79.2.222"
#define Module_LibraryVersionInfo "5:35"
......@@ -214,7 +214,7 @@ OldOpt SETA {OPT}
; Dynamic area node format
^ 0
DANode_Link # 4 ; points to next node
DANode_Link # 4 ; points to next node (in address order)
DANode_Number # 4 ; number of this area
DANode_Base # 4 ; base address of area (points in middle of doubly-mapped areas)
DANode_Flags # 4 ; various flags
......
......@@ -98,6 +98,9 @@ AMBControl_Init
TST R0,#CPUFlag_AbortRestartBroken ;but wait! can't use for bugged chips (eg. pre rev T StrongARM)
MOVNE R1,#AMBFlag_LazyMapIn_disable
STR R1,AMBFlags
|
MOV R1,#AMBFlag_LazyMapIn_disable
STR R1,AMBFlags
]
LDR R0,=ZeroPage+AMBControl_ws
STR R12,[R0] ;now initialisation is complete
......
......@@ -1283,109 +1283,57 @@ FreeSysHeapNode Entry
; return CC for OK, CS for naff
ValidateAddress_Code ROUT
Push "R1, lr"
CMP R0, R1
SUBNE R1, R1, #1 ; cope with zero length range sensibly
LDR R10, =ZeroPage
[ ZeroPage = 0
MOV R11, #0
|
MOV R11, #ScratchSpace
]
LDR R12, [R10, #AplWorkSize]
BL RangeCheck
LDR r11, =SVCStackAddress
ADD r12, r11, #SVCStackSize
BL RangeCheck
[ IRQStackAddress <> CursorChunkAddress
LDR r11, =IRQStackAddress
ADD r12, r11, #IRQStackSize
BL RangeCheck
]
LDR r11, =UNDStackAddress
ADD r12, r11, #UNDStackSize
BL RangeCheck
LDR r11, =ABTStackAddress
ADD r12, r11, #ABTStackSize
BL RangeCheck
! 0, "ValidateAddress - what about CAM and page tables? - strictly should be included"
LDR R11, =CursorChunkAddress
ADD R12, R11, #32*1024
BL RangeCheck
VDWS R12 ; in case of external framestore
LDR R11, [R12, #ScreenEndAddr]
LDR R12, [R12, #TotalScreenSize]
SUB R11, R11, R12
ADD R12, R11, R12, LSL #1 ; doubly-mapped friggage
BL RangeCheck
[ ZeroPage <> 0
MOV r11, r10
ADD r12, r11, #16*1024
BL RangeCheck
LDR r11, =DebuggerSpace
ADD r12, r11, #DebuggerSpace_Size
BL RangeCheck
]
; not in one of those ranges, so check against dynamic area list
LDR r10, =ZeroPage+DAList
10
LDR r10, [r10, #DANode_Link]
TEQ r10, #0 ; end of list
BEQ %FT20
LDR r11, [r10, #DANode_Base]
LDR r12, [r10, #DANode_Flags]
TST r12, #DynAreaFlags_DoublyMapped
LDR r12, [r10, #DANode_Size]
SUBNE r11, r11, r12 ; if doubly mapped, move base back by size
MOVNE r12, r12, LSL #1 ; and double size
ADD r12, r12, r11 ; make r12 point at end (exclusive)
CMP r0, r12 ; if start >= end (excl)
BCS %BT10 ; then go onto next node
CMP r0, r11 ; if range starts below this area
BCC %FT20 ; then not totally within this area
CMP r1, r12 ; else if range ends before end+1 of this area
BCC AddressIsValid ; then it's valid
20
; not in one of those ranges, so issue service so modules can add other valid areas
Push "R2, R3"
MOV R2, R0 ; pass parameters to service in R2 and R3
LDR R3, [stack, #2*4] ; reload stacked R1 into R3
MOV R1, #Service_ValidateAddress
Push "r0-r3, lr"
MOV r2, r1
MOV r1, r0
MOV r0, #24
SWI XOS_Memory
; Pre-RISC OS 3.5, OS_ValidateAddress would return OK if the region was:
; (a) valid RAM in logical address space
; (b) the 2nd mapping of screen memory at the start of physical address space
; (c) anything claimed by Service_ValidateAddress
;
; Post-RISC OS 3.5, OS_ValidateAddress would return OK if the region was:
; (a) a dynamic area
; (b) screen memory
; (c) most special areas
; (d) anything claimed by Service_ValidateAddress
;
; RISC OS Select docs suggest that valid regions for their version are:
; (a) dynamic areas, including special areas which have been turned into DAs (e.g. ROM)
; (b) some special areas (e.g. zero page)
; (c) screen memory
; (d) anything claimed by Service_ValidateAddress (example given of sparse DA which uses OS_AbortTrap to map pages on demand)
; (e) NOT physically mapped areas (unless screen memory)
;
; Taking the above into account, our version will behave as follows:
; (a) anything completely accessible in any mode, which isn't physically mapped - dynamic areas, special areas, ROM, zero page, etc.
; (b) anything completely R/W in user mode, which is completely physically mapped (i.e. screen memory; this check should suffice until we decide on a better way of flagging screen memory/"IO RAM" as valid)
; (c) anything claimed by Service_ValidateAddress
TST r1, #CMA_Partially_Phys
MOVEQ r2, #1
ANDEQ r1, r1, #CMA_Completely_UserR+CMA_Completely_UserW+CMA_Completely_PrivR+CMA_Completely_PrivW
LDRNE r2, =CMA_Completely_UserR+CMA_Completely_UserW+CMA_Completely_Phys
ANDNE r1, r1, r2
CMP r1, r2
BHS AddressIsValid ; EQ case: At least one completely flag set
; NE case: Flags match required value
; OS_Memory check failed, try the service call
LDMIA sp, {r2-r3}
MOV r1, #Service_ValidateAddress
BL Issue_Service
TEQ R1, #0 ; EQ => service claimed, so OK
Pull "R2, R3"
Pull "R1, lr"
TEQ r1, #0 ; EQ => service claimed, so OK
Pull "r0-r3,lr"
ORRNE lr, lr, #C_bit ; return CS if invalid
BICEQ lr, lr, #C_bit ; return CC if valid
ExitSWIHandler
RangeCheck ; check R0 - R1 lies totally within R11 - (r12-1)
SUB R12, R12, #1
CMP R0, R11
CMPCS R12, R0
CMPCS R1, R11
CMPCS R12, R1
MOVCC PC, lr ; failed
AddressIsValid
Pull "R1, lr"
Pull "r0-r3,lr"
BIC lr, lr, #C_bit
ExitSWIHandler
LTORG
END
......@@ -123,7 +123,6 @@ AP_RAMDisc_SA * 2 ; user none,
AP_Duff * 2 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable ; user none, ~C~B
AP_FreePool * 2 :OR: DynAreaFlags_NotCacheable ; user none, ~CB
AP_CursorChunk * 1 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable :OR: PageFlags_Unavailable
AP_PageZero * 0
AP_L2PT * 2 :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable ; user none, ~C~B
AP_L1PT * AP_L2PT :OR: PageFlags_Unavailable
......@@ -196,6 +195,9 @@ DynArea_NullNamePtrMeansHexString SETL {TRUE} :LAND: DynArea_QuickHandles
[ DynArea_QuickHandles
DynArea_MaxNameLength * 31 ;maximum length of DA name, excluding terminator (multiple of 4, -1)
DynArea_NumQHandles * 256 ;maximum no. of non-system quick handles available simultaneously
DynArea_AddrLookupBits * 8 ;LUT covers entire 4G logical space, so 4G>>8 = 16M granularity
DynArea_AddrLookupSize * 1<<(32-DynArea_AddrLookupBits) ; Address space covered by each entry
DynArea_AddrLookupMask * &FFFFFFFF-(DynArea_AddrLookupSize-1)
;
^ 0,R11
DynArea_TreacleGuess # 4 ;guess for next non-quick handle to allocate, if needed, is TreacleGuess+1
......@@ -227,6 +229,7 @@ DynArea_FreeQHandles # 4 ;index of first free quick h
DynArea_QHandleArray # 4*DynArea_NumQHandles ;1 word per quick handle
; - if free, word = index of next free quick handle (or 0 if none)
; - if used, word = ptr to DANode (must be > DynArea_NumQHandles)
DynArea_AddrLookup # 4<<DynArea_AddrLookupBits ; Lookup table for fast logaddr -> dynarea lookup
;
DynArea_ws_size * :INDEX:@ ;must be multiple of 4
;
......@@ -1171,6 +1174,7 @@ DAC_roundup
[ DynArea_QuickHandles
LDR r11, =ZeroPage
LDR r11, [r11, #DynArea_ws]
BL AddDAToAddrLookupTable
;so XOS_ChangeDynamicArea can pick up the node we are still creating
STR r1, DynArea_CreatingHandle
STR r2, DynArea_CreatingPtr
......@@ -1323,6 +1327,39 @@ StringNodeClaimFailed
LTORG
; Add a dynamic area to the quick address lookup table
; In:
; R2 = DANode ptr
; R11 = DynArea_ws
AddDAToAddrLookupTable
; Mark every address-lookup-table chunk overlapped by the dynamic area in R2,
; pointing each chunk entry at the lowest-based DA that starts within it.
; In:  R2 = DANode ptr, R11 = DynArea_ws
; Out: R0-R1, R3, R6 preserved (via Entry/EXIT); lr corrupted as usual
        Entry   "r0-r1,r3,r6"
        ADRL    r0, DynArea_AddrLookup          ; r0 -> base of lookup table
        LDR     r1, [r2, #DANode_Flags]
        LDR     r3, [r2, #DANode_MaxSize]
        LDR     r6, [r2, #DANode_Base]
        TST     r1, #DynAreaFlags_DoublyMapped
        SUBNE   r6, r6, r3 ; Get true start address
        MOVNE   r3, r3, LSL #1                  ; doubly-mapped areas occupy twice the size
        AND     r1, r6, #DynArea_AddrLookupMask ; Round down start address
        ADD     lr, r6, r3
        AND     r3, lr, #DynArea_AddrLookupMask
        TEQ     lr, r3
        ADDNE   r3, r3, #DynArea_AddrLookupSize ; Round up end address
        SUB     r3, r3, r1                      ; r3 = total (rounded) span to cover
        ADD     r0, r0, r1, LSR #30-DynArea_AddrLookupBits ; index table: 4 bytes per chunk
71      ; For each chunk the DA touches...
        LDR     lr, [r0], #4
        TEQ     lr, #0
        STREQ   r2, [r0, #-4]                   ; empty slot - claim it
        BEQ     %FT72
        LDR     lr, [lr, #DANode_Base]
        CMP     lr, r6
        STRHI   r2, [r0, #-4] ; Update LUT if current entry starts after us
        ; NOTE(review): the incumbent entry's DANode_Base is compared without the
        ; doubly-mapped start adjustment applied to r6 above - confirm intended
72
        SUBS    r3, r3, #DynArea_AddrLookupSize
        BNE     %BT71
        EXIT
;**************************************************************************
;
; DynArea_Remove - Remove a dynamic area
......@@ -1387,7 +1424,7 @@ DAR_delink
;
; delink from sorted list
;
Push "r7,r8,r11"
Push "r0-r4,r7,r8,r11"
LDR r11, =ZeroPage
LDR r11, [r11, #DynArea_ws]
ADR r8, DynArea_SortedList - DANode_SortLink ;so that [r8, #DANode_SortLink] addresses list header)
......@@ -1401,7 +1438,44 @@ DAR_sdloop
LDR r8, [r8, #DANode_SortLink]
STR r8, [r7, #DANode_SortLink]
DAR_sddone
Pull "r7,r8,r11"
; Delink from address lookup table
ADRL r0, DynArea_AddrLookup
LDR r1, [r10, #DANode_Flags]
LDR r3, [r10, #DANode_MaxSize]
LDR r2, [r10, #DANode_Base]
TST r1, #DynAreaFlags_DoublyMapped
SUBNE r2, r2, r3 ; Get true start address
MOVNE r3, r3, LSL #1
AND r1, r2, #DynArea_AddrLookupMask ; Round down start address
ADD lr, r2, r3
AND r3, lr, #DynArea_AddrLookupMask
TEQ lr, r3
ADDNE r3, r3, #DynArea_AddrLookupSize ; Round up end address
SUB r3, r3, r1
ADD r0, r0, r1, LSR #30-DynArea_AddrLookupBits
DAR_adloop
LDR lr, [r0], #4
TEQ lr, r10
BNE DAR_adnext
; Update to point to next DA, or null if next is outside this chunk
LDR lr, [lr, #DANode_Link]
TEQ lr, #0
STREQ lr, [r0, #-4]
BEQ DAR_adnext
LDR r4, [lr, #DANode_Flags]
LDR r2, [lr, #DANode_Base]
TST r4, #DynAreaFlags_DoublyMapped
LDRNE r4, [lr, #DANode_MaxSize]
SUBNE r2, r2, r4
AND r2, r2, #DynArea_AddrLookupMask
TEQ r2, r1
MOVNE lr, #0
STR lr, [r0, #-4]
DAR_adnext
SUBS r3, r3, #DynArea_AddrLookupSize
ADD r1, r1, #DynArea_AddrLookupSize
BNE DAR_adloop
Pull "r0-r4,r7,r8,r11"
] ;DynArea_QuickHandles
......@@ -3010,6 +3084,16 @@ DynArea_QHinit_loop2
LDR r2, =ZeroPage+FreePoolDANode
STR r2, [r1, #ChangeDyn_FreePool:SHL:2]
; Initialise the address lookup table for the current DA's
; Assumes we have at least one DA to start with!
LDR r2, =ZeroPage+DAList
LDR r2, [r2]
DynArea_AddrLookup_loop
BL AddDAToAddrLookupTable
LDR r2, [r2, #DANode_Link]
TEQ r2, #0
BNE DynArea_AddrLookup_loop
|
ASSERT ZeroPage = 0
MOV r0, #0
......
......@@ -579,7 +579,7 @@ BinaryToDecimal_Code ROUT
; now do digits.
01 RSB R0, R0, #0 ; get negative so minint works.
ADR R3, TenTimesTable
ADRL R3, TenTimesTable
MOV R10, #9 ; max entry 10^9
MOV R4, #0 ; non-0 had flag
02 LDR R11, [R3, R10, LSL #2]
......
......@@ -2012,13 +2012,18 @@ AllocateL2PT
MOVNE a1, v4 ; if not, just access v4
MOVEQ a1, #L1_B ; if so, map in v4
MOVEQ a2, v4
MOVEQ a3, #0
SUBEQ sp, sp, #4
MOVEQ a3, sp
BLEQ RISCOS_AccessPhysicalAddress
MOV a2, #0
MOV a3, #4*1024
BL memset
TEQ v3, #0
LDREQ a1, [sp], #4
BLEQ RISCOS_ReleasePhysicalAddress
MOV a1, v4 ; Map in the L2PT page itself
LDR a2, =L2PT ; (can't recurse, because L2PT
ADD a2, a2, v8, LSL #10 ; backing for L2PT is preallocated)
......@@ -2263,6 +2268,9 @@ ClearPhysRAM ROUT
MOVEQS r11, r11, LSR #12 ; anything left to do?
BNE %BT50
MOV a1, #L1_Fault
BL RISCOS_ReleasePhysicalAddress ; reset to default
LDR r0, =ZeroPage+InitClearRamWs
LDMIA r0, {r4-r11,r14} ;restore
......
......@@ -842,6 +842,30 @@ ErrHandler ROUT
BL OscliTidy ; close redirection, restore curr FS
LDR r10, =ZeroPage
Push "r0-r2"
LDR r1, [r10, #ErrHan]
; Check that the error handler points somewhere sensible
; Can be ROM or RAM or pretty much anywhere, but must be user-read
; Also require it to be word aligned, since we don't really support thumb
MOV r0, #24
ADD r2, r1, #4
SWI XOS_Memory
TST r1, #CMA_Completely_UserR
TSTEQ r2, #3
BEQ %FT05
LDR r1, [r10, #ErrBuf]
MOV r0, #24
ADD r2, r1, #256+4
; Must be SVC-writable, user+SVC readable, word aligned
SWI XOS_Memory
AND r1, r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TEQ r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TSTEQ r2, #3
BEQ %FT10
05
BL DEFHAN ; Restore default error (+escape) handler if the ones we've been given are obviously duff
10
Pull "r0-r2"
LDR r11, [r10, #ErrBuf] ; Get pointer to error buffer
[ No26bitCode
......
......@@ -67,6 +67,15 @@ MemReturn
B AccessPhysAddr ; 14
B ReleasePhysAddr ; 15
B MemoryAreaInfo ; 16
B %BT20 ; 17 |
B %BT20 ; 18 |
B %BT20 ; 19 |
B %BT20 ; 20 | Reserved for us
B %BT20 ; 21 |
B %BT20 ; 22 |
B %BT20 ; 23 |
B CheckMemoryAccess ; 24
; 25+ reserved for ROL
40
......@@ -1198,4 +1207,376 @@ MAI_Kbuffs
LDR r3, =(KbuffsSize + &FFF) :AND: :NOT: &FFF
EXIT
;----------------------------------------------------------------------------------------
;
; In: r0 = flags
; bit meaning
; 0-7 24 (reason code)
; 8-31 reserved (set to 0)
; r1 = low address (inclusive)
; r2 = high address (exclusive)
;
; Out: r1 = access flags:
; bit 0: completely readable in user mode
; bit 1: completely writable in user mode
; bit 2: completely readable in privileged modes
; bit 3: completely writable in privileged modes
; bit 4: partially readable in user mode
; bit 5: partially writable in user mode
; bit 6: partially readable in privileged modes
; bit 7: partially writable in privileged modes
; bit 8: completely physically mapped (i.e. IO memory)
; bit 9: completely abortable (i.e. custom data abort handler)
; bits 10,11: reserved
; bit 12: partially physically mapped
; bit 13: partially abortable
; bits 14+: reserved
;
; Return various attributes for the given memory region
; OS_Memory 24 (CheckMemoryAccess) result flags returned in R1.
; "Completely" = true for the whole queried region; "Partially" = true for
; at least part of it.
CMA_Completely_UserR    * 1<<0          ; completely readable in user mode
CMA_Completely_UserW    * 1<<1          ; completely writable in user mode
CMA_Completely_PrivR    * 1<<2          ; completely readable in privileged modes
CMA_Completely_PrivW    * 1<<3          ; completely writable in privileged modes
CMA_Partially_UserR     * 1<<4          ; partially readable in user mode
CMA_Partially_UserW     * 1<<5          ; partially writable in user mode
CMA_Partially_PrivR     * 1<<6          ; partially readable in privileged modes
CMA_Partially_PrivW     * 1<<7          ; partially writable in privileged modes
CMA_Completely_Phys     * 1<<8          ; completely physically mapped (i.e. IO memory)
CMA_Completely_Abort    * 1<<9          ; completely abortable (custom data abort handler)
CMA_Partially_Phys      * 1<<12         ; partially physically mapped
CMA_Partially_Abort     * 1<<13        ; partially abortable
CMA_CheckL2PT           * 1<<31 ; Pseudo flag used internally for checking sparse areas
; AP_ equivalents
; Shorthand "partial" permission sets corresponding to the AP_ page protections
CMA_ROM                 * CMA_Partially_UserR+CMA_Partially_PrivR
CMA_Read                * CMA_ROM+CMA_Partially_PrivW
CMA_Full                * CMA_Read+CMA_Partially_UserW
CMA_None                * CMA_Partially_PrivR+CMA_Partially_PrivW
; OS_Memory 24 implementation - see header comments above for the register
; interface. Register roles within the routine:
;   r0  = flags accumulator (all 'completely' bits preset, cleared on gaps)
;   r1  = current scan address (advanced by CMA_AddRange/CMA_AddRange2)
;   r2  = end of queried region (made inclusive below)
;   r10 = ZeroPage
CheckMemoryAccess ROUT
        Entry   "r0,r2-r10"
        CMP     r0, #24
        BNE     %FT99
        LDR     r10, =ZeroPage
        ; Set all the 'completely' flags, we'll clear them as we go along
        LDR     r0, =&0F0F0F0F
        ; Make end address inclusive so we don't have to worry so much about
        ; wrap around at 4G
        TEQ     r1, r2
        SUBNE   r2, r2, #1
        ; Split memory up into five main regions:
        ; * scratchspace/zeropage
        ; * application space
        ; * dynamic areas
        ; * IO memory
        ; * special areas (stacks, ROM, HAL workspace, system heap, etc.)
        ; All ranges are checked in increasing address order, so the
        ; completeness flags are returned correctly if we happen to cross from
        ; one range into another
        ; Note that application space can't currently be checked in DA block as
        ; (a) it's not linked to DAList/DynArea_AddrLookup
        ; (b) we need to manually add the abortable flag
        CMP     r1, #32*1024
        BHS     %FT10
        ; Check zero page
 [ ZeroPage = 0
        MOV     r3, #0
        MOV     r4, #16*1024
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
 |
        ; DebuggerSpace
        ASSERT  DebuggerSpace < ScratchSpace
        LDR     r3, =DebuggerSpace
        LDR     r4, =(DebuggerSpace_Size + &FFF) :AND: &FFFFF000
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
 ]
        ; Scratch space
        LDR     r3, =ScratchSpace
        MOV     r4, #16*1024
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
10
        ; Application space
        ; Note - checking AplWorkSize as opposed to AplWorkMaxSize to cope with
        ; software which creates DAs within application space (e.g. Aemulor)
        LDR     r4, [r10, #AplWorkSize]
        CMP     r1, r4
        BHS     %FT20
        LDR     r3, [r10, #AMBControl_ws]
        LDR     r3, [r3, #:INDEX:AMBFlags]
        MOV     r5, #CMA_Full
        TST     r3, #AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
        MOV     r3, #32*1024
        ORREQ   r5, r5, #CMA_Partially_Abort    ; lazy map-in active -> aborts possible
        BL      CMA_AddRange2
20
        ; Dynamic areas
        LDR     r7, [r10, #IOAllocLimit]
        CMP     r1, r7
        BHS     %FT30
        ; Look through the quick lookup table until we find a valid DANode ptr
        LDR     r6, [r10, #DynArea_ws]
        MOV     r3, r1
        ADD     r6, r6, #(:INDEX:DynArea_AddrLookup) :AND: &00FF
        ADD     r6, r6, #(:INDEX:DynArea_AddrLookup) :AND: &FF00
21
        AND     r8, r3, #DynArea_AddrLookupMask
        LDR     r9, [r6, r8, LSR #30-DynArea_AddrLookupBits]
        TEQ     r9, #0
        BNE     %FT22
        ; Nothing here, skip ahead to next block
        ADD     r3, r8, #DynArea_AddrLookupSize
        CMP     r3, r2
        BHI     %FT90 ; Hit end of search area
        CMP     r3, r7
        BLO     %BT21
        ; Hit end of DA area and wandered into IO area
        B       %FT30
22
        ; Now that we've found a DA to start from, walk through and process all
        ; the entries until we hit the system heap (or any other DAs above
        ; IOAllocLimit)
        LDR     r3, [r9, #DANode_Base]
        LDR     r6, [r9, #DANode_Flags]
        CMP     r3, r7
        BHS     %FT30
        ; Decode AP flags
        ANDS    lr, r6, #3
        MOVEQ   r5, #CMA_Full                   ; AP 0 -> user R/W
        TEQ     lr, #1
        MOVEQ   r5, #CMA_Read                   ; AP 1 -> user read, priv R/W
        CMP     lr, #2
        MOVEQ   r5, #CMA_None                   ; AP 2 -> priv only
        MOVGT   r5, #CMA_ROM                    ; AP 3 -> read-only everywhere
        TST     r6, #DynAreaFlags_SparseMap
        LDREQ   lr, [r9, #DANode_Size]
        LDRNE   lr, [r9, #DANode_SparseHWM] ; Use HWM as bounds when checking sparse areas
        ORRNE   r5, r5, #CMA_CheckL2PT ; ... and request L2PT check
        TST     r6, #DynAreaFlags_DoublyMapped
        ADD     r4, r3, lr
        SUBNE   r3, r3, lr                      ; doubly-mapped: area extends below base
        BL      CMA_AddRange2
        LDR     r9, [r9, #DANode_Link]
        TEQ     r9, #0
        BNE     %BT22
        ; Hit the end of the list. This shouldn't happen with the current heap setup!
30
        ; IO memory
        CMP     r1, #IO
        BHS     %FT40
        MOV     r3, r1, LSR #20
        LDR     r4, [r10, #IOAllocPtr]
        MOV     r3, r3, LSL #20 ; Get MB-aligned addr of first entry to check
        CMP     r3, r4
        LDR     r7, =L1PT
        MOVLO   r3, r4 ; Skip all the unallocated regions
31
        LDR     r4, [r7, r3, LSR #20-2]         ; fetch L1PT section entry for this MB
        AND     r4, r4, #L1_AP
        ; Decode page AP flags
        MOV     r5, #CMA_ROM
        CMP     r4, #AP_None*L1_APMult
        MOVEQ   r5, #CMA_None
        CMP     r4, #AP_Read*L1_APMult
        MOVEQ   r5, #CMA_Read
        CMP     r4, #AP_Full*L1_APMult
        MOVEQ   r5, #CMA_Full
        ADD     r4, r3, #1<<20
        ORR     r5, r5, #CMA_Partially_Phys
        BL      CMA_AddRange2
        CMP     r4, #IO
        MOV     r3, r4
        BNE     %BT31
40
        ; Everything else!
        LDR     r3, =HALWorkspace
        LDR     r4, [r10, #HAL_WsSize]
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
        ASSERT  IRQStackAddress > HALWorkspace
        LDR     r3, =IRQStackAddress
        LDR     r4, =IRQStackSize
        MOV     r5, #CMA_None
        BL      CMA_AddRange
        ASSERT  SVCStackAddress > IRQStackAddress
        LDR     r3, =SVCStackAddress
        LDR     r4, =SVCStackSize
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
        ASSERT  ABTStackAddress > SVCStackAddress
        LDR     r3, =ABTStackAddress
        LDR     r4, =ABTStackSize
        MOV     r5, #CMA_None
        BL      CMA_AddRange
        ASSERT  UNDStackAddress > ABTStackAddress
        LDR     r3, =UNDStackAddress
        LDR     r4, =UNDStackSize
        MOV     r5, #CMA_None
        BL      CMA_AddRange
        ASSERT  PhysicalAccess > UNDStackAddress
        LDR     r3, =L1PT + (PhysicalAccess:SHR:18) ; L1PT entry for PhysicalAccess MB
        LDR     r3, [r3]
        TEQ     r3, #0
        BEQ     %FT50
        LDR     r3, =PhysicalAccess
        LDR     r4, =&100000
        LDR     r5, =CMA_None+CMA_Partially_Phys ; Assume IO memory mapped there
        BL      CMA_AddRange
50
        ASSERT  DCacheCleanAddress > PhysicalAccess
        LDR     r4, =DCacheCleanAddress+DCacheCleanSize
        CMP     r1, r4
        BHS     %FT60
        ; Check that DCacheCleanAddress is actually used
        Push    "r0-r2,r9"
        AddressHAL r10
        MOV     a1, #-1
        CallHAL HAL_CleanerSpace
        CMP     a1, #-1
        Pull    "r0-r2,r9"
        BEQ     %FT60
        SUB     r3, r4, #DCacheCleanSize
        MOV     r4, #DCacheCleanSize
        LDR     r5, =CMA_None+CMA_Partially_Phys ; Mark as IO, it may not be actual memory there
        BL      CMA_AddRange
60
        ASSERT  KbuffsBaseAddress > DCacheCleanAddress
        LDR     r3, =KbuffsBaseAddress
        LDR     r4, =(KbuffsSize + &FFF) :AND: &FFFFF000
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
        ASSERT  HALWorkspaceNCNB > KbuffsBaseAddress
        LDR     r3, [r10, #HAL_Descriptor]
        LDR     r3, [r3, #HALDesc_Flags]
        TST     r3, #HALFlag_NCNBWorkspace
        BEQ     %FT70
        LDR     r3, =HALWorkspaceNCNB
        LDR     r4, =32*1024
        MOV     r5, #CMA_None
        BL      CMA_AddRange
70
        ASSERT  CursorChunkAddress > HALWorkspaceNCNB
        LDR     r3, =CursorChunkAddress
        MOV     r4, #32*1024
        MOV     r5, #CMA_Read
        BL      CMA_AddRange
        ASSERT  L2PT > CursorChunkAddress
        LDR     r3, =L2PT
        MOV     r5, #CMA_None
        MOV     r4, #4*1024*1024
        ORR     r5, r5, #CMA_CheckL2PT ; L2PT contains gaps due to logical indexing
        BL      CMA_AddRange
        ASSERT  L1PT > L2PT
        LDR     r3, =L1PT
        MOV     r4, #16*1024
        MOV     r5, #CMA_None
        BL      CMA_AddRange
        ; Note that system heap needs to be checked manually due to being
        ; outside main DA address range
        ASSERT  SysHeapChunkAddress > L1PT
        LDR     r3, =SysHeapChunkAddress
        LDR     r4, [r10, #SysHeapDANode + DANode_Size]
        MOV     r5, #CMA_Full
        BL      CMA_AddRange
        ASSERT  CAM > SysHeapChunkAddress
        LDR     r3, =CAM
        LDR     r4, [r10, #SoftCamMapSize]
        MOV     r5, #CMA_None
        BL      CMA_AddRange
        ASSERT  ROM > CAM
        LDR     r3, =ROM
        LDR     r4, =OSROM_ImageSize*1024
        MOV     r5, #CMA_ROM
        BL      CMA_AddRange
        ; Finally, high processor vectors/relocated zero page
 [ ZeroPage > 0
        ASSERT  ZeroPage > ROM
        MOV     r3, r10
        LDR     r4, =16*1024
        MOV     r5, #CMA_Read ; Fix: was "MOV r5, =CMA_Read" - the "=constant" literal form is only valid with LDR; CMA_Read (&D0) is a valid MOV immediate
        BL      CMA_AddRange
 ]
90
        ; If there's anything else, we've wandered off into unallocated memory
        LDR     r3, =&0F0F0F0F
        BIC     r1, r0, r3                      ; clear 'completely' flags, return result in r1
        CLRV
        EXIT
99
        ; Unknown flags/reason code
        PullEnv
        ADRL    r0, ErrorBlock_BadParameters
        SETV
        MOV     pc, lr
; Add range r3..r4 to attributes in r0
; Corrupts r8
; Helper for CheckMemoryAccess: folds one known memory range into the flags
; accumulator. r1 = current scan address (advanced here), r2 = inclusive end of
; the queried region, r5 = CMA_ partial flags for this range (optionally with
; CMA_CheckL2PT set to request a sparse-mapping check).
; Returns to caller (range consumed, keep scanning) or EXITs the whole SWI with
; the result in r1 once the queried region has been fully covered.
CMA_AddRange ROUT ; r3 = start, r4 = length
        ADD     r4, r3, r4
CMA_AddRange2 ; r3 = start, r4 = end (excl.)
        LDR     r8, =&0F0F0F0F                  ; mask of all 'completely' flag bits
        ; Increment r1 and exit if we hit r2
        ; Ignore any ranges which are entirely before us
        CMP     r1, r4
        MOVHS   pc, lr
        ; Check for any gap at the start, i.e. r3 > r1
        CMP     r3, r1
        BICHI   r0, r0, r8                      ; gap -> region not completely anything
        MOVHI   r1, r3 ; Update r1 for L2PT check code
        ; Exit if the range starts after our end point
        CMP     r3, r2
        BHI     %FT10
        ; Process the range
        TST     r5, #CMA_CheckL2PT
        BNE     %FT20
        CMP     r3, r4 ; Don't apply any flags for zero-length ranges
04 ; Note L2PT check code relies on NE condition here
        ORR     r8, r5, r8
        ORRNE   r0, r0, r5 ; Set new partial flags
        ; ROR #4 moves this range's partial bits onto the 'completely' bit
        ; positions (and &0F0F0F0F onto &F0F0F0F0), so the AND keeps every
        ; partial flag but only the 'completely' flags this range grants
        ANDNE   r0, r0, r8, ROR #4 ; Discard completion flags which aren't for this range
05
        CMP     r4, r2
        MOV     r1, r4 ; Continue search from the end of this range
        MOVLS   pc, lr                          ; more of the region left - keep scanning
10
        ; We've ended inside this range
        MOV     r1, r0
        CLRV
        EXIT
20
        ; Check L2PT for sparse region r1..min(r2+1,r4)
        ; r4 guaranteed page aligned
        CMP     r3, r4
        BIC     r5, r5, #CMA_CheckL2PT
        BEQ     %BT05                           ; zero-length - nothing to check
        Push    "r2,r4,r5,r8,r9,r10,lr"
        LDR     lr, =&FFF
        CMP     r4, r2
        ADDHS   r2, r2, #4096
        BICHS   r2, r2, lr
        MOVLO   r2, r4
        ; r2 is now page aligned min(r2+1,r4)
        BIC     r4, r1, lr                      ; r4 = page-aligned scan pointer
        LDR     r8, =L2PT
        MOV     r10, #0                         ; r10 = mapped/unmapped summary bits
30
        BL      logical_to_physical             ; presumably C clear = page mapped - TODO confirm
        ORRCC   r10, r10, #1
        ADD     r4, r4, #4096
        ORRCS   r10, r10, #2
        CMP     r4, r2
        BNE     %BT30
        CMP     r10, #2
        ; 01 -> entirely mapped
        ; 10 -> entirely unmapped
        ; 11 -> partially mapped
        Pull    "r2,r4,r5,r8,r9,r10,lr"         ; (Pull preserves the CMP flags used below)
        BICHS   r0, r0, r8 ; Not fully mapped, clear completion flags
        BNE     %BT04 ; Partially/entirely mapped
        B       %BT05 ; Completely unmapped
END
......@@ -441,7 +441,7 @@ DEFHN2 MOV R12, R14
ADRL R0, CLIEXIT
MOV R1, #0
MOV R2, #0
ADR R4, UNDEF
ADRL R4, UNDEF
ADRL R5, ABORTP
ADRL R6, ABORTD
ADRL R7, ADDREX
......@@ -667,16 +667,12 @@ PrintErrorString
; Exception handling
DumpyTheRegisters ROUT
[ No26bitCode
; In ABT32 or UND32, PC, PSR already stored, PSR in R1
; R0 points at r8 in register dump. r0-r7, PC, PSR saved
; R1 is PSR
; R14 points at error block
; In ABT32 or UND32
MOV R4, R14 ; put error address into unbanked register
TST R1, #&0F
|
; In SVC26, fake 26-bit PC at 0
LDR R1, [R0, -R0] ; PC when exception happened
STR R1, [R0, #(15-8)*4] ; In the right slot now ...
TST R1, #SVC_mode
]
[ SASTMhatbroken
STMEQIA R0!,{R8-R12}
STMEQIA R0, {R13,R14}^ ; user mode case done.
......@@ -686,7 +682,6 @@ DumpyTheRegisters ROUT
]
BEQ UNDEF2
[ No26bitCode
ORR R2, R1, #I32_bit :OR: F32_bit
BIC R2, R2, #T32_bit
MSR CPSR_c, R2 ; change into original mode
......@@ -697,7 +692,6 @@ DumpyTheRegisters ROUT
EORS R2, R2, #FIQ_mode ; Was we in FIQ ? Zero if so
STR R14, [R0, #6*4]
[ HAL
BNE UNDEF2
MSR CPSR_c, #I32_bit+F32_bit+SVC32_mode ; into SVC mode so we have a stack
[ ZeroPage <> 0
......@@ -705,61 +699,50 @@ DumpyTheRegisters ROUT
]
AddressHAL R2
CallHAL HAL_FIQDisableAll
|
MOVEQ R3, #IOC
STREQB R2, [R3, #IOCFIQMSK] ; Blow away all FIQ sources
]
UNDEF2
MSR CPSR_c, #I32_bit+F32_bit+SVC32_mode ; into SVC mode
MOV R14, R4 ; corrupt R14_SVC (but already saved if we were in SVC)
; ... and fall into
UNDEF1
|
TST R1, #1 ; SWI mode?
TSTNE R1, #2
BNE %FT02
ORR R1, R1, #I_bit :OR: F_bit
; keep interrupts off until handlers restored
TEQP R1, #0 ; get at registers
NOP
STMIA R0, {R8-R14}
TEQP PC, #SVC_mode :OR: I_bit :OR: F_bit
AND R1, R1, #SVC_mode
EORS R2, R1, #FIQ_mode ; Was we in FIQ ? Zero if so
[ HAL
BNE UNDEF1
MOV R4, R14 ; R14_SVC will be corrupted by the HAL call
AddressHAL R2
CallHAL HAL_FIQDisableAll
MOV R14, R4
|
MOVEQ R3, #IOC
STREQB R2, [R3, #IOCFIQMSK] ; Blow away all FIQ sources
]
B UNDEF1
02 STMIA R0, {R8-R14}
; ... and fall into
UNDEF2
UNDEF1
]
LDR sp, =SVCSTK ; Flatten superstack
LDR R0, =ZeroPage
LDR R0, [R0, #ExceptionDump]
ADD R1, R0, #10*4 ; point at dumped R10
LDMIA R1, {R10-R12} ; try and put back user registers
Push "R10-R12" ; for error handler to find on stack
; Check that ExceptionDump is safe to use
Push "R14" ; Preserve error ptr
LDR R4, =ZeroPage
MOV R3, R0
LDR R1, [R4, #ExceptionDump]
MOV R0, #24
ADD R2, R1, #17*4
; Must be SVC-writable, user+SVC readable, word aligned
SWI XOS_Memory
AND r1, r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TEQ r1, #CMA_Completely_UserR+CMA_Completely_PrivR+CMA_Completely_PrivW
TSTEQ r2, #3
BEQ %FT05
; Reset to default location. Unfortunately the Debugger module is a bit
; braindead and will only show the contents of the register dump located
; within its workspace, so resetting it to the kernel buffer isn't the
; best for debugging. But it's better than an infinite abort loop!
MOV R0, #ExceptionDumpArea
LDR R1, =ZeroPage+DUMPER
SWI XOS_ChangeEnvironment
05
Pull "R14"
; Copy the dump from the stack to ExceptionDump
LDR R0, [R4, #ExceptionDump]
LDMDB R3, {R1-R2,R4-R9} ; R0-R7
STMIA R0!, {R1-R2,R4-R9}
LDMIA R3, {R1-R2,R4-R10} ; R8-R15, PSR
STMIA R0, {R1-R2,R4-R10}
SUB R0, R0, #8*4
; try and put back user R10-R12
Push "R4-R6" ; for error handler to find on stack
LDR R1, [R0, #15*4]
Push R1
[ :LNOT:No26bitCode
LDR R0, BranchThroughZeroInstruction ; load the B RESET1 instr.
STR R0, [R1, -R1] ; and store it at zero again
BIC R14, R14, #ARM_CC_Mask
]
LDR R0, =GeneralMOSBuffer+128 ; so can do deviant sharing !
[ International
......@@ -786,9 +769,6 @@ UNDEF1
LDR R0, =ZeroPage
LDR R0, [R0, #ExceptionDump]
LDR R0, [R0, #15*4] ; saved PC
[ :LNOT: No26bitCode
BIC R0, R0, #ARM_CC_Mask
]
SWI XOS_ConvertHex8
[ International
......@@ -803,8 +783,8 @@ UNDEF1
LDR R0, =GeneralMOSBuffer+128
]
[ No26bitCode
; Flatten UND and ABT stacks, jic
; Also a convenient way of getting rid of the temp exception dump
MRS R2, CPSR
BIC R2, R2, #F32_bit + &1F
ORR R3, R2, #ABT32_mode
......@@ -815,9 +795,6 @@ UNDEF1
LDR r13_undef, =UNDSTK
ORR R3, R2, #IRQ32_mode
MSR CPSR_c, R3
|
TEQP PC, #IRQ_mode+I_bit
]
LDR R1, =ZeroPage
[ ZeroPage = 0
STR R1, [R1, #IRQsema]
......@@ -831,68 +808,41 @@ UNDEF1
LTORG
UNDEF ROUT
[ No26bitCode
; In UND32 mode, with a stack
Push R14
; Place exception dump on stack until we can be sure ExceptionDump is safe
STR R14, [SP, #-8]!
SETPSR F_bit :OR: I_bit, R14
|
TEQP pc, #F_bit :OR: I_bit :OR: SVC_mode ; FIQ off too
STR R14, [R0, -R0]
]
LDR R14, =ZeroPage
LDR R14, [R14, #ExceptionDump]
SUB SP, SP, #17*4-8
MOV R14, SP
STMIA R14!, {R0-R7}
[ No26bitCode
MRS R1, SPSR
STR R1, [R14, #(16-8)*4] ; save PSR
Pull R0
STR R0, [R14, #(15-8)*4] ; save PC
]
MOV R0, R14
BL DumpyTheRegisters
MakeErrorBlock UndefinedInstruction
ABORTP ROUT
[ No26bitCode
; In ABT32 mode, with a stack
Push R14
STR R14, [SP, #-8]!
SETPSR F_bit :OR: I_bit, R14
|
TEQP pc, #F_bit :OR: I_bit :OR: SVC_mode ; FIQ off too
STR R14, [R0, -R0]
]
LDR R14, =ZeroPage
LDR R14, [R14, #ExceptionDump]
SUB SP, SP, #17*4-8
MOV R14, SP
STMIA R14!, {R0-R7}
[ No26bitCode
MRS R1, SPSR
STR R1, [R14, #(16-8)*4] ; save PSR
Pull R0
STR R0, [R14, #(15-8)*4] ; save PC
]
MOV R0, R14
BL DumpyTheRegisters
MakeErrorBlock InstructionAbort
ABORTD ROUT
[ No26bitCode
; In ABT32 mode, with a stack
Push R14
STR R14, [SP, #-8]!
SETPSR F_bit :OR: I_bit, R14
|
TEQP pc, #F_bit :OR: I_bit :OR: SVC_mode ; FIQ off too
STR R14, [R0, -R0]
]
LDR R14, =ZeroPage
LDR R14, [R14, #ExceptionDump]
SUB SP, SP, #17*4-8
MOV R14, SP
STMIA R14!, {R0-R7}
[ No26bitCode
MRS R1, SPSR
STR R1, [R14, #(16-8)*4] ; save PSR
Pull R0
STR R0, [R14, #(15-8)*4] ; save PC
]
MOV R0, R14
BL DumpyTheRegisters
MakeErrorBlock DataAbort
......@@ -900,26 +850,16 @@ ABORTD ROUT
ADDREX ROUT
; This really can't happen. Honest
[ No26bitCode
; ??? in ABT32 mode, with a stack?
Push R14
STR R14, [SP, #-8]!
SETPSR F_bit :OR: I_bit, R14
|
TEQP pc, #F_bit :OR: I_bit :OR: SVC_mode ; FIQ off too
STR R14, [R0, -R0]
]
LDR R14, =ZeroPage
LDR R14, [R14, #ExceptionDump]
SUB SP, SP, #17*4-8
MOV R14, SP
STMIA R14!, {R0-R7}
[ No26bitCode
MRS R1, SPSR
STR R1, [R14, #(16-8)*4] ; save PSR
Pull R0
STR R0, [R14, #(15-8)*4]
]
MOV R0, R14
BL DumpyTheRegisters
MakeErrorBlock AddressException
......@@ -968,9 +908,12 @@ Branch0_FromTrampoline
LDR R1, [R1, #ExceptionDump]
STR R0, [R1, #15*4]
]
MOV R0, #0
SWI XOS_EnterOS
LDR R0, =ZeroPage
LDR R0, [R0, #ExceptionDump]
ADD R0, R0, #8*4
BL UNDEF1
MakeErrorBlock BranchThrough0
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment