Commit 3d1317e7 authored by Jeffrey Lee's avatar Jeffrey Lee
Browse files

Add VMSAv6 MMU support, fixes to allow booting on beagleboard

Detail:
  s/ARM600 - fix to SyncCodeAreasRange to correctly read cache line length for WB_CR7_Lx caches
  s/ARMops - Cortex cache handling fixes. Enable L2 cache for Cortex.
  s/ChangeDyn - VMSAv6 support in AllocateBackingLevel2
  s/HAL - Improve RISCOS_InitARM to set/clear correct CP15 flags for ARMv6/v7. VMSAv6 support in code to generate initial page tables.
  s/NewReset - Extra DebugTX calls during OS startup. Disable pre-HAL Processor_Type for HAL builds.
  s/VMSAv6 - Main VMSAv6 MMU code - stripped down version of s/ARM600 with support for basic VMSAv6 features.
  hdr/Options - Use VMSAv6 MMU code, not ARM600. Disable ARM6support since current VMSAv6 code will conflict with it.
Admin:
  Tested basic OS functionality under qemu-omap3 and revision B6 beagleboard.


Version 5.35, 4.79.2.98.2.3. Tagged as 'Kernel-5_35-4_79_2_98_2_3'
parent ad9cdf41
......@@ -13,11 +13,11 @@
GBLS Module_ComponentPath
Module_MajorVersion SETS "5.35"
Module_Version SETA 535
Module_MinorVersion SETS "4.79.2.98.2.2"
Module_Date SETS "21 Feb 2009"
Module_ApplicationDate SETS "21-Feb-09"
Module_MinorVersion SETS "4.79.2.98.2.3"
Module_Date SETS "06 Mar 2009"
Module_ApplicationDate SETS "06-Mar-09"
Module_ComponentName SETS "Kernel"
Module_ComponentPath SETS "castle/RiscOS/Sources/Kernel"
Module_FullVersion SETS "5.35 (4.79.2.98.2.2)"
Module_HelpVersion SETS "5.35 (21 Feb 2009) 4.79.2.98.2.2"
Module_FullVersion SETS "5.35 (4.79.2.98.2.3)"
Module_HelpVersion SETS "5.35 (06 Mar 2009) 4.79.2.98.2.3"
END
......@@ -5,19 +5,19 @@
*
*/
#define Module_MajorVersion_CMHG 5.35
#define Module_MinorVersion_CMHG 4.79.2.98.2.2
#define Module_Date_CMHG 21 Feb 2009
#define Module_MinorVersion_CMHG 4.79.2.98.2.3
#define Module_Date_CMHG 06 Mar 2009
#define Module_MajorVersion "5.35"
#define Module_Version 535
#define Module_MinorVersion "4.79.2.98.2.2"
#define Module_Date "21 Feb 2009"
#define Module_MinorVersion "4.79.2.98.2.3"
#define Module_Date "06 Mar 2009"
#define Module_ApplicationDate "21-Feb-09"
#define Module_ApplicationDate "06-Mar-09"
#define Module_ComponentName "Kernel"
#define Module_ComponentPath "castle/RiscOS/Sources/Kernel"
#define Module_FullVersion "5.35 (4.79.2.98.2.2)"
#define Module_HelpVersion "5.35 (21 Feb 2009) 4.79.2.98.2.2"
#define Module_FullVersion "5.35 (4.79.2.98.2.3)"
#define Module_HelpVersion "5.35 (06 Mar 2009) 4.79.2.98.2.3"
#define Module_LibraryVersionInfo "5:35"
......@@ -219,7 +219,7 @@ SAcleanflushbroken SETL {TRUE} :LAND: StrongARM
SASTMhatbroken SETL {TRUE} :LAND: StrongARM
StrongARM_POST SETL {TRUE} :LAND: StrongARM
ARM6support SETL {TRUE}
ARM6support SETL {FALSE} ; Needs updating for VMSAv6 compatibility
XScaleMiniCache SETL {FALSE}
......@@ -406,7 +406,7 @@ GetFlashROM SETS ""
GBLS GetPalette
GBLS GetMemInfo
GBLS GetHAL
GetKernelMEMC SETS "GET s.ARM600"
GetKernelMEMC SETS "GET s.VMSAv6"
GetMemInfo SETS "GET s.MemInfo"
GetPalette SETS "GET s.vdu.vdupalxx"
......
......@@ -3550,6 +3550,12 @@ SyncCodeAreasRange
MOV r0, r1
ADD r1, r2, #4 ;exclusive end address
MOV r2, #0
LDRB lr, [r2, #Cache_Type]
CMP lr, #CT_ctype_WB_CR7_Lx ; DCache_LineLen lin or log?
LDRB lr, [r2, #DCache_LineLen]
MOVEQ r2, #4
MOVEQ lr, r2, LSL lr
MOVEQ r2, #0
LDRB lr, [r2, #DCache_LineLen]
SUB lr, lr, #1
ADD r1, r1, lr ;rounding up end address
......
......@@ -523,7 +523,7 @@ Analyse_WB_CR7_Lx
; Read the cache info into Cache_Lx_*
MRC p15, 1, a1, c0, c0, 1 ; Cache level ID register
MOV a2, v6 ; Work around DTable/ITable alignment issues
MOV v2, v6 ; Work around DTable/ITable alignment issues
STR a1, [v2, #Cache_Lx_Info]!
ADD a1, v2, #Cache_Lx_DTable-Cache_Lx_Info
ADD a2, v2, #Cache_Lx_ITable-Cache_Lx_Info
......@@ -533,7 +533,7 @@ Analyse_WB_CR7_Lx
10
MCR p15, 2, a3, c0, c0, 0 ; Program cache size selection register
MRC p15, 1, v1, c0, c0, 0 ; Get size info (data/unified)
STR v1, [a1,#4]
STR v1, [a1],#4
CMP v1, #0 ; Does the cache exist?
AND v1, v1, #7 ; Get line size
CMPNE v1, v2
......@@ -541,7 +541,7 @@ Analyse_WB_CR7_Lx
ADD a3, a3, #1
MCR p15, 2, a3, c0, c0, 0 ; Program cache size selection register
MRC p15, 1, v1, c0, c0, 0 ; Get size info (instruction)
STR v1, [a2,#4]
STR v1, [a2],#4
CMP v1, #0 ; Does the cache exist?
AND v1, v1, #7 ; Get line size
CMPNE v1, a4
......@@ -604,6 +604,13 @@ Analyse_WB_CR7_Lx
ADRL a1, XCBTableWBR ; assume read-allocate WB/WT cache
STR a1, [v6, #MMU_PCBTrans]
; Enable L2 cache. This could probably be moved earlier on in the boot sequence (e.g. when the MMU is turned on), but for now it will go here to reduce the chances of stuff breaking
BL Cache_CleanInvalidateAll_WB_CR7_Lx ; Ensure L2 cache is clean
MRC p15, 0, a1, c1, c0, 1
ORR a1, a1, #2 ; L2EN
MCR p15, 0, a1, c1, c0, 1
B %FT90
90
......@@ -711,7 +718,7 @@ KnownCPUTable
; Simplified CPUDesc table for Fancy ARMs
; The cache size data is ignored
KnownCPUTable_Fancy
CPUDesc Cortex_A8, &00C080, &00FFF0, ARMv5TE, WB_CR7_Lx, 1, 16K, 32, 16, 16K, 32, 16
CPUDesc Cortex_A8, &00C080, &00FFF0, ARMvF, WB_CR7_Lx, 1, 16K, 32, 16, 16K, 32, 16
DCD -1
; Peculiar characteristics of individual ARMs not deducible otherwise. First field is
......@@ -733,7 +740,7 @@ KnownCPUFlags
DCD 0, 0 ; ARM 922T
DCD CPUFlag_ExtendedPages+CPUFlag_XScale, 0 ; X80200
DCD CPUFlag_XScale, 0 ; X80321
DCD 0, 0 ; Cortex_A8
DCD CPUFlag_ExtendedPages, 0 ; Cortex_A8
; --------------------------------------------------------------------------
; ----- ARM_Analyse_Fancy --------------------------------------------------
......
......@@ -2444,10 +2444,14 @@ AllocateBackingLevel2 Entry "r0-r8,r11"
LDR r6, [r1, #-4] ; r6 = physical address for L2 page + other L2 bits
MOV r6, r6, LSR #12 ; r6 = phys.addr >> 12
[ ECC
LDR lr, =L1_Page + L1_U + L1_P ; form other bits to put in L1
[ MEMM_Type = "VMSAv6"
LDR lr, =L1_Page
|
[ ECC
LDR lr, =L1_Page + L1_U + L1_P ; form other bits to put in L1
|
LDR lr, =L1_Page + L1_U ; form other bits to put in L1
]
]
ORR lr, lr, r6, LSL #12 ; complete L1 entry
STR lr, [r8, #0] ; store entry for 1st MB
......
......@@ -28,6 +28,20 @@ DRAMOffset_LastFixed # 0
; IMPORT ARM_Analyse
[ MEMM_Type = "VMSAv6"
mmuc_table ; Table of MMUC init values. First word is value to ORR, second is value to BIC
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv3
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv4
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv4T
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv5
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv5T
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv5TE
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S ; ARMv5TEJ
DCD MMUC_F+MMUC_L+MMUC_D+MMUC_P, MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M+MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S+MMUC_VE+MMUC_EE+MMUC_L2 ; ARMv6
; Skip undefined architecture numbers
mmuc_table_armvf
DCD 0, MMUC_C+MMUC_A+MMUC_M+MMUC_V+MMUC_I+MMUC_Z+MMUC_RR+MMUC_TRE+MMUC_AFE+MMUC_TE ; ARMvF (Cortex)
]
; void RISCOS_InitARM(unsigned int flags)
;
......@@ -37,6 +51,17 @@ RISCOS_InitARM
BL Init_ARMarch
MOVEQ a3, #0
ARM_read_control a3, NE
[ MEMM_Type = "VMSAv6"
; Use a lookup table to get the correct control register set/clear mask
CMP a1, #ARMvF
ADREQ a2, mmuc_table_armvf
ADRLT a2, mmuc_table
ADDLT a2, a2, a1, LSL #3
LDMIA a2, {a2, a4}
ORR a3, a3, a2
BIC a3, a3, a4
CMP a1,#0
|
; Late abort (ARM6 only), 32-bit Data and Program space. No Write buffer (ARM920T
; spec says W bit should be set, but I reckon they're bluffing).
;
......@@ -47,6 +72,7 @@ RISCOS_InitARM
; All of these bits should be off already, but just in case...
BIC a3, a3, #MMUC_B+MMUC_W+MMUC_C+MMUC_A+MMUC_M
BIC a3, a3, #MMUC_RR+MMUC_V+MMUC_I+MMUC_Z+MMUC_R+MMUC_S
]
; Off we go.
ARM_write_control a3
......@@ -515,6 +541,7 @@ RISCOS_Start
MOV a4, #1024*1024
BL Init_MapIn
; Examine HAL and RISC OS locations
LDMFD sp, {v4,v5,v6} ; v4 = flags, v5 = RO desc, v6 = HAL desc
LDR lr, [v6, #HALDesc_Size]
......@@ -538,7 +565,6 @@ RISCOS_Start
ADD a3, a3, ip
ADD a4, a4, ip
STMIB sp, {a3, a4}
MOV a3, #(AP_ROM * L2X_APMult) + L2_C + L2_B
SUB a4, v7, v6
BL Init_MapIn
......@@ -599,7 +625,6 @@ RISCOS_Start
BLO %BT76
MMU_activation_zone
; The time has come to activate the MMU. Steady now... Due to unpredictability of MMU
; activation, need to ensure that mapped and unmapped addresses are equivalent. To
; do this, we temporarily make the section containing virtual address MMUon_instr map
......@@ -616,11 +641,15 @@ MMU_activation_zone
MOV a1, a1, LSR #20 ; a1 = megabyte number (stays there till end)
ADD lr, v3, a1, LSL #2 ; lr -> L1PT entry
LDMIA lr, {a2, a3} ; remember old mappings
[ MEMM_Type = "VMSAv6"
LDR ip, =(AP_ROM * L1_APMult) + L1_Section
|
[ ARM6support
LDR ip, =(AP_None * L1_APMult) + L1_U + L1_Section
|
LDR ip, =(AP_ROM * L1_APMult) + L1_U + L1_Section
]
]
ORR a4, ip, a1, LSL #20 ; not cacheable, as we don't want
ADD v4, a4, #1024*1024 ; to fill the cache with rubbish
STMIA lr, {a4, v4}
......@@ -646,6 +675,15 @@ MMU_activation_zone
MCREQ p15, 0, lr, c5, c0 ; MMU may already be on (but flat mapped)
MCRNE p15, 0, lr, c8, c7 ; if HAL needed it (eg XScale with ECC)
; so flush TLBs now
[ MEMM_Type = "VMSAv6"
CMP ip, #ARMv6
MCRGE p15, 0, lr, c2, c0, 2 ; Ensure only TTBR0 is used (v6)
MCRGT p15, 0, lr, c12, c0, 0 ; Ensure exception vector base is 0 (Cortex)
ORRGE v5, v5, #MMUC_XP ; Extended pages enabled (v6)
BICGT v5, v5, #MMUC_TRE+MMUC_AFE ; TEX remap, Access Flag disabled (Cortex)
BICGE v5, v5, #MMUC_EE+MMUC_TE+MMUC_VE ; Exceptions = nonvectored LE ARM
CMP ip, #0
]
MMUon_instr
ARM_write_control v5
MOVEQ sp, v5
......@@ -654,11 +692,12 @@ MMUon_instr
CMP ip, #ARMvF
MOV lr, #0 ; junk MMU-off contents of I-cache
MCRNE ARM_config_cp,0,lr,ARMv4_cache_reg,C7 ; (works on ARMv3)
BLEQ HAL_InvalidateCache_ARMvF
MCREQ p15, 0, lr, c7, c5, 0 ; invalidate instruction cache
MCREQ p15, 0, lr, c8, c7, 0 ; invalidate TLBs
BLEQ HAL_InvalidateCache_ARMvF ; invalidate data cache (and instruction+TLBs again!)
; HACK HACK HACK - all domains remain in manager mode
; MOV ip, #4_0000000000000001 ; domain 0 client only
; ARM_MMU_domain ip
MOV ip, #4_0000000000000001 ; domain 0 client only
ARM_MMU_domain ip
; MMU now on. Need to jump to logical copy of ourselves. Complication arises if our
; physical address overlaps our logical address - in that case we need to map
......@@ -683,11 +722,15 @@ MMUon_instr
; Oh dear. We know the ROM lives high up, so we'll mangle 00100000-002FFFFF.
; But as we're overlapping the ROM, we know we're not overlapping the page tables.
LDR lr, =L1PT ; accessing the L1PT virtually now
[ MEMM_Type = "VMSAv6"
LDR ip, =(AP_ROM * L1_APMult) + L1_Section
|
[ ARM6support
LDR ip, =(AP_None * L1_APMult) + L1_U + L1_Section
|
LDR ip, =(AP_ROM * L1_APMult) + L1_U + L1_Section
]
]
ORR v6, a4, ip
ADD ip, v6, #1024*1024
LDMIB lr, {v7, v8} ; sections 1 and 2
......@@ -983,6 +1026,9 @@ HAL_InvalidateCache_ARMvF
; The only register we can safely change is ip, but we can switch into FIQ mode with interrupts disabled and use the banked registers there
MRS ip, CPSR
MSR CPSR_c, #F32_bit+I32_bit+FIQ32_mode
MOV r8, #0
MCR p15, 0, r8, c7, c5, 0 ; invalidate instruction cache
MCR p15, 0, r8, c8, c7, 0 ; invalidate TLBs
MRC p15, 1, r8, c0, c0, 1 ; Cache level ID register
BIC r8, r8, #&FF000000 ; Discard unification/coherency bits
MOV r9, #0 ; Current cache level
......@@ -1103,6 +1149,7 @@ ConstructCAMfromPageTables
ANDS v6, v5, #2_11 ; move to next page if fault
BEQ %FT80
[ MEMM_Type <> "VMSAv6"
TEQ v6, #L2_SmallPage ; convert small pages to extended pages
BNE %FT50
MOV lr, #0 ; if we now know that CPU supports them
......@@ -1125,6 +1172,7 @@ ConstructCAMfromPageTables
MOV a2, #ZeroPage
ARMop MMU_ChangingEntry,,,a2
STR v5, [v4, v2, LSR #10] ; update page table
]
50 MOV a1, v5, LSR #12
MOV a1, a1, LSL #12 ; a1 = address (flags stripped out),,,
......@@ -1139,6 +1187,37 @@ ConstructCAMfromPageTables
ADD a2, v1, a1, LSL #3 ; a2 -> CAM entry
[ MEMM_Type = "VMSAv6"
AND a1, v5, #L2_AP ; a1 = access permission
MOV a1, a1, LSR #L2_APShift
; Map AP_ROM to 0
CMP a1, #AP_ROM
MOVEQ a1, #0
; Now ARM access goes 0 => all R/O, 1 => user none, 2 => user R/O, 3 => user R/W
; PPL access goes 0 => user R/W, 1 => user R/O, 2 => user none, (and let's say 3 all R/O)
RSB v6, a1, #3 ; v6 = PPL access
AND a1, v5, #2_11 ; a1 = page type
CMP a1, #L2_ExtPage
ANDHS a1, v5, #L2_TEX+L2_C+L2_B ; Extended TEX and CB bits
ANDLO a1, v5, #L2_C+L2_B ; Large CB bits only
ANDLO lr, v5, #L2L_TEX ; Large TEX bits
ORRLO a1, a1, lr, LSR #L2L_TEXShift-L2_TEXShift ; Move Large TEX back to Extended TEX position
MOV lr, #3 ; lr = PCB value (funny loop to do NCNB first)
60 LDRB a3, [ip, lr] ; look in XCBTrans table
TEQ a3, a1 ; found a match for our XCB?
BEQ %FT70
TST lr, #2_11
SUBNE lr, lr, #1 ; loop goes 3,2,1,0,7,6,5,4,...,31,30,29,28
ADDEQ lr, lr, #7
TEQ lr, #35
BNE %BT60
70 AND a1, lr, #2_00011
ORR v6, v6, a1, LSL #4 ; extract NCNB bits
AND a1, lr, #2_11100
ORR v6, v6, a1, LSL #10 ; extract P bits
ORR v6, v6, #PageFlags_Unavailable ; ???? pages from scratch to cam only?
STMIA a2, {v2, v6} ; store logical address, PPL
|
AND a1, v5, #&30 ; a1 = access permission
MOV a1, a1, LSR #4
; ARM access goes 0 => all R/O, 1 => user none, 2 => user R/O, 3 => user R/W
......@@ -1165,6 +1244,7 @@ ConstructCAMfromPageTables
ORR v6, v6, a1, LSL #10 ; extract P bits
ORR v6, v6, #PageFlags_Unavailable ; ???? pages from scratch to cam only?
STMIA a2, {v2, v6} ; store logical address, PPL
]
80 ADD v2, v2, #&00001000
TST v2, #&000FF000
......@@ -1251,7 +1331,7 @@ Init_MapInRAM ROUT
; On entry:
; a1 = physical address
; a2 = logical address
; a3 = access permissions + C + B bits (bits 11-2 of an extended descriptor)
; a3 = access permissions+C+B bits (bits 11-2 of an L2 extended small page)
; (also set bit 31 to indicate that P bit in L1PT should
; be set)
; a4 = area size
......@@ -1270,6 +1350,12 @@ Init_MapIn ROUT
ORRNE a3, a3, #L2_ExtPage ; then extended small pages (4K)
BNE %FT10
[ MEMM_Type = "VMSAv6"
ORR a3, a3, #L2_LargePage ; else large pages (64K)
AND lr, a3, #L2_TEX ; extract TEX from ext page flags
BIC a3, a3, #L2_TEX ; small page TEX bits SBZ for large pages
ORR a3, a3, lr, LSL #L2L_TEXShift-L2_TEXShift ; replace TEX in large page position
|
ORR a3, a3, #L2_LargePage ; else large pages (64K)
AND lr, a3, #L2_TEX ; extract TEX from ext page flags
AND ip, a3, #L2X_AP ; extract AP from ext page flags
......@@ -1278,6 +1364,7 @@ Init_MapIn ROUT
ORR ip, ip, ip, LSL #4
ORR a3, a3, lr, LSL #6 ; replace TEX in large page position
ORR a3, a3, ip ; replace quadrupled AP
]
10
Push "v4-v7"
MOV v4, a1 ; v4 = physaddr
......@@ -1295,6 +1382,22 @@ Init_MapIn ROUT
BNE %BT20
Pull "v4-v7,pc"
[ MEMM_Type = "VMSAv6"
Init_MapIn_Sections
MOVS ip, v3 ; is MMU on?
LDREQ ip, =L1PT ; then use virtual address
AND lr, a3, #L2_TEX + L2_AP ; extract TEX, AP, APX bits (input is extended small page)
BIC a3, a3, #L2_TEX + L2_AP ; and clear them
ORR a3, a3, lr, LSL #6 ; put TEX and AP bits back in new position
ORR a3, a3, #L1_Section ; Mark as section
ORR a1, a1, a3 ; Merge with physical address
ADD a2, ip, a2, LSR #18 ; a2 -> L1PT entry
70 STR a1, [a2], #4 ; And store in L1PT
ADD a1, a1, #1024*1024 ; Advance one megabyte
SUBS a4, a4, #1024*1024 ; and loop
BNE %BT70
Pull "pc"
|
Init_MapIn_Sections
MOVS ip, v3 ; is MMU on?
LDREQ ip, =L1PT ; then use virtual address
......@@ -1320,7 +1423,7 @@ Init_MapIn_Sections
SUBS a4, a4, #1024*1024 ; and loop
BNE %BT70
Pull "pc"
]
; Map a logical page to a physical page, allocating L2PT as necessary.
;
......@@ -1360,6 +1463,7 @@ Init_MapInPage
40 AND lr, v6, #3
TEQ lr, #L2_LargePage ; strip out surplus address bits from
BICEQ v6, v6, #&0000F000 ; large page descriptors
[ MEMM_Type <> "VMSAv6"
TEQ lr, #L2_ExtPage
BNE %FT50
TEQ v3, #0 ; if we've been given an extended page
......@@ -1374,6 +1478,7 @@ Init_MapInPage
ORR lr, lr, lr, LSL #2 ; (losing the TEX bits in the
ORR v6, v6, lr ; process, but they should have been 0)
ORR v6, v6, lr, LSL #4
]
50 ORR lr, v4, v6 ; lr = value for L2PT entry
[ ARM6support
ASSERT AP_ROM = 0
......@@ -1462,6 +1567,9 @@ AllocateL2PT
20 BL Init_ClaimPhysicalPage ; Claim a page to put L2PT in
MOV v4, a1
[ MEMM_Type = "VMSAv6"
ORR a3, a1, #L1_Page
|
[ ARM6support
ASSERT AP_ROM = 0
ARM_6 lr ; if ARM 6
......@@ -1475,6 +1583,7 @@ AllocateL2PT
[ ECC
ORR a3, a3, #L1_P
]
]
AND lr, v8, #3
ORR a3, a3, lr, LSL #10
STR a3, [v6, v8, LSL #2] ; fill in the L1PT
......@@ -1523,7 +1632,11 @@ RISCOS_AccessPhysicalAddress
Pull "a1-a3,lr"
]
LDR ip, =L1PT + (PhysicalAccess:SHR:18) ; ip -> L1PT entry
[ MEMM_Type = "VMSAv6"
LDR a4, =(AP_None * L1_APMult) + L1_Section
|
LDR a4, =(AP_None * L1_APMult) + L1_U + L1_Section
]
AND a1, a1, #L1_B ; user can ask for bufferable
[ ECC
ORRNE a1, a1, #L1_P
......@@ -1777,9 +1890,37 @@ InitProcVecs
InitProcVec_FIQ
DCD 0
InitProcVecsEnd
; Debug InitProcVecs - on an exception they output the PC to a UART
; GET ADFS::4.$.work.riscos.omap3dev.riscos.sources.hal.omap3.hdr.UART
;InitProcVecs
; NOP ; Reset
; NOP ; Undefined instruction
; NOP ; SWI
; NOP ; Prefetch abort
; NOP ; data abort
; NOP ; address exception
; NOP ; IRQ
; NOP ; FIQ
; ADR a3,hextab
; LDR a2,[a3],#4
; MOV a4, #8
;10
; LDRB v1, [a2, #UART_LSR]
; TST v1, #THRE
; BEQ %BT10
; LDRB v1, [a3, lr, LSR #28]
; STRB v1, [a2, #UART_THR]
; MOV lr, lr, LSL #4
; SUBS a4, a4, #1
; BNE %BT10
;20
; B %BT20
;hextab DCD &49020000 ; UART address
; DCB "0123456789abcdef"
;InitProcVecsEnd
;
; In: a1 = flags (L1_B,L1_C,L1_AP)
; In: a1 = flags (L1_B,L1_C,L1_AP,L1_APX)
; a2 = physical address
; a3 = size
; Out: a1 = assigned logical address, or 0 if failed (no room)
......@@ -1790,7 +1931,11 @@ InitProcVecsEnd
ASSERT L1_B = 1:SHL:2
ASSERT L1_C = 1:SHL:3
[ MEMM_Type = "VMSAv6"
ASSERT L1_AP = 2_100011 :SHL: 10
|
ASSERT L1_AP = 3:SHL:10
]
MapInFlag_DoublyMapped * 1:SHL:20
MapInFlag_APSpecified * 1:SHL:21
......@@ -1811,6 +1956,7 @@ RISCOS_MapInIO ROUT
TST v5, #MapInFlag_APSpecified
BICEQ a1, a1, #L1_AP
; For VMSAv6, assume HAL knows what it's doing and requests correct settings for AP_ROM
ORREQ a1, a1, #L1_APMult * AP_None
ANDS v5, v5, #MapInFlag_DoublyMapped
......
......@@ -682,12 +682,15 @@ kbdwait
SUBS r6, r6, #1 ; else wait a maximum of 5 seconds.
BNE kbdwait
kbddone
DebugTX "Keyboard scan complete"
MSR CPSR_c, #I32_bit+SVC32_mode
DebugTX "FIQ enabled"
CallHAL HAL_KbdScanFinish
MOV r1, #InitIRQWs
MOV r0, #0
STRB r0, [r1, #KbdScanActive]
MSR CPSR_c, #SVC32_mode
DebugTX "IRQ enabled"
|
[ KeyWait <> 0
; Check for keyboard there every 1/5 sec. but give up after 2 secs.
......@@ -1832,13 +1835,20 @@ ResetPart1Done ; R0 is reset type
SWI XOS_ChangeEnvironment
VDWS WsPtr ; main MOS initialisation
DebugTX "VduInit"
BL VduInit
DebugTX "ExecuteInit"
BL ExecuteInit
DebugTX "KeyInit"
BL KeyInit
DebugTX "MouseInit"
BL MouseInit
DebugTX "OscliInit"
BL OscliInit ; before initialising modules
DebugTX "Enabling IRQs"
WritePSRc SVC_mode, R14 ; enable IRQs
DebugTX "IRQs on"
[ DebugTerminal
MOV R0, #RdchV
......@@ -1849,6 +1859,7 @@ ResetPart1Done ; R0 is reset type
MOV R0, #WrchV
ADRL R1, DebugTerminal_Wrch
SWI XOS_Claim
DebugTX "Debug terminal on"
]
[ DoInitialiseMode :LOR: :LNOT: Embedded_UI
......@@ -1865,6 +1876,7 @@ ResetPart1Done ; R0 is reset type
; HardResetPart2
[ HAL
DebugTX "HAL_InitDevices"
AddressHAL
MOV R0, #0
STR R0, [R0, #DeviceCount]
......@@ -1873,8 +1885,11 @@ ResetPart1Done ; R0 is reset type
|
BL L1L2PTenhancements ; little tricks on cacheability etc for performance
]
DebugTX "InitVariables"
BL InitVariables
DebugTX "AMBControl_Init"
BL AMBControl_Init ; initialise AMBControl section
DebugTX "ModuleInit"
BL ModuleInit ; initialise modules
; scan podules, copy modules.
......@@ -2132,7 +2147,7 @@ MessageFileName DCB "Resources:$.Resources.Kernel.Messages",0
ALIGN
]
[ StrongARM
[ StrongARM :LAND: :LNOT: HAL
Processor_Type
MOV r0,#IOMD_Base
LDRB r1,[r0,#IOMD_ID0]
......
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment