Commit 3249caeb authored by Mike Stephens
Browse files

Changed compile switches to build the Ursula kernel for RPC and A7000(+); switches are now set as follows:

  ARM67Support      TRUE  (for 610,710,7500,7500FE)
  ARMSASupport      TRUE  (for StrongARM)
  ARMSASupport_RevS FALSE (for StrongARMs before rev S)
  IOMD1Support      TRUE  (for old machines)
  IOMD2Support      FALSE (They killed Phoebe!)
Version set to 4.00 (RISC OS 4)
This is the same as my last commit to the Ursula branch
parent 1bcabcf6
......@@ -11,9 +11,9 @@ VString SETS "4.52"
Date SETS "07 Nov 1996" ; version for STB/NC OS
OSVersionID SETA &A6
|
Version SETA 380
VString SETS "3.80"
Date SETS "28 Apr 1998" ; version for RISC OS on desktop computers
Version SETA 400
VString SETS "4.00"
Date SETS "20 Nov 1998" ; version for RISC OS on desktop computers
OSVersionID SETA &A8 ; was &A7 for 3.70,3.71
]
......
......@@ -17,13 +17,6 @@
GET Hdr:System
GET Hdr:Machine.<Machine>
; Hack!
GBLL med_00001_debug
med_00001_debug SETL {FALSE}
; and another...
GBLL StrongARM
StrongARM SETL {TRUE}
GET ^.PublicWS
GET ^.KernelWS
......
......@@ -978,7 +978,13 @@ ACS_SCflipflop * &00800000 ; bit 23 SC flipflop - controls
ACS_SCflipflop_SHIFT * 23
ACS_SynchCAsemaphore * &00400000 ; bit 22 SynchCA semaphore - set during SynchroniseCodeAreas (re-entrancy guard)
ACS_Scacheflag * &00200000 ; bit 21 Scache flag - set if screen currently cacheable (mainly for info to OS_ScreenMode)
; bit 0..20 reserved - 0
ACS_SoftVIDMRD * &00100000 ; bit 20 VIDMRD emulate - if no h/w VIDMRD, abort mechanism emulates 'dirty screen' flag here
; if h/w VIDMRD, used as sticky soft copy of 'dirty screen' flag in bit 0 of h/w VIDMRD
; (sticky because h/w flag clears on read, but s/w flag may need to persist)
ACS_VSCpending_MASK * &03100000 ; VSCcountdown OR SoftVIDMRD
ACS_HardVIDMRD * &00080000 ; bit 19 VIDMRD present - set only if VIDMRD h/w register available (IOMD2)
ACS_MiniDataCache * &00040000 ; bit 18 mini cache - clear for SA-110, set for SA-120 (has 1k mini data cache, kernel uses it for screen)
; bit 0..17 reserved - 0
! 0, "AMBControl_ws at ":CC::STR:(AMBControl_ws)
! 0, "ARMA_Cleaner_flipflop at ":CC::STR:(ARMA_Cleaner_flipflop)
......@@ -1047,6 +1053,8 @@ PhysRamTable # 0 ; 6 pairs of words (physaddr, size) indicating
; RAM present in machine (NB normally you would need at most 5
; on IOMD machines, but the extra one is if a soft-loaded ROM image
; causes a bank to split
; mjs: Ursula needs to support an extra 4 pairs for Phoebe, since each SDRAM bank
; appears as two fragments - this is accommodated by extra pairs allocated for Morris
VideoPhysAddr # 4 ; Address of video RAM (in the case of DRAM-only machines,
VideoSize # 4 ; this is actually a chunk out of DRAM)
DRAMPhysAddrA # 4 ; Next the DRAM - note that any banks with no memory
......@@ -1059,13 +1067,12 @@ DRAMPhysAddrD # 4 ; of zero (and probably addresses of zero too)
DRAMSizeD # 4
DRAMPhysAddrE # 4
DRAMSizeE # 4
[ MorrisSupport
DRAMPhysAddrExtra # 4 * 12 ; The DRAM used with MORRIS can fragment into four
DRAMSizeExtra # 4 * 12 ; blocks so allocate 3 extra word pairs per bank
]
PhysRamTableEnd # 0
! 0, "VideoPhysAddr at ":CC::STR:(VideoPhysAddr)
SoftROMaddr # 4 ; reserved physical address for soft ROM (0 if none, currently non-0 for Phoebe only)
! 0, "SoftROMaddr at ":CC::STR:(SoftROMaddr)
VRAMSize # 4 ; Amount of VRAM (in bytes) (may be more than 2M) (at &200 last time I checked)
VRAMWidth # 4 ; 0 => no VRAM, 1 => 32-bits wide, 2 => 64-bits wide
VideoBandwidth # 4 ; video bandwidth in bytes/sec
......@@ -1077,30 +1084,20 @@ SoftCamMapSize # 4 ; Amount of memory (in bytes) used for soft CAM
InitKbdWs # 16 ; Workspace for reset keyboard IRQ code (was 12 changed for Morris)
CLine_Softcopy # 1 ; Added for Morris - Monitor id
[ :LNOT: STB
LCD_Active # 1 ; Added to support LCD/CRT switching. bm 6 bits 0=>External CRT in use, 1=>Mono, 2=>Passive colour, 3=>Active colour
; bit 7 unset=>single panel, set=>dual panel
LCD_Inverted # 1 ; Added to support LCD palette inversion. 0=normal, 1=inverted. Note that the inversion is invisible to apps.
! 0, "LCD_Active flag byte at ":CC::STR:(LCD_Active)
]
[ StrongARM
ProcessorType # 1 ; Processor type (handles 600 series onwards)
ProcessorFlags # 1 ; Processor flags (IMB, Arch4 etc)
! 0, "ProcessorType at ":CC::STR:(ProcessorType)
! 0, "ProcessorFlags at ":CC::STR:(ProcessorFlags)
]
AlignSpace 32 ; skipped bit must end on 32-byte boundary (due to speedup)
SkippedTablesEnd # 0
! 0, "SkippedTablesEnd at ":CC::STR:(SkippedTablesEnd)
CMOSRAMCache # 240 ; Cache for CMOS RAM
[ STB
[ E2ROMSupport
NVRamSize # 1 ; Size of NVRam (E2ROM & CMOS) fitted in 256byte units
RTCFitted # 1 ; flag =1 iff RTC is fitted
]
]
AlignSpace
......@@ -1320,6 +1317,14 @@ Abort32_dumparea # 6*4 ;info for OS_ReadSysInfo 7 - 32-bit PSR, fault
Help_guard # 4 ;for *help, guard against foreground re-entrancy (multiple taskwindows)
Help_msgdescr # 4*4 ;for *help, 4 words MessageTrans descriptor
! 0, "Help_guard at ":CC::STR:(Help_guard)
;
PCI_status # 4 ;bit 0 = 1 if PCI exists or 0 if PCI does not exist, bits 1..31 reserved (0)
! 0, "PCI_status at ":CC::STR:(PCI_status)
IOMD_NoInterrupt # 4 ;no. of irq devices for extant IOMD
IOMD_DefaultIRQ1Vcode # 4 ;default irq code start address (ROM) for extant IOMD
IOMD_DefaultIRQ1Vcode_end # 4 ;default irq code end address (ROM)
IOMD_Devices # 4 ;default irq devices table address (ROM)
! 0, "IOMD_NoInterrupt at ":CC::STR:(IOMD_NoInterrupt)
;
[ mjsSysHeapNodesTrace
mjsSHNodesTrace_ws # 0
......@@ -1339,9 +1344,9 @@ mjsSHNT_vfh_total # 4 ;total SysVar FreeVNode calls that dropped a node to t
! 0, "**WARNING** compiling in code to trace some SysHeap node statistics (mjsSysHeapNodesTrace TRUE)"
! 0, ""
! 0, "mjsSHNodesTrace_ws at ":CC::STR:(mjsSHNodesTrace_ws)
ModuleSHT_Padding1 # 752-12-4*ModuleSHT_Entries-11*4-6*4-5*4-12*4 ;spare
ModuleSHT_Padding1 # 752-12-4*ModuleSHT_Entries-11*4-6*4-5*4-4-4*4-12*4 ;spare
|
ModuleSHT_Padding1 # 752-12-4*ModuleSHT_Entries-11*4-6*4-5*4 ;spare
ModuleSHT_Padding1 # 752-12-4*ModuleSHT_Entries-11*4-6*4-5*4-4-4*4 ;spare
]
;
ASSERT @ = &C34 + 752
......@@ -1625,11 +1630,7 @@ RedirectBuff |#| OscliBuffSize
; 6 interrupts for I/O and sound DMA (this is really IOMD specific, not
; ARM600/700 specific but for the moment it is assumed that they are
; used on the same machines).
[ MorrisSupport
DefIRQ1Vspace * 12*4+12*23+2*256+64 + 7*4+12*16+32+256 ;Morris adds 2 more IRQ registers
|
DefIRQ1Vspace * 12*4+12*23+2*256+64 ; for size checking in MOS
]
DefaultIRQ1V |#| DefIRQ1Vspace
[ AssemblingArthur
......
......@@ -57,7 +57,7 @@
BLO %BT01
MEND
[ AMB_LazyMapIn
[ AMB_LazyMapIn :LAND: ARMSASupport
; ----------------------------------------------------------------------------------
;
......@@ -142,7 +142,7 @@ AMB_LazyFixUp ROUT
MOV r12,r7
MOV pc,lr
] ;AMB_LazyMapIn
] ;AMB_LazyMapIn :LAND: ARMSASupport
; ----------------------------------------------------------------------------------
......@@ -190,10 +190,14 @@ AMB_movepagesin_L2PT ROUT
SUBS r8,r8,#1
BNE %BT30
35
[ :LNOT: ARMSASupport_Only
ARM_read_ID r0
AND r0,r0,#&F000
CMP r0,#&A000
ARMA_drain_WB EQ ;because L2PT area for AppSpace will be bufferable
|
ARMA_drain_WB ;because L2PT area for AppSpace will be bufferable
]
Pull "r0-r10,r12,pc"
; ----------------------------------------------------------------------------------
......@@ -348,40 +352,18 @@ AMB_movepagesout_L2PT ROUT
SUBS r8,r8,#1
BNE %BT30
35
[ :LNOT: ARMSASupport_Only
ARM_read_ID r0
AND r0,r0,#&F000
CMP r0,#&A000
ARMA_drain_WB EQ ;because L2PT area for AppSpace will be bufferable
|
ARMA_drain_WB ;because L2PT area for AppSpace will be bufferable
]
Pull "r0-r8,pc"
; ----------------------------------------------------------------------------------
[ ARM810support
;Previously supported ARMs all tolerate cache (clean and) flush _after_
;remapping - ARMs 6,7 because there is no clean, StrongARM because the cache
;writebacks use physical address.
;ARM810 does not support clean of writeback cache after remapping, since
;writebacks use virtual address. Rather than completely restructure code,
;this routine is called before remapping where necessary, and cleans/flushes
;if it finds we are running on ARM 810.
;
;corrupts r3
;
AMB_cachecleanflush_ifARM810
ARM_read_ID r3
AND r3,r3,#&F000
CMP r3,#&8000
MOVNE pc,lr ;not ARM8
[ ARM810cleanflushbroken
Push "lr"
ARM8_cleanflush_IDC r3,lr
Pull "pc"
|
ARM8_cleanflush_IDC r3
MOV pc,lr
]
] ;ARM810support
; ----------------------------------------------------------------------------------
;
......@@ -453,9 +435,6 @@ AMB_SetMemMapEntries ROUT
AMB_smme_mapnotlimpid
]
;
[ ARM810support
BL AMB_cachecleanflush_ifARM810
]
MOV r3,r5
BL AMB_movepagesout_L2PT
BL AMB_movepagesin_L2PT
......@@ -477,9 +456,6 @@ AMB_smme_mapin
;all pages destined for same new logical page 'nowhere'
AMB_smme_mapout
[ ARM810support
BL AMB_cachecleanflush_ifARM810
]
LDR r3,=DuffEntry
BL AMB_movepagesout_L2PT
BL AMB_movepagesout_CAM
......@@ -503,17 +479,16 @@ AMB_smme_exit
; exit: trashes r0-r4,r7-r11 (assumed protected by client)
;
AMB_cachecleanflush
[ :LNOT: ARMSASupport_Only
ARM_read_ID r0
AND r0,r0,#&F000
[ ARM810support
CMP r0,#&8000 ;cache clean/flush done before remapping if ARM810
ARM8_flush_TLB EQ
MOVEQ pc,lr
]
CMP r0,#&A000
ARM67_flush_cache NE
ARM67_flush_TLB NE
MOVNE pc,lr
]
[ ARMSASupport
;we have a StrongARM then
;
......@@ -523,17 +498,6 @@ AMB_cachecleanflush
MOV r0,r4 ;r0 := start address for clean/flush
ADD r1,r0,r8,LSL #Log2PageSize ;r1 := end address for clean/flush (exclusive)
[ ChocolateScreen
;quick check - if screen cleaner has pending clean required, then we might as well choose
;to do a full clean (forget the threshold check below), since this will save screen cleaner work
;(we're only reading here, so interrupts not a problem)
MOV r2,#ARMA_Cleaner_status
LDR r2,[r2]
TST r2,#ACS_VSCcountdown_MASK
BNE AMB_ccf_StrongARM_flushwhole ;if countdown not zero, screen clean work pending
]
SUB r2,r1,r0
CMP r2,#AMB_ARMA_CleanRange_thresh
BLO AMB_ccf_StrongARM_flushrange
......@@ -545,6 +509,8 @@ AMB_ccf_StrongARM_flushwhole
MOV r2,#0
LDR r3,[r2,#ARMA_Cleaner_status]
ORR r3,r3,#ACS_NSCsemaphore ;set semaphore for non-screen clean
TST r3,#ACS_MiniDataCache
BICEQ r3,r3,#ACS_VSCpending_MASK ;unless screen uses mini cache, force pending VSC clear since we're about to clean cache
STR r3,[r2,#ARMA_Cleaner_status] ;update status
TEQP r1,#0 ;restore IRQ state
LDR r1,[r2,#ARMA_Cleaner_flipflop]
......@@ -559,13 +525,16 @@ AMB_ccf_StrongARM_flushwhole
TEQP r2,#0 ;disable IRQs to mess with ARMA_Cleaner_status and MMUdomain
MOV r2,#0
LDR r3,[r2,#ARMA_Cleaner_status]
BIC r3,r3,#ACS_NSCsemaphore:OR:ACS_VSCcountdown_MASK ;clear semaphore and any pending VSC
BIC r3,r3,#ACS_NSCsemaphore ;clear semaphore
STR r3,[r2,#ARMA_Cleaner_status]
[ ChocolateScreen
TST r3,#ACS_SCdisable:OR:ACS_SCsuspend
BNE AMB_ccf_nochoc ;do nothing if disabled or suspended
TST r3,#ACS_HardVIDMRD
ARMA_read_MMUdomain r3,EQ
BICEQ r3,r3,#&C
ARMA_write_MMUdomain r3,EQ ;if SC not disabled or suspended, reset screen (domain 1) to fault
ARMA_write_MMUdomain r3,EQ ;if h/w VIDMRD absent, reset screen (domain 1) to fault for VIDMRD emulation
AMB_ccf_nochoc
]
TEQP r1,#0 ;restore IRQ state
MOV pc,lr
......@@ -581,8 +550,9 @@ AMB_ccf_StrongARM_flushrange
MOV pc,lr
] ;ARMSASupport
[ AMB_LazyMapIn
[ AMB_LazyMapIn :LAND: ARMSASupport
; ----------------------------------------------------------------------------------
;
......@@ -613,9 +583,6 @@ AMB_SetMemMapEntries_SparseMapOut ROUT
;if the number of pages mapped in is small enough, we'll do a clean/flush of data
;cache as we go (potentially much cheaper than full clean/flush)
;(Arguably we could check for pending VSync cleans if ChocolateScreen, and always
;choose to do full clean if so. Currently considered best to go for the saving always
;here, with our own - possibly smaller - threshold.)
CMP r3,#AMB_ARMA_CleanSparseRange_thresh:SHR:Log2PageSize
MOVLO r6,#0 ;r6 := 0 if we are to clean/flush as we go
......@@ -725,7 +692,7 @@ AMB_MakeUnsparse ROUT
Pull "r0-r2,r12,pc"
] ;AMB_LazyMapIn
] ;AMB_LazyMapIn :LAND: ARMSASupport
; ----------------------------------------------------------------------------------
;
......
This diff is collapsed.
This diff is collapsed.
......@@ -140,7 +140,6 @@ Signed_Output ROUT
Cardinal_Spaced_Output ROUT
Signed_Spaced_Output
[ StrongARM
SUB sp, sp, #12 ; get 12 byte buffer
Push "r1,r2,lr"
LDR r10,code_of_swi
......@@ -173,50 +172,6 @@ space_conv_exit
code_of_swi
DCD XOS_ConvertCardinal1 - OS_ConvertSpacedCardinal1
|
Push "R1, R2"
; copy our code into the stack (!!)
ADR R1, code_segment
LDMIA R1, {R1, R2, R12}
ADD R2, R2, R11
Pull "R10, R11"
Push "R1, R2, R12"
SUB sp, sp, #12 ; get 12 byte buffer
MOV R1, sp
MOV R2, #12
MOV lr, pc
ADD pc, sp, #12 ; oh for an "execute" instruction!
; note can't get VSet back from this "SWI"
RSB R0, R2, #12 ; bytes got
MOV R1, R10
MOV R2, R11
MOV R12, #0
MOV R11, sp
01 LDRB R10, [R11], #1
BL addconvchar
BVS space_conv_exit
SUBS R0, R0, #1
BEQ space_conv_exit
CMP R10, #"-"
BEQ %BT01
CMP R0, #3
CMPNE R0, #6
CMPNE R0, #9
BNE %BT01
MOV R10, #" "
BL addconvchar
BVC %BT01
space_conv_exit
ADD sp, sp, #12+12
B endconversion
code_segment
Push "lr"
SWI XOS_ConvertCardinal1 - OS_ConvertSpacedCardinal1
Pull "PC"
]
; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
......
......@@ -188,171 +188,8 @@ C15 CN 15
; -------------- ARM 810 only ----------------------------------------------
;
[ ARM810support
;turn off branch prediction
; - the forced mispredicted branch ensures that the predictor is trapped in
; this code segment when turned off
; - corrupts $temp and status flags
;
MACRO
ARM8_branchpredict_off $temp
01
ARM_read_control $temp
BIC $temp,$temp,#&800 ;z bit (branch prediction)
ARM_write_control $temp
SEC ;set carry flag
BCC %BT01
MEND
;turn on branch prediction
MACRO
ARM8_branchpredict_on $temp
ARM_read_control $temp
ORR $temp,$temp,#&800 ;z bit (branch prediction)
ARM_write_control $temp
MEND
;flush branch prediction, which is sufficient for an IMB (instruction memory
;barrier) on ARM 810, BUT...
; - intended for in line use only, where efficiency matters, or SWI call is
; awkward
; - general code should use SWI OS_SynchroniseCodeAreas to implement
; an IMB (instruction memory barrier) in future proof, ARM independent way
; - kernel code may use this without regard to which ARM running - ie. assumed
; harmless on other ARMs
;
MACRO
ARM8_branchpredict_flush
SUB PC,PC,#4 ;flush, because PC is written by data op
MEND
;clean cache entry
; - segment,index spec in $reg
; - bits 4..6 = segment (0..7)
; - bits 26..31 = index (0..63)
; - all other bits zero
MACRO
ARM8_clean_IDCentry $reg,$cond
MCR$cond ARM_config_cp,0,$reg,ARM8A_cache_reg,C11,1
MEND
;flush cache entry - segment,index spec in $reg, as for ARM8_clean_IDCentry
MACRO
ARM8_flush_IDCentry $reg,$cond
MCR$cond ARM_config_cp,0,$reg,ARM8A_cache_reg,C7,1
MEND
;clean and flush cache entry - segment,index spec in $reg, as for ARM8_clean_IDCentry
;
;if ARM810cleanflushbroken is TRUE, interrupts *must* be currently disabled (see below)
;
MACRO
ARM8_cleanflush_IDCentry $reg,$cond
[ ARM810cleanflushbroken
ARM8_clean_IDCentry $reg,$cond
ARM8_flush_IDCentry $reg,$cond
|
MCR$cond ARM_config_cp,0,$reg,ARM8A_cache_reg,C15,1
]
MEND
;fully clean and flush cache (assumes no locked-down entries to preserve)
;
;if ARM810cleanflushbroken is TRUE, then we have to make sure interrupts are disabled during
;the sequence of 2 MCRs that make up ARM8_cleanflush_IDCentry, to avoid an interrupt hole.
;The hole occurs if an interrupt fills and dirties the particular cache entry after the clean
;but before the flush. We don't have this problem with StrongARM, because the entry is
;specified by virtual address, and RISC OS only cleans/flushes address space not currently
;involved in interrupts.
;
[ ARM810cleanflushbroken
MACRO
ARM8_cleanflush_IDC $temp,$temp2
;for simplicity, disable interrupts during entire operation - 26-bit assumed
MOV $temp2,pc
AND $temp2,$temp2,#I_bit
EOR $temp2,$temp2,#I_bit ;temp := <current I> EOR <I set>
TEQP $temp2,pc ;disable I
MOV $temp,#0 ;initial segment and index
01
ARM8_cleanflush_IDCentry $temp
ADD $temp,$temp,#1 :SHL: 26 ;next index
CMP $temp,#1 :SHL: 26 ;last index done if index field wrapped to 0
BHS %BT01
ADD $temp,$temp,#1 :SHL: 4 ;next segment
CMP $temp,#8 :SHL: 4 ;8 segments done?
BLO %BT01
TEQP $temp2,pc ;restore I
MEND
|
MACRO
ARM8_cleanflush_IDC $temp
MOV $temp,#0 ;initial segment and index
01
ARM8_cleanflush_IDCentry $temp
ADD $temp,$temp,#1 :SHL: 26 ;next index
CMP $temp,#1 :SHL: 26 ;last index done if index field wrapped to 0
BHS %BT01
ADD $temp,$temp,#1 :SHL: 4 ;next segment
CMP $temp,#8 :SHL: 4 ;8 segments done?
BLO %BT01
MEND
]
;flush whole TLB (actually, same as ARMA_flush_TLBs)
MACRO
ARM8_flush_TLB $cond
MCR$cond ARM_config_cp,0,R0,ARM8A_TLB_reg,C7,0
MEND
;flush TLB entry, virtual address in $reg
MACRO
ARM8_flush_TLBentry $reg,$cond
MCR$cond ARM_config_cp,0,$reg,ARM8A_TLB_reg,C7,1
MEND
;select external Refclk pin as fast clock (dynamic switching, asynchronous)
MACRO
ARM8_refclk_fclk $temp
MRC ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
BIC $temp, $temp,#&1 ;turn off dynamic bus switching (bit0)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
BIC $temp,$temp,#&2 ;select asynchronous mode (default) (bit1)
ORR $temp,$temp,#&4 ;select REFCLK as the FCLK source (bits3:2)
BIC $temp,$temp,#&10 ;ensure L=0 when writing (PLL locked) (bit4)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
NOP
NOP
NOP
NOP
ORR $temp,$temp,#&1 ;select dynamic clock switching (bit0)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
MEND
;select PLL output as fast clock (dynamic switching, asynchronous)
MACRO
ARM8_pll_fclk $temp
MRC ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
BIC $temp,$temp,#&1 ;turn off dynamic bus switching (bit0)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
BIC $temp,$temp,#&2 ;select asynchronous mode (default) (bit1)
ORR $temp,$temp,#&C ;select PLLClkOut as the FCLK source (bits3:2)
BIC $temp,$temp,#&10 ;ensure L=0 when writing (PLL locked) (bit4)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
NOP
NOP
NOP
NOP
ORR $temp,$temp,#&1 ;select dynamic clock switching (bit0)
MCR ARM_config_cp,0,$temp,ARM8_CTC_reg,C0,0
MEND
] ;ARM810support
;mjs
;now removed ARM810 support as part of kernel clean up (RISC OS 4)
;
; -------------- StrongARM only ------------------------------------------
......
......@@ -25,35 +25,43 @@
$GetMEMM
$GetVIDC
; amg 7/12/96 Renaissance
; Forcibly ensure that options only intended for one class of platform
; stay there. Generally this involves combining switches with the STB
; switch. The exception is processor architectural stuff. 7500FE is
; included always, and StrongARM has an independent switch.
;
; When you want to migrate features from one platform to another you'll
; find that every occurrence of feature switches has been qualified with
; the appropriate sense of the STB switch. This is to remind you to think
; about what you're about to do! Check whether the code actually will work
; at all on something that is or is not a STB class product.
;
; Using the STB switch this aggressively also helps ensure that there's
; no unexpected code crossover in the initial merge.
;whether compiling to run on (Risc PC) emulator
GBLL RunningOnEmul
RunningOnEmul SETL {FALSE} :LAND: STB
RunningOnEmul SETL {FALSE}
; ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
; we can only build for medusa-ish h/w
; ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
ASSERT Keyboard_Type = "PC"
ASSERT CPU_Type = "ARM600"
ASSERT IO_Type = "IOMD"
ASSERT VIDC_Type = "VIDC20"
ASSERT MEMC_Type = "IOMD"
ASSERT MEMM_Type = "ARM600"
; mjs:
; We cannot build for STB. If STB code is ever 're-merged', it is suggested
; that it is done in a more structured way than a basic CVS munge with
; fix-ups. For example, look at separating files for affected, hardware
; dependent areas of kernel. This is a lot of work, but is better than
; the obfuscation caused by pretend hardware abstraction.
;
ASSERT :LNOT: STB
;
; We choose to distinguish IOMD variants by differing IOMD_ID0 bytes only
;
; Note that decision points based on IOMD variant now do exhaustive checks,
; and deliberately hang up in a loop for unrecognised IOMD. The panic
; hang-up loops continually read IOMD_ID0, so that examining the hang up
; with a logic analyser will give a clue. Doing exhaustive checks like
; this may seem a bit verbose, but is much less ad hoc than previous
; hacks, and may make it easier to see how to add support for other
; variants.
;
ASSERT (IOMD_7500FE :AND: &FF) < (IOMD_7500 :AND: &FF)
ASSERT (IOMD_7500 :AND: &FF) < (IOMD_Original :AND: &FF)
ASSERT (IOMD_Original :AND: &FF) < (IOMD_IOMD2 :AND: &FF)
ASSERT CPU_Type = "ARM600"
ASSERT IO_Type = "IOMD"
ASSERT VIDC_Type = "VIDC20"
ASSERT MEMC_Type = "IOMD"
ASSERT MEMM_Type = "ARM600"