Commit 01b119d8 authored by Neil Turton

Import from cleaned 370 CD

parent 02a1dfb6
@@ -14,7 +14,7 @@
|
Dir <Obey$Dir>
wimpslot -min 1000k
echo amu_machine export
echo Do you really want to do this!!
echo its ok ... -n is set
amu_machine export -n
echo amu_machine export PHASE=hdrs
amu_machine export PHASE=hdrs
echo amu_machine export PHASE=libs
amu_machine export PHASE=libs
@@ -17,7 +17,9 @@ wimpslot -min 1000k
echo amu_machine lib.stubs
|amu_machine lib.stubs
echo amu_machine lib.risc_oslib
|amu_machine lib.risc_oslib
amu_machine lib.risc_oslib
echo amu_machine lib.clib
amu_machine lib.clib
|amu_machine lib.clib
echo amu_machine lib.ansilib
amu_machine lib.ansilib
echo MkLibs: all done
| Copyright 1996 Acorn Computers Ltd
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
|
Dir <Obey$Dir>
wimpslot -min 1000k
echo amu_machine lib.clib
amu_machine lib.clib
@@ -657,10 +657,16 @@ void _backtrace(int why, int *address, _kernel_unwindblock *uwb)
{ int *z, i, nargs, *argp;
char *name = 0;
int *fp = (int *) uwb->fp;
_kernel_swi_regs r;
if (lang[0]=='C' && lang[1]==0) {
z = (int *)((fp[0] & 0x03fffffc) - 12);
z = (int *)(fp[0] & 0x03fffffc);
/* Note that when I save pc in a STM instruction it points 12 beyond the */
/* instruction, not just 8! */
/* instruction, not just 8! Unless it's a StrongARM or similar. */
r.r[0] = 0;
if (!_kernel_swi(OS_PlatformFeatures, &r, &r) && (r.r[0] & 8))
z -= 2;
else
z -= 3;
/* If the word before the STM is itself STM sp!, {a1-a4} that shows */
/* where I should find args, and suggests that there are >= 5. */
/* (this needs to work whether sp is r12 or r13) */
@@ -681,7 +687,6 @@ void _backtrace(int why, int *address, _kernel_unwindblock *uwb)
/* Print args from the highest one downwards, in hex and decimal */
argp += nargs;
while (nargs!=0) {
_kernel_swi_regs r;
int v = *(--argp);
int carry;
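For reference, the new PC-offset test in _backtrace can be pulled out into a stand-alone routine. The following C sketch simply restates the fragment above; it assumes OS_PlatformFeatures is SWI &6D and that bit 3 of the reason-0 flags word indicates a core (StrongARM or similar) that stores PC+8 rather than PC+12 in an STM.

#include <kernel.h>

#ifndef OS_PlatformFeatures
#define OS_PlatformFeatures 0x6D        /* assumed SWI number */
#endif

/* Distance in bytes from an STM-saved PC value back to the STM itself:
   12 on classic ARMs, 8 where OS_PlatformFeatures reason 0 sets bit 3. */
static int stored_pc_offset(void)
{
    _kernel_swi_regs r;
    r.r[0] = 0;                         /* reason 0: read code features */
    if (!_kernel_swi(OS_PlatformFeatures, &r, &r) && (r.r[0] & 8))
        return 8;                       /* StrongARM-style behaviour */
    return 12;                          /* older cores save PC+12 */
}

The same 12-versus-8 choice reappears below in default_unwind_handler and in the stack-overflow code, where the assembler makes the equivalent XOS_PlatformFeatures call inline.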
@@ -23,6 +23,7 @@
GET s.h_stack
GET s.h_workspc
GET s.h_StrongA
EXPORT |_kernel_exit|
EXPORT |_kernel_setreturncode|
@@ -425,6 +426,12 @@ movne_sl_sla
ADR r5, AEHandlerInDataInitValue
BL CopyHandler
[ StrongARM ;CopyHandler does some dynamic code
;r0,lr are free to use here
MOV r0, #0
SWI XOS_SynchroniseCodeAreas
]
MOV r0, #0
BL InstallHandlers
@@ -471,6 +478,8 @@ NoMainProgram
FatalError Keep
SWI GenerateError
;StrongARM - there is dynamic code here, but this is sorted in _kernel_init, after
;all calls to CopyHandler
CopyHandler
LDMIA r5!, {r6, r7}
STMIA r4!, {r6, r7}
@@ -766,7 +775,15 @@ Aborted Keep
STMFD r13!, {r14} ; remember the abort pc
BL |_kernel_copyerror|
ADD r14, r12, #O_registerDump
[ SAnaffsilicon
NOP
NOP
]
STMIB r14, {r1-r14}^
[ SAnaffsilicon
NOP
NOP
]
LDMFD r13!, {r1, r2, r3}
TST r1, #3
LDRNE r1, [r14, #lr * 4]
@@ -1350,6 +1367,10 @@ ErrorHandler Keep
; we switch back to SWI mode to save the registers.
SWI EnterSVC
ADD r14, r0, #O_registerDump
[ SAnaffsilicon
NOP
NOP
]
STMIA r14, {r0-r14}^
MOV r12, r0
ADD r0, r0, #O_errorNumber
@@ -1382,6 +1403,10 @@ EventHandler Keep
STR r0, [r12, #O_eventCode]
ADD r11, r12, #O_eventRegisters
STMIA r11, {r0-r10, r13}
[ SAnaffsilicon
NOP
NOP
]
STMDB r11, {r13}^
MOV v6, r12
MOV v2, r11
@@ -1601,6 +1626,9 @@ ReloadUserState
NOOP
ADD r14, r0, #pc*4
LDMDB r14, {r0-r14}^
[ SAnaffsilicon
NOP
]
NOOP
LDMIA r14, {pc}^
@@ -1715,7 +1743,18 @@ default_unwind_handler Keep
LDR a3, [a4, #frame_entrypc]
BIC a3, a3, #PSRBits
[ StrongARM
STMFD sp!, {a1-a2}
MOV a1, #0
SWI XOS_PlatformFeatures
MOVVS a1, #0
TST a1, #8
LDREQ v1, [a3, #-12]
LDRNE v1, [a3, #-8]
LDMFD sp!, {a1-a2}
|
LDR v1, [a3, #-12]
]
; check that the save mask instruction is indeed the right sort of STM
; If not, return indicating stack corruption.
@@ -1807,6 +1846,25 @@ duh_corrupt
ExitLeafProcContainingSWI
|_kernel_swi_c|
[ StrongARM
MOV ip, sp
STMFD sp!, {a3, a4, v1-v6, fp, ip, r14, pc}
SUB fp, ip, #4
BIC r12, a1, #&80000000
TST a1, #&80000000 ; non-X bit requested?
ORREQ r12, r12, #X
LDMIA r1, {r0-r9}
SWI XOS_CallASWIR12
LDMFD sp!, {ip, lr}
STMIA ip, {r0 - r9}
MOV ip, #0
MOVCS ip, #1
MOVVS ip, #0
STR ip, [lr]
BLVS CopyError
MOVVC a1, #0
LDMDB fp, {v1-v6, fp, sp, pc}^
|
; Set up a proper frame here, so if an error happens (and not X)
; a sensible traceback can be given.
MOV ip, sp
@@ -1822,6 +1880,9 @@ duh_corrupt
ORR a1, a1, #&EF000000 ; SWI + Always
ORREQ a1, a1, #X
STMFD sp!, {a1, a4}
[ {TRUE}
SyncStackCode 2
]
LDMIA a2, {r0 - r9}
MOV pc, sp
AfterSWI
@@ -1835,17 +1896,34 @@ AfterSWI
BLVS CopyError
MOVVC a1, #0
LDMDB fp, {v1-v6, fp, sp, pc}^
]
swi_ret_inst
MOV pc, ip
|_kernel_swi|
[ StrongARM
STMDB sp!, {a3, v1-v6, lr}
BIC r12, a1, #&80000000
TST a1, #&80000000
ORREQ r12, r12, #X
LDMIA r1, {r0-r9}
SWI XOS_CallASWIR12
LDR ip, [sp]
STMIA ip, {r0-r9}
BLVS CopyError
MOVVC a1, #0
LDMIA sp!, {a3, v1-v6, pc}^
|
STMDB sp!, {a3, v1-v6, lr}
LDR a4, swi_ret_inst
TST a1, #&80000000
ORR a1, a1, #&ef000000
ORREQ a1, a1, #X
STMDB sp!, {a1, a4}
[ {TRUE}
SyncStackCode 2
]
LDMIA a2, {r0-r9}
MOV ip, pc
MOV pc, sp
@@ -1854,6 +1932,8 @@ swi_ret_inst
BLVS CopyError
MOVVC a1, #0
LDMIA sp!, {a3, v1-v6, pc}^
]
|_kernel_command_string|
LoadStaticBase a1
@@ -2052,6 +2132,12 @@ CopyUp CMP r14, #0
STMDB v5!, {r0,r2-r4}
CMP r1, v4 ; r1 < %B01 ?
BGT %B01 ; no, so keep going...
[ StrongARM
;in case we are jumping to code we have just copied here (ie not shared Clib)...
CMP v2, #0
MOVNE r0, #0
SWINE XOS_SynchroniseCodeAreas
]
ADD r0, pc, v2 ; ... go to moved image
MOV pc, r0 ; and continue copying up...
01 LDMDB r1!, {r0,r2-r4}
@@ -2059,6 +2145,9 @@ CopyUp CMP r14, #0
CMP r1, v3 ; src > imageBase ?
BGT %B01 ; yes, so continue
;StrongARM - no need to synchronise for rest of copied code here, since we will not
;be executing it (we have to synchronise later, after copying down)
CopyUpDone
; ip is the relocated sp.
LDR r0, [ip, #4] ; chain/subr
@@ -2136,6 +2225,14 @@ CopyDn
STMIA v2!, {r1-r3,ip}
CMP r0, v3 ; copied the copy code?
BLT %B01 ; no, so continue...
[ StrongARM
;in case we are jumping to code we have just copied here (ie not shared Clib)...
MOV r1, r0
CMP v1, #0
MOVNE r0, #0
SWINE XOS_SynchroniseCodeAreas
MOV r0, r1
]
SUB ip, pc, v1 ; yes => copied this far ...
MOV pc, ip ; ... so branch to copied copy loop
01 LDMIA r0!, {r1-r3,ip}
@@ -2144,6 +2241,11 @@ CopyDn
BLT %B01 ; no, so continue...
02
CopyDnDone
[ StrongARM
;you've guessed it
MOV r0, #0
SWI XOS_SynchroniseCodeAreas
]
LDMFD sp!, {r0} ; old memoryLimit
BL InstallHandlers
@@ -2426,7 +2528,18 @@ alloc_return_block
; v1 is save area in old frame... will be temp sp in old frame
ADD a1, v1, #4*4 ; temp fp in old frame
LDMDA fp, {v3-v6} ; old fp, sp,lr, pc
[ StrongARM
STMFD sp!,{a1-a2}
MOV a1,#0
SWI XOS_PlatformFeatures
MOVVS a1,#0
TST a1,#8
ADREQ v6, StkOvfPseudoEntry+12
ADRNE v6, StkOvfPseudoEntry+8
LDMFD sp!,{a1-a2}
|
ADR v6, StkOvfPseudoEntry+12
]
STMDA a1, {v3-v6} ; new return frame in old chunk...
ADR lr, StackOverflowExit
MOV a2, sp ; saved sp in old frame = NEW sp
@@ -2497,7 +2610,16 @@ DoneArgumentCopy
LDMDA v1!, {a1, a2, v2-v6} ; and the 2nd 7 regs
STMFD sp!, {a1, a2, v2-v6} ; copied to the new frame
; Now adjust the PC value saved in the old chunk to say "no registers"
[ StrongARM
MOV a1,#0
SWI XOS_PlatformFeatures
MOVVS a1,#0
TST a1,#8
ADREQ v2, StkOvfPseudoEntry+12
ADRNE v2, StkOvfPseudoEntry+8
|
ADR v2, StkOvfPseudoEntry+12
]
STR v2, [v1, #26*4]
; Set the SP to be FP - requiredFrameSize and return by reloading regs
; from where they were saved in the old chunk on entry to STKOVF/N
@@ -2911,6 +3033,9 @@ dividebyzero
SWI EnterSVC
LDR r14, [ip, #pc * 4]
LDMIB ip, {r1-r14}^
[ SAnaffsilicon
NOP
]
NOOP
STMDB sp!, {r10, r11, r12}
STMDB sp!, {r14}
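All of the XOS_SynchroniseCodeAreas calls added in this file serve the same purpose: StrongARM has separate instruction and data caches, so after the kernel writes code (the copied handlers, the image moved by CopyUp/CopyDn) it must synchronise the caches before branching into it. A minimal C equivalent of the R0 = 0 "synchronise everything" form used above, assuming the SWI number is &6E:

#include <kernel.h>

#ifndef OS_SynchroniseCodeAreas
#define OS_SynchroniseCodeAreas 0x6E    /* assumed SWI number */
#endif

/* Bring the instruction cache back in step with memory after writing
   code; R0 = 0 requests a full synchronise, as the assembler above does. */
static void sync_all_code(void)
{
    _kernel_swi_regs r;
    r.r[0] = 0;
    _kernel_swi(OS_SynchroniseCodeAreas, &r, &r);
}

Every call shown in the diff passes R0 = 0, i.e. the full synchronise.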
@@ -30,10 +30,175 @@ lr RN 14
pc RN 15
GET s.h_StrongA
AREA |C$$code|, CODE, READONLY
EXPORT |_swix|
EXPORT |_swi|
[ StrongARM
; tedious static _swi(x) entry handling, to avoid generating dynamic code, and
; requiring an expensive XOS_SynchroniseCodeAreas
|_swix|
ORR r0, r0, #&20000
TST r1, #&FF0 ; check for use of input regs. 4 to 9, or of block param
BNE swix_even_more_tedious ; if so, do full stuff
STMFD sp!, {r2, r3} ; put 1st two variadic args on stack
STMDB sp!, {r1, r4-r9, lr} ; save stuff
SUB sp, sp, #5*4 ; so we can use tail code common with dynamic version (and room for regs stash)
ADD r14, sp, #(5+8)*4 ; r14 -> input args
MOV r12, r0 ; target SWI code
STR fp, [sp] ; stash fp
MOV r11, r1
TST r11, #&001
LDRNE r0, [r14], #4
TST r11, #&002
LDRNE r1, [r14], #4
TST r11, #&004
LDRNE r2, [r14], #4
TST r11, #&008
LDRNE r3, [r14], #4
STR r14, [sp, #4] ; stash args ptr
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ; restore (ip -> args)
B SWIXReturn
swix_even_more_tedious
|_swi|
STMFD sp!, {r2, r3} ; put 1st two variadic args on stack
STMDB sp!, {r1, r4-r9, lr} ; save stuff
SUB sp, sp, #5*4 ; so we can use tail code common with dynamic version (and room for regs stash)
ADD r14, sp, #(5+8)*4 ; r14 -> input args
MOV r12, r0 ; target SWI code
STR fp, [sp] ; stash fp
MOV r11, r1
TST r11, #&001
LDRNE r0, [r14], #4
TST r11, #&002
LDRNE r1, [r14], #4
TST r11, #&004
LDRNE r2, [r14], #4
TST r11, #&008
LDRNE r3, [r14], #4
TST r11, #&010
LDRNE r4, [r14], #4
TST r11, #&020
LDRNE r5, [r14], #4
TST r11, #&040
LDRNE r6, [r14], #4
TST r11, #&080
LDRNE r7, [r14], #4
TST r11, #&100
LDRNE r8, [r14], #4
TST r11, #&200
LDRNE r9, [r14], #4
STR r14, [sp, #4] ; stash args ptr
TST r11, #&800 ; use of block parameter input?
BLNE swi_blockhead ; if so, handle it and...
LDRNE r14, [sp, #4] ; ...restore arg ptr
TST r12, #&20000 ; if non X SWI, could be a return value register
BEQ swi_beyond_a_joke
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ; restore (ip -> args)
B SWIXReturn
swi_beyond_a_joke
;so we have to deal with a return value then
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ;restore (ip -> args)
STR pc, [sp, #4*4]!
LDR lr, [sp, #1*4]
;right, if R0 is also required as an output param, we'd better sort that first
TST lr,#&80000000
BEQ swi_beyond_a_joke_R0safe
LDRNE lr, [r12], #4
STRNE r0, [lr]
LDR lr, [sp, #1*4]
BIC lr,lr,#&80000000 ;done it now
STR lr, [sp, #1*4]
swi_beyond_a_joke_R0safe
ANDS lr, lr, #&000F0000 ;select return value register
BEQ SWIReturn2
CMP lr, #&00010000
MOVEQ r0, r1
CMP lr, #&00020000
MOVEQ r0, r2
CMP lr, #&00030000
MOVEQ r0, r3
CMP lr, #&00040000
MOVEQ r0, r4
CMP lr, #&00050000
MOVEQ r0, r5
CMP lr, #&00060000
MOVEQ r0, r6
CMP lr, #&00070000
MOVEQ r0, r7
CMP lr, #&00080000
MOVEQ r0, r8
CMP lr, #&00090000
MOVEQ r0, r9
CMP lr, #&000F0000 ;for goodness sake!
LDREQ r0, [sp]
B SWIReturn2
swi_blockhead
STMFD sp!, {r10-r12, lr}
LDR r12, [sp, #(4+1)*4] ;pick up args ptr from stack
;r12 currently -> first output arg, so crank it past them
MOVS r11, r11, ASL #1
ADDCS r12, r12, #4 ;tests R0 output bit
ADDMI r12, r12, #4 ;tests R1 output bit
MOV r10, #5 ;5 more reg bit pairs to go (includes PC and one dummy)
swi_blockhead1
MOVS r11, r11, ASL #2
ADDCS r12, r12, #4
ADDMI r12, r12, #4
SUBS r10, r10, #1
BNE swi_blockhead1
;now r12 -> parameter block args on stack
LDR r11, [sp,#4]
ANDS r11, r11, #&f000 ;select reg for parameter block pointer
MOVEQ r0, r12
CMP r11, #&1000
MOVEQ r1, r12
CMP r11, #&2000
MOVEQ r2, r12
CMP r11, #&3000
MOVEQ r3, r12
CMP r11, #&4000
MOVEQ r4, r12
CMP r11, #&5000
MOVEQ r5, r12
CMP r11, #&6000
MOVEQ r6, r12
CMP r11, #&7000
MOVEQ r7, r12
CMP r11, #&8000
MOVEQ r8, r12
CMP r11, #&9000
MOVEQ r9, r12
LDMFD sp!, {r10-r12, pc}^ ;must restore flags
] ; StrongARM
[ :LNOT: StrongARM
|_swi|
; Construct a stack frame that looks something like this:
@@ -73,9 +238,16 @@ swix0
ADD r6, r6, #&ea000000
STMDB sp!, {r0,r2,r3,r5,r6}
ADD r12, sp, #(5+8)*4 ; Point R12 at input regs on stack.
[ StrongARMfudge
; so that dynamic version would at least work
SyncStackCode 5
]
MOV pc, sp ; Call routine on stack
SWIReturn
STR pc, [sp, #4*4]!
] ; not StrongARM
SWIReturn2
LDR lr, [sp, #1*4]
MOVS lr, lr, ASL #1 ; Shift out setting C if R0 to be written, N
LDRCS lr, [r12], #4 ; if R1 to be written.
@@ -129,6 +301,7 @@ VSetReturn
ADD sp, sp, #2 * 4
MOVS pc, lr
[ :LNOT: StrongARM
BuildBlockInst
MOV r4, #6
AND r2, r1, #&f000
@@ -141,5 +314,6 @@ BuildBlockInst1
SUBS r4, r4, #1
BNE BuildBlockInst1
MOVS pc, lr
]
END
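Both versions of this veneer decode the same caller-supplied flag word. Reading the bit tests above: bits 0-9 say which of R0-R9 are loaded from the variadic arguments (TST #&001 ... #&200), bit &800 says a parameter block follows with its destination register selected by bits 12-15, bits 16-19 select the register whose value _swi returns (&F apparently meaning the processor flags), and the top bits, shifted out in SWIReturn2, mark which registers are stored back through output pointers. The sketch below is only a C model of the input-register unpacking, inferred from those tests; it is not a definition of the real <swis.h> encoding.

#include <kernel.h>

/* Illustrative only: load the SWI input registers from a flat argument
   array according to the low ten bits of the flag word, mirroring the
   LDRNE chain in the veneer above. */
static void unpack_swi_inputs(unsigned flags, const int *args, _kernel_swi_regs *r)
{
    int i;
    for (i = 0; i <= 9; i++)
        if (flags & (1u << i))          /* TST r11, #&001 ... TST r11, #&200 */
            r->r[i] = *args++;
    /* A real call would then also handle the &800 block-parameter bit and
       the output/return bits, as the assembler does after this point. */
}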
; Copyright 1996 Acorn Computers Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
r0 RN 0
r1 RN 1
r2 RN 2
r3 RN 3
r4 RN 4
r5 RN 5
r6 RN 6
r7 RN 7
r8 RN 8
r9 RN 9
r10 RN 10
r11 RN 11
r12 RN 12
sp RN 13
lr RN 14
pc RN 15
GET s.h_StrongA
AREA |C$$code|, CODE, READONLY
EXPORT |_swix|
EXPORT |_swi|
[ StrongARM
; tedious static _swi(x) entry handling, to avoid generating dynamic code, and
; requiring an expensive XOS_SynchroniseCodeAreas
|_swix|
ORR r0, r0, #&20000
TST r1, #&FF0 ; check for use of input regs. 4 to 9, or of block param
BNE swix_even_more_tedious ; if so, do full stuff
STMFD sp!, {r2, r3} ; put 1st two variadic args on stack
STMDB sp!, {r1, r4-r9, lr} ; save stuff
SUB sp, sp, #5*4 ; so we can use tail code common with dynamic version (and room for regs stash)
ADD r14, sp, #(5+8)*4 ; r14 -> input args
MOV r12, r0 ; target SWI code
STR fp, [sp] ; stash fp
MOV r11, r1
TST r11, #&001
LDRNE r0, [r14], #4
TST r11, #&002
LDRNE r1, [r14], #4
TST r11, #&004
LDRNE r2, [r14], #4
TST r11, #&008
LDRNE r3, [r14], #4
STR r14, [sp, #4] ; stash args ptr
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ; restore (ip -> args)
B SWIXReturn
swix_even_more_tedious
|_swi|
STMFD sp!, {r2, r3} ; put 1st two variadic args on stack
STMDB sp!, {r1, r4-r9, lr} ; save stuff
SUB sp, sp, #5*4 ; so we can use tail code common with dynamic version (and room for regs stash)
ADD r14, sp, #(5+8)*4 ; r14 -> input args
MOV r12, r0 ; target SWI code
STR fp, [sp] ; stash fp
MOV r11, r1
TST r11, #&001
LDRNE r0, [r14], #4
TST r11, #&002
LDRNE r1, [r14], #4
TST r11, #&004
LDRNE r2, [r14], #4
TST r11, #&008
LDRNE r3, [r14], #4
TST r11, #&010
LDRNE r4, [r14], #4
TST r11, #&020
LDRNE r5, [r14], #4
TST r11, #&040
LDRNE r6, [r14], #4
TST r11, #&080
LDRNE r7, [r14], #4
TST r11, #&100
LDRNE r8, [r14], #4
TST r11, #&200
LDRNE r9, [r14], #4
STR r14, [sp, #4] ; stash args ptr
TST r11, #&800 ; use of block parameter input?
BLNE swi_blockhead ; if so, handle it and...
LDRNE r14, [sp, #4] ; ...restore arg ptr
TST r12, #&20000 ; if non X SWI, could be a return value register
BEQ swi_beyond_a_joke
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ; restore (ip -> args)
B SWIXReturn
swi_beyond_a_joke
SWI XOS_CallASWIR12
LDMIA sp, {fp, ip} ; restore (ip -> args)
STR pc, [sp, #4*4]!
LDR lr, [sp, #1*4]
ANDS lr, lr, #&000F0000 ; select return value register
BEQ SWIReturn2
CMP lr, #&00010000
MOVEQ r0, r1
CMP lr, #&00020000
MOVEQ r0, r2
CMP lr, #&00030000
MOVEQ r0, r3
CMP lr, #&00040000
MOVEQ r0, r4
CMP lr, #&00050000
MOVEQ r0, r5
CMP lr, #&00060000
MOVEQ r0, r6
CMP lr, #&00070000
MOVEQ r0, r7
CMP lr, #&00080000
MOVEQ r0, r8
CMP lr, #&00090000
MOVEQ r0, r9
CMP lr, #&000F0000 ; for goodness sake!
LDREQ r0, [sp]
B SWIReturn2
swi_blockhead
STMFD sp!, {r10-r12, lr}
LDR r12, [sp, #(4+1)*4] ; pick up args ptr from stack
;r12 currently -> first output arg, so crank it past them
MOV r10, #6
swi_blockhead1
MOVS r11, r11, ASL #2
ADDCS r12, r12, #4
ADDMI r12, r12, #4
SUBS r10, r10, #1
BNE swi_blockhead1
;now r12 -> parameter block args on stack
LDR r11, [sp,#4]
ANDS r11, r11, #&f000 ;select reg for parameter block pointer
MOVEQ r0, r12
CMP r11, #&1000
MOVEQ r1, r12
CMP r11, #&2000
MOVEQ r2, r12
CMP r11, #&3000
MOVEQ r3, r12
CMP r11, #&4000
MOVEQ r4, r12
CMP r11, #&5000
MOVEQ r5, r12
CMP r11, #&6000
MOVEQ r6, r12
CMP r11, #&7000
MOVEQ r7, r12
CMP r11, #&8000
MOVEQ r8, r12
CMP r11, #&9000
MOVEQ r9, r12
LDMFD sp!, {r10-r12, pc}^ ;must restore flags
] ; StrongARM
[ :LNOT: StrongARM
|_swi|
; Construct a stack frame that looks something like this:
; LDMIA r12!, {r0..rn} ; Or NOP if no input regs
; ADD Rb, R12, #Nout * 4 ; Or NOP if no parameter block
; SWI xxxxxx
; MOV R0, Rn ; Use ADD because Rn is correct bitfield
; B SWIReturn
; saved r4-r11,lr
; saved r1
; saved input values (r2...rn)
STMFD sp!, {r2-r3} ; Save r1 and put 1st two variadic args on stack
STMDB sp!, {r1, r4-r9, lr}
ADR r6, SWIReturn-4
B swix0
|_swix|