; Copyright 1996 Acorn Computers Ltd ; ; Licensed under the Apache License, Version 2.0 (the "License"); ; you may not use this file except in compliance with the License. ; You may obtain a copy of the License at ; ; http://www.apache.org/licenses/LICENSE-2.0 ; ; Unless required by applicable law or agreed to in writing, software ; distributed under the License is distributed on an "AS IS" BASIS, ; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ; See the License for the specific language governing permissions and ; limitations under the License. ; ; -*- Mode: Assembler -*- ;* Body of shared library kernel for Arthur/Brazil ;* Lastedit: 13 Dec 90 14:39:20 by Harry Meekings * ; ; Copyright (C) Acorn Computers Ltd., 1988. ; ; 23-Sep-94 AMcC __rt_ symbols defined and exported (compatible with cc vsn 5 etc) ; GET s.h_stack GET s.h_workspc GET s.h_StrongA EXPORT |_kernel_exit| EXPORT |_kernel_setreturncode| EXPORT |_kernel_exittraphandler| EXPORT |_kernel_unwind| EXPORT |_kernel_procname| EXPORT |_kernel_language| EXPORT |_kernel_command_string| EXPORT |_kernel_hostos| EXPORT |_kernel_swi| EXPORT |_kernel_swi_c| EXPORT |_kernel_osbyte| EXPORT |_kernel_osrdch| EXPORT |_kernel_oswrch| EXPORT |_kernel_osbget| EXPORT |_kernel_osbput| EXPORT |_kernel_osgbpb| EXPORT |_kernel_osword| EXPORT |_kernel_osfind| EXPORT |_kernel_osfile| EXPORT |_kernel_osargs| EXPORT |_kernel_oscli| EXPORT |_kernel_last_oserror| EXPORT |_kernel_system| EXPORT |_kernel_getenv| EXPORT |_kernel_setenv| EXPORT |_kernel_register_allocs| EXPORT |_kernel_register_slotextend| EXPORT |_kernel_alloc| EXPORT |_kernel_current_stack_chunk| EXPORT |_kernel_stkovf_split_0frame| EXPORT |_kernel_stkovf_split| EXPORT |_kernel_stkovf_copyargs| EXPORT |_kernel_stkovf_copy0args| EXPORT |_kernel_udiv| EXPORT |_kernel_urem| EXPORT |_kernel_udiv10| EXPORT |_kernel_sdiv| EXPORT |_kernel_srem| EXPORT |_kernel_sdiv10| EXPORT |_kernel_escape_seen| EXPORT |_kernel_init| EXPORT 
|_kernel_client_is_module| [ ModeMayBeNonUser EXPORT |_kernel_entermodule| EXPORT |_kernel_moduleinit| EXPORT |_kernel_irqs_on| EXPORT |_kernel_irqs_off| EXPORT |_kernel_irqs_disabled| ] EXPORT |_kernel_processor_mode| EXPORT |_kernel_RMAalloc| EXPORT |_kernel_RMAfree| EXPORT |_kernel_RMAextend| EXPORT |_kernel_fpavailable| EXPORT |_kernel_call_client| EXPORT |_kernel_raise_error| EXPORT |__rt_udiv10| EXPORT |__rt_sdiv10| EXPORT |__counter| PSRBits * &FC000003 PSRZBit * &40000000 PSRVBit * &10000000 PSRIBit * &08000000 PSRSVCMode * &00000003 OSBase * &1800000 ; A RTS$$Data area ^ 0 lang_size # 4 lang_codeBase # 4 lang_codeLimit # 4 lang_name # 4 lang_Init # 4 lang_Finalise # 4 lang_Trap # 4 lang_UncaughtTrap # 4 lang_Event # 4 lang_UnhandledEvent # 4 lang_FastEvent # 4 lang_Unwind # 4 lang_ProcName # 4 ; a _kernel_unwindblock ^ 0 uwb_r4 # 4 uwb_r5 # 4 uwb_r6 # 4 uwb_r7 # 4 uwb_r8 # 4 uwb_r9 # 4 uwb_fp # 4 uwb_sp # 4 uwb_pc # 4 uwb_sl # 4 uwb_f4 # 3*4 uwb_f5 # 3*4 uwb_f6 # 3*4 uwb_f7 # 3*4 uwb_size # 0 Wimp_ReadSysInfo EQU X+&400f2 Wimp_SlotSize EQU X+&400ec ; Not needed anymore since behaviour under debugger is now identical ;Debugger_BeingDebugged EQU X+&41d41 [ DDE DDEUtils_GetCLSize EQU X+&42583 DDEUtils_GetCL EQU X+&42584 ] MACRO NOOP MOV r0, r0 MEND MACRO CallClient $r ; Always called with v6 = the kernel static base. ; If shared library, need to worry about calling standard change. 
[ (:LNOT:SharedLibrary) MOV lr, pc MOV pc, $r | LDRB ip, [v6, #O_APCS_A_Client] CMP ip, #0 MOVNE r12, sp MOVNE r13, sl MOVNE r10, fp MOV lr, pc MOV pc, $r MOVNE fp, r10 MOVNE sl, r13 MOVNE sp, r12 ] MEND |_kernel_call_client| [ (:LNOT:SharedLibrary) MOV pc, a4 | STMFD sp!, {v6, lr} LoadStaticBase v6 LDRB ip, [v6, #O_APCS_A_Client] CMP ip, #0 MOVNE r12, sp movne_sla_sl MOVNE r13, sl MOVNE r10, fp MOV lr, pc MOV pc, a4 MOVNE fp, r10 movne_sl_sla MOVNE sl, r13 MOVNE sp, r12 LDMFD sp!, {v6, pc}^ ] [ ModeMayBeNonUser |_kernel_moduleinit| ; Preserve r9 (== v6) across this call ADD sl, r1, #SC_SLOffset LoadStaticBase ip, r1 LDMIB r0, {r1-r2} MOV r0, #0 STMIA ip, {r0-r2} ADR r0, |_kernel_RMAalloc| STR r0, [ip, #O_allocProc] ; default alloc proc ADR r0, |_kernel_RMAfree| STR r0, [ip, #O_freeProc] ; and no dealloc proc LDMFD sp!, {pc}^ |_kernel_irqs_disabled| ANDS a1, r14, #PSRIBit MOVS pc, r14 |_kernel_RMAalloc| STMFD sp!, {r14} MOVS r3, a1 LDMEQFD sp!, {pc}^ MOVNE r0, #Module_Claim SWINE Module MOVVS a1, #0 MOVVC a1, r2 LDMFD sp!, {pc}^ |_kernel_RMAextend| CMP a1, #0 MOVEQ a1, a2 BEQ |_kernel_RMAalloc| CMP a2, #0 BEQ |_kernel_RMAfree| STMFD sp!, {r14} LDR r3, [a1, #-4] SUB r3, r3, #4 ; Correct to useable size SUB r3, a2, r3 MOV r2, a1 MOV r0, #Module_Extend SWI Module MOVVS a1, #0 MOVVC a1, r2 LDMFD sp!, {pc}^ |_kernel_RMAfree| STMFD sp!, {r14} MOVS r2, a1 MOVNE r0, #Module_Free SWINE Module MOV a1, #0 LDMFD sp!, {pc}^ | |_kernel_RMAalloc| |_kernel_RMAextend| |_kernel_RMAfree| MOV a1, #0 MOVS pc, r14 ] |_kernel_irqs_on| BICS pc, r14, #PSRIBit |_kernel_irqs_off| ORRS pc, r14, #PSRIBit |_kernel_processor_mode| AND a1, r14, #3 ; the question is anyway about the caller's MOVS pc, r14 ; state, not ours - the answers are probably ; the same. |_kernel_client_is_module| LoadStaticBase a1, ip TST r14, #PSRSVCMode MOVNE a1, #0 LDREQ a1, [a1, #O_moduleDataWord] MOVS pc, r14 [ ModeMayBeNonUser |_kernel_entermodule| ; user entry to a module. 
Need to allocate a stack in application ; workspace. ; r0 points to a kernel init block ; (for old stubs) r12 is the module's private word pointer. ; (for new stubs) r12 is -1 ; r8 is the module's private word pointer ; r6 is the requested root stack size MOV r9, r0 SWI GetEnv MOV r4, r1 MOV r1, #Application_Base CMPS r12, #0 MOVLT r12, r8 MOVGE r6, #OldRootStackSize STR r6, [r1, #SC_size] LDR r12, [r12] LDMIB r12, {r2, r3} ; relocation offsets ADD r5, r1, #SC_SLOffset+SL_Lib_Offset STMIA r5, {r2, r3} ; transfer to user stack chunk ADD r2, r1, r6 MOV r3, #1 ; 'is a module' flag MOV r0, r9 ] |_kernel_init| ; r0 points to a kernel init block. ; r1 = base of root stack chunk ; r2 = top of root stack chunk (= initial sp) ; r3 = 0 if ordinary application, 1 if module ; r4 = end of workspace (= heap limit). ; Always in user mode. MOV sp, r2 ADD sl, r1, #SC_SLOffset MOV fp, #0 ; mark base of stack STR fp, [r1, #SC_next] STR fp, [r1, #SC_prev] STR fp, [r1, #SC_deallocate] LDR r5, =IsAStackChunk STR r5, [r1, #SC_mark] LoadStaticBase v6, ip STR r1, [v6, #O_heapBase] STR r1, [v6, #O_rootStackChunk] LDMIA r0, {r0-r2} CMP r3, #0 ; if module, imagebase (in RMA) isn't MOVNE r0, #Application_Base ; interesting STMIA v6, {r0-r2} ; Copy the argument string (in SWI mode), so we can access it ; (assumed in page 0), while all user access to page 0 is disabled MOV r6, sp ; (sp may be different in SWI mode) SWI EnterSVC SWI GetEnv MOV r1, r0 01 LDRB r5, [r1], #+1 [ DDE CMP r5, #' ' ; I seem to be getting LF terminated BCS %B01 ; commands. I don't know why. | CMP r5, #0 BNE %B01 ] SUB r1, r1, r0 ADD r1, r1, #3 BIC r1, r1, #3 02 SUBS r1, r1, #4 LDR r5, [r0, r1] STR r5, [r6, #-4]! 
BNE %B02 TEQP pc, #0 ; back to user mode [ DDE NOOP SWI DDEUtils_GetCLSize MOVVS r0, #0 CMP r0, #0 BEQ %F04 ADD r0, r0, #3 BIC r0, r0, #3 SUB r1, r6, r0 MOV r0, r1 03 LDRB r2, [r6], #+1 CMP r2, #' ' MOVCC r2, #' ' STRB r2, [r0], #+1 BCS %B03 SWI DDEUtils_GetCL MOV r6, r1 04 | NOOP ] STR r6, [v6, #O_ArgString] ADRL r0, |_kernel_malloc| STR r0, [v6, #O_allocProc] ; default alloc proc ADRL r0, |_kernel_free| STR r0, [v6, #O_freeProc] ; and no dealloc proc ; set up a small stack chunk for use in performing stack extension. ; We needn't bother with most of the fields in the description of ; this chunk - they won't ever be used. We must set mark (to be ; not IsAStackChunk) and SL_xxx_Offset. Also prev. STR sp, [v6, #O_extendChunk] ADD r0, sl, #SL_Lib_Offset LDMIA r0, {r1, r2} ADD r0, sp, #SC_SLOffset+SL_Lib_Offset STMIA r0, {r1, r2} STR sp, [sp, #SC_mark] STR fp, [sp, #SC_prev] ; 0 to mark end of chain MOV r0, #ExtendStackSize STR r0, [sp, #SC_size] ADD sp, sp, #ExtendStackSize STR sp, [v6, #O_heapTop] ; save updated value for heap base STR r4, [v6, #O_heapLimit] MOV sp, r6 MOV r0, #1 STRB r0, [v6, #O_callbackInactive] STR fp, [v6, #O_hadEscape] STR r3, [v6, #O_moduleDataWord] STRB fp, [v6, #O_escapeSeen] ; Determine whether FP is available (to decide whether fp regs need ; saving over _kernel_system) ; The SWI will fail if it isn't SWI FPE_Version MOVVC r0, #&70000 ; IVO, DVZ, OFL cause a trap WFSVC r0 MOVVC r0, #1 MOVVS r0, #0 STRB r0, [v6, #O_fpPresent] ADD r4, v6, #O_IIHandlerInData ADR r5, IIHandlerInDataInitValue BL CopyHandler ADD r4, v6, #O_PAHandlerInData ADR r5, PAHandlerInDataInitValue BL CopyHandler ADD r4, v6, #O_DAHandlerInData ADR r5, DAHandlerInDataInitValue BL CopyHandler ADD r4, v6, #O_AEHandlerInData ADR r5, AEHandlerInDataInitValue BL CopyHandler [ StrongARM ;CopyHandler does some dynamic code ;r0,lr are free to use here MOV r0, #0 SWI XOS_SynchroniseCodeAreas ] MOV r0, #0 BL InstallHandlers MOV r0, #0 SWI Wimp_ReadSysInfo MOVVS r0, #0 ; error - 
presumably Wimp not present CMP r0, #0 MOVNE r0, #1 STRB r0, [v6, #O_underDesktop] MOVNE r0, #Env_ApplicationSpace MOVNE r1, #0 SWINE ChangeEnv LDREQ r1, [v6, #O_heapLimit] STR r1, [v6, #O_knownSlotSize] STR r1, [v6, #O_initSlotSize] MOV v1, #0 LDMIB v6, {v2, v3} CallInitProcs Keep CMP v2, v3 BGE EndInitProcs LDR v4, [v2, #lang_size] CMP v4, #lang_Init BLE NoInitProc LDR a1, [v2, #lang_Init] CMP a1, #0 BEQ NoInitProc CallClient a1 CMP a1, #0 MOVNE v1, a1 NoInitProc ADD v2, v2, v4 B CallInitProcs EndInitProcs CMP v1, #0 BEQ NoMainProgram CallClient v1 NoMainProgram BL Finalise ADR r0, E_NoMainProgram BL |_kernel_copyerror| FatalError Keep SWI GenerateError ;StrongARM - there is dynamic code here, but this is sorted in _kernel_init, after ;all calls to CopyHandler CopyHandler LDMIA r5!, {r6, r7} STMIA r4!, {r6, r7} SUB r6, r5, r4 SUB r6, r6, #8 MOV r6, r6, ASR #2 BIC r6, r6, #&ff000000 ORR r6, r6, #&ea000000 STR r6, [r4] MOVS pc, r14 |Sys$RCLimit| DCB "Sys$RCLimit", 0 ALIGN |_kernel_exit| [ ModeMayBeNonUser TST r14, #3 BEQ |_kernel_user_exit| MOV ip, sp SUB sp, sp, #4*12 ; set up an unwind block STMFD sp!, {sl} STMFD sp!, {a1, r3-r9, fp, ip, r14} ; r3 to reserve an extra word BL |_kernel_exittraphandler| 01 ADD a1, sp, #8 ADD a2, sp, #4 BL |_kernel_unwind| CMP a1, #0 BNE %B01 MOV r0, #1 ; get base address of RMA SWI XOS_ReadDynamicArea MOVVC r14, r0 MOVVS r14, #&01800000 ; default to fixed base if SWI fails LDMFD sp!, {a1} CMP a1, r14 ADRLT a1, E_Exit BLLT |_kernel_copyerror| LDMIB sp, {r4-r9, fp, sp, pc}^ ALIGN ErrorBlock Exit, "Exit called", C49 |_kernel_user_exit| ] LoadStaticBase v6, ip ; the work of finalisation gets done in the Exit handler. ; Due to a bug in the RISC OS kernel where the PC value in the error block ; is incorrectly written if OS_Exit is called with a return value outside ; the range 0 .. 
Sys$RCLimit we perform the check ourselves and call ; GenerateError directly LDR r2, [v6, #O_returnCode] CMP r2, #0 SWIEQ Exit ADR r0, |Sys$RCLimit| ADD r1, v6, #O_returnCodeLimit MOV r2, #4 MOV r3, #0 MOV r4, #0 SWI XOS_ReadVarVal MOVVS r3, #&100 BVS KernelExit1 MOV r3, #0 KernelExit2 LDRB r0, [r1], #1 SUB r0, r0, #'0' CMP r0, #10 BCS KernelExit1 ADD r3, r3, r3, LSL #2 ADD r3, r0, r3, LSL #1 B KernelExit2 KernelExit1 ADR r0, E_BadReturnCode BL |_kernel_copyerror| LDR r2, [v6, #O_returnCode] CMP r2, #0 ADDLTS r2, r2, r3 CMPNE r2, r3 LDR r1, ABEXString SWICC Exit STMDB sp!, {r0} BL Finalise LDMIA sp!, {r0} SWI GenerateError ; Generate an external error. Re-init the stack to the root stack chunk. ; The stack contents are not needed since this is an external error. ; In some cases we must re-init the stack (eg. stack overflow). ; The only case where it might may a difference is under a debugger in which ; case tough luck, you don't get a backtrace. |_kernel_raise_error| TEQ r0, #0 MOVEQS pc, lr [ ModeMayBeNonUser TST r14, #3 BNE |_kernel_exit| ] LoadStaticBase v6, ip LDR sl, [v6, #O_rootStackChunk] LDR sp, [sl, #SC_size] ADD sp, sl, sp ADD sl, sl, #SC_SLOffset MOV fp, #0 STMFD sp!, {a1} BL Finalise LDMFD sp!, {a1} SWI GenerateError Finalise STMFD sp!, {lr} LDMIB v6, {v2, v3} CallFinaliseProcs CMP v2, v3 BGE EndFinaliseProcs LDR v4, [v2, #lang_size] CMP v4, #lang_Finalise BLE NoFinaliseProc LDR v1, [v2, #lang_Finalise] CMP v1, #0 BEQ NoFinaliseProc CallClient v1 NoFinaliseProc ADD v2, v2, v4 B CallFinaliseProcs EndFinaliseProcs [ SharedLibrary ; Then do finalisation for the shared library (if we are one) ; Not CallClient here, because change of calling standards is ; inappropriate. LDR v2, =|RTSK$$Data$$Base| LDR v3, =|RTSK$$Data$$Limit| CallFinaliseProcs_SL CMP v2, v3 BGE EndFinaliseProcs_SL LDR v4, [v2, #lang_size] CMP v4, #lang_Finalise BLE NoFinaliseProc_SL ; Before doing a shared library module finalisation we must check it ; was initialised by the client. 
Do this by comparing the language ; names. STMDB sp!, {v1, v2, v3} LDR v1, [v2, #lang_name] LDMIB v6, {v2, v3} CheckClientInit CMP v2, v3 LDMGEIA sp!, {v1, v2, v3} BGE NoFinaliseProc_SL ; No client init found so can't finalise MOV a1, v1 LDR a2, [v2, #lang_name] CompareClientAndLibraryNames LDRB a3, [a1], #1 LDRB a4, [a2], #1 CMP a3, a4 LDRNE a1, [v2, #lang_size] ADDNE v2, v2, a1 BNE CheckClientInit CMP a3, #0 BNE CompareClientAndLibraryNames LDMIA sp!, {v1, v2, v3} LDR v1, [v2, #lang_Finalise] CMP v1, #0 MOVNE lr, pc MOVNE pc, v1 NoFinaliseProc_SL ADD v2, v2, v4 B CallFinaliseProcs_SL EndFinaliseProcs_SL ] LDMFD sp!, {lr} B RestoreOSHandlers |_kernel_setreturncode| LoadStaticBase ip, a2 STR a1, [ip, #O_returnCode] MOVS pc, r14 |_kernel_fpavailable| LoadStaticBase ip, a1 LDRB a1, [ip, #O_fpPresent] MOVS pc, r14 ABEXString = "ABEX" ErrorBlock BadReturnCode, "Return code too large", C50 ErrorBlock NoMainProgram, "No main program", C51 ;*-------------------------------------------------------------------* ;* Abort Handlers * ;*-------------------------------------------------------------------* ; The handlers called by the OS are in my static data, written there on ; startup, because that's the only way they can find out where the static data ; is. They don't do much, other than save some registers and load r12 with ; the static base. They are all the same length, and all immediately precede ; the real handler; when they are installed, a branch to the real handler is ; tacked on the end. IIHandlerInDataInitValue STMFD r13!, {r0, r12} SUB r12, pc, #O_IIHandlerInData+12 ; Now the bits of the abort handlers which get executed from the code. ; r12 is the address of my static data; the user's values of r0 and r12 are ; on the SVC stack. (SVC mode, interrupts disabled). 
; Real part of the illegal-instruction handler (reached via the branch
; tacked onto the in-data stub).  On entry: SVC mode, interrupts off,
; user r0/r12 on the SVC stack, r12 = static data base.
IIHandler Keep
        SUB     r14, r14, #4                    ; r14 -> the faulting instruction
        ADR     r0, E_IllegalInstruction
        B       Aborted

; Prefetch-abort handler stub, copied into static data by CopyHandler.
PAHandlerInDataInitValue
        STMFD   r13!, {r0, r12}
        SUB     r12, pc, #O_PAHandlerInData+12  ; recover static data base from pc
PAHandler Keep
        SUB     r14, r14, #4
        ADR     r0, E_PrefetchAbort
        B       Aborted

; Data-abort handler stub, copied into static data by CopyHandler.
DAHandlerInDataInitValue
        STMFD   r13!, {r0, r12}
        SUB     r12, pc, #O_DAHandlerInData+12
DAHandler Keep
        SUB     r14, r14, #8                    ; data abort: r14 is 8 beyond the instruction
        ADR     r0, E_DataAbort
        B       Aborted2

; Address-exception handler stub, copied into static data by CopyHandler.
AEHandlerInDataInitValue
        STMFD   r13!, {r0, r12}
        SUB     r12, pc, #O_AEHandlerInData+12
AEHandler Keep
        SUB     r14, r14, #8
        ADR     r0, E_AddressException
;       B       Aborted2                        ; (fall through)

Aborted2 Keep
; Abort which may be in the FP emulator, and if so should be reported
; as occurring at the instruction being emulated.
; If in user mode, can't be in FPE
        TST     r14, #3                         ; mode bits of the aborting code
        BEQ     Aborted
; Otherwise, find out where the FPE module is
        STMFD   sp!, {r0 - r6, r14}
        BIC     r6, r14, #PSRBits               ; r6 = abort address, PSR bits stripped
        MOV     r0, #18                         ; OS_Module reason 18: look up module
        ADR     r1, FPEName
        SWI     Module
        LDMVSFD sp!, {r0 - r6, r14}
        BVS     Aborted                         ; no FPEmulator module: ordinary abort
; (r3 = code base of FPE; word before is length of FPE code)
        CMP     r6, r3
        LDRGE   r4, [r3, #-4]
        ADDGE   r3, r3, r4
        CMPGE   r3, r6                          ; was the abort pc inside the FPE code?
        LDMFD   sp!, {r0 - r6, r14}
        BLT     Aborted
; It was a storage fault in the FP emulator.
; Where to find the user context differs between Brazil and Arthur.
; on Arthur, r13 points to the base of a full register save (r13 needs
; resetting to above this point).
; on Brazil r13 points to the word above a full register save.
; NB - is the pc value right??
        ADD     r13, r13, #8                    ; pop the saved values of r0 and r12
        ADD     r14, r12, #O_registerDump
        MOV     r1, r13
        LDMIA   r1!, {r2-r9}                    ; copy the 16-word register save
        STMIA   r14!, {r2-r9}                   ; into the static register dump
        LDMIA   r1!, {r2-r9}
        STMIA   r14!, {r2-r9}
        ADD     r13, r13, #16*4                 ; reset r13 above the register save
        MOV     r1, r9                          ; r1 = last word copied (the saved pc slot)
        B       AbortFindHandler

        ErrorBlock PrefetchAbort, "Prefetch Abort", C60
        ErrorBlock DataAbort, "Data Abort", C61
        ErrorBlock AddressException, "Address Exception", C53
        ErrorBlock IllegalInstruction, "Illegal Instruction", C54

FPEName = "FPEmulator",0
        ALIGN

SVC_StackSize EQU 8192

Aborted Keep
; entry here in SVC mode, r0 a pointer to an error block describing
; the abort.
; all user registers except r0, r12 are as at the time of the abort.
; r0 & r12 are on the stack.
; First, save all user registers in registerDump.
        STMFD   r13!, {r14}                     ; remember the abort pc
        BL      |_kernel_copyerror|
        ADD     r14, r12, #O_registerDump
 [ SASTMhatbroken
        STMIB   r14!, {r1-r12}
        STMIB   r14, {r13,r14}^
        NOP
        SUB     r14, r14, #12*4
 |
        STMIB   r14, {r1-r14}^                  ; dump r1-r14 (user-bank r13/r14) at slots 1-14
 ]
        LDMFD   r13!, {r1, r2, r3}              ; r1 = abort pc, r2 = user r0, r3 = user r12
        TST     r1, #3                          ; EQ: aborting code was in user mode
        LDRNE   r1, [r14, #lr * 4]              ; non-user: take pc from the dumped lr slot
                                                ; NOTE(review): user-bank dump - confirm for SVC aborts
        STR     r1, [r14, #pc*4]
        STR     r2, [r14, #r0*4]
        STREQ   r3, [r14, #r12*4]
        BEQ     AbortFindHandler
; Non-user abort: recover three words from the top of the SVC stack
; (stack base found by rounding sp to 1MB, top = base + SVC_StackSize)
; into dump slots 10-12.
; NOTE(review): depends on the RISC OS kernel's SVC stack layout - confirm.
        MOV     r4, sp, LSR #20
        MOV     r4, r4, LSL #20
        ADD     r4, r4, #SVC_StackSize          ; r4 = top of the SVC stack
        SUB     r1, r4, sp
        CMP     r1, #3 * 4                      ; at least three words on the stack?
        BCC     AbortFindHandler
        LDMEA   r4, {r1, r2, r3}                ; three words just below the stack top
        ADD     r4, r14, #10 * 4
        STMIA   r4, {r1, r2, r3}

AbortFindHandler Keep
; We can only call an abort handler if we had a stack at the
; time of the abort.  If not, we have to say 'uncaught trap'.
; There is a problem as soon as interrupts are enabled, that an event may
; arrive and trash the register dump.  If there's a stack, this is solved
; by copying the dump onto it.  Otherwise, we protect ourselves while
; constructing the error by pretending there's a callback going on.
; Entry may be in SWI mode (faults) or user mode (stack overflow,
; divide by zero).
; r0 is the address of an error block describing the fault.
; r12 is the address of our static data.
        MOV     v6, r12
        BL      CopyErrorV6OK
        LDRB    r2, [v6, #O_inTrapHandler]
        CMP     r2, #0
        BNE     RecursiveTrap                   ; fault while already handling one
        LDRB    r2, [v6, #O_unwinding]
        CMP     r2, #0
        BNE     duh_abort
        MOV     r2, #1
        STRB    r2, [v6, #O_inTrapHandler]
        ADD     r11, v6, #O_registerDump+16*4   ; r11 -> just past the register dump
        LDR     r10, [v6, #O_registerDump+sl*4] ; r10 = dumped sl
        LDR     r1, [v6, #O_heapBase]
        LDR     r2, [v6, #O_heapLimit]
 [ SharedLibrary
; Shared Library, with the possibility that the fault occurred
; when APCS_A was in force.  In order to sort this mess out, it
; is necessary that sl at the time of the fault was a valid
; stackchunk handle.  In the non-shared case, this may not be true -
; it won't be within Fortran code, for example.
; Also, in non-user C code it won't be a full handle (no mark etc).
; But I know that in that case, we have APCS_U.
; Commented-out alternative: detect SVC-mode faults from the dumped pc's PSR bits.
;       LDR     r3, [v6, #O_registerDump+pc*4]
;       TST     r3, #PSRSVCMode
;       MOVNE   r2, #-1                         ; but then, the stack is not within the heap
;       BNE     Trap_IsAPCSR
        LDR     r3, =IsAStackChunk
        CMP     r10, r1                         ; within the heap?
        CMPGE   r2, r10
        BLT     Trap_NotAPCSR
        LDR     r4, [r10, #SC_mark-SC_SLOffset]
        EOR     r4, r4, r3
        BICS    r4, r4, #&80000000              ; a chunk marked 'handling extension' will do
        BNE     Trap_NotAPCSR
Trap_IsAPCSR
; The dumped sl is a valid stack chunk handle: use the APCS-R/U registers.
        LDR     v1, [v6, #O_registerDump+fp*4]
        LDR     r12, [v6, #O_registerDump+sp*4]
        B       Trap_CallingStandardKnown
Trap_NotAPCSR
; Try the APCS_A register assignment instead: there sl lives in r13,
; fp in r10 and sp in r12 (see the CallClient switching code).
        LDR     r10, [v6, #O_registerDump+r13*4]
        CMP     r10, r1                         ; same validity checks on the APCS_A sl
        CMPGE   r2, r10
        BLT     Trap_NoStackForHandler
        LDR     r4, [r10, #SC_mark-SC_SLOffset]
        EOR     r4, r4, r3
        BICS    r4, r4, #&80000000              ; a chunk marked 'handling extension' will do
        BNE     Trap_NoStackForHandler
        LDR     v1, [v6, #O_registerDump+r10*4]
        LDR     r12, [v6, #O_registerDump+r12*4]
Trap_CallingStandardKnown
; We require that sp+256 > sl and sp < heaptop.
        ADD     r1, r12, #256
        CMP     r1, r10
        CMPCS   r2, r12
        BCC     Trap_NoStackForHandler
 |
; Non-shared build: registers are always in their APCS_U places.
        LDR     r12, [v6, #O_registerDump+sp*4]
        CMP     r12, r1
        CMPGT   r2, r12                         ; sp within the heap, and ...
        ADDGT   r1, r12, #256
        CMPGT   r1, r10                         ; ... sp + 256 > sl
        BLE     Trap_NoStackForHandler
        LDR     r3, =IsAStackChunk
        LDR     r4, [r10, #SC_mark-SC_SLOffset]
        EOR     r4, r4, r3
        BICS    r4, r4, #&80000000              ; a chunk marked 'handling extension' will do
        BNE     Trap_NoStackForHandler
        LDR     v1, [v6, #O_registerDump+fp*4]
 ]
; Copy the 16-word register dump (r11 points past it) onto the faulting
; program's stack, so an event can't trash the static copy.
        LDMDB   r11!, {a1-a4, v2-v5}
        STMDB   r12!, {a1-a4, v2-v5}
        LDMDB   r11!, {a1-a4, v2-v5}
        STMDB   r12!, {a1-a4, v2-v5}
; Some agony here about preventing an event handler running
; (on the user stack) while the registers describing the stack
; (sp & sl) haven't both been updated.
[ ModeMayBeNonUser LDR a1, [v6, #O_registerDump+pc*4] TST a1, #PSRIBit ORREQ a1, a1, #PSRVBit BICNE a1, a1, #PSRVBit TEQP a1, #0 | TEQP pc, #PSRIBit:OR:PSRVBit ; user mode ] NOOP MOV sp, r12 MOV sl, r10 MOV fp, v1 SWIVS IntOn LDR v1, [v6, #O_errorNumber] MOV r0, sp MOV v2, #lang_Trap BL FindAndCallHandlers MOV r0, sp MOV v2, #lang_UncaughtTrap BL FindAndCallHandlers BL RestoreOSHandlers MOV v5, sp ADD fp, v6, #O_errorNumber ADR ip, E_UncaughtTrap B FatalErrorX Trap_NoStackForHandler ADR ip, E_NoStackForTrapHandler B FatalErrorY ErrorBlock NoStackForTrapHandler, "No stack for trap handler", C55 RecursiveTrap Keep ADR ip, E_RecursiveTrap FatalErrorY ; Pointer to error block in ip. (beware, RestoreOSHandlers ; corrupts r0-r8). MOV fp, r0 MOV r0, #0 STRB r0, [v6, #O_callbackInactive] TEQP pc, #0 NOOP BL RestoreOSHandlers ADD v5, v6, #O_registerDump FatalErrorX [ SharedLibrary LDR a1, [v5, #pc*4] ADD a2, v6, #O_pc_hex_buff BL HexOut MOV a1, v5 ADD a2, v6, #O_reg_hex_buff BL HexOut MOV r0, ip [ :DEF:DEFAULT_TEXT ADD r0, r0, #4 10 LDRB r2, [r0], #1 CMP r2, #0 BNE %B10 ADD r0, r0, #3 BIC r0, r0, #3 ] MOV v4, pc SWI EnterSVC BLVC open_messagefile TEQP pc, v4 MOV r0, r0 ADD r2, v6, #O_fastEventStack MOV r3, #256 ADD r4, fp, #4 ADD r5, v6, #O_pc_hex_buff ADD r6, v6, #O_reg_hex_buff MOV r7, #0 SWI XMessageTrans_ErrorLookup | MOV r0, ip BL |_kernel_copyerror| MOV a4, r0 ADD a2, v6, #O_fastEventStack BL CopyError2 SUB a2, a2, #1 ADR a4, str1 BL CopyErrorString SUB a2, a2, #1 ADD a4, fp, #4 BL CopyErrorString SUB a2, a2, #1 ADR a4, str2 BL CopyErrorString SUB a2, a2, #1 LDR a1, [v5, #pc*4] BL HexOut MOV a4, a2 [ :DEF:DEFAULT_TEXT ADR a1, str3 ADR a2, str3tok | ADR a1, str3tok ] BL |_kernel_getmessage| MOV a2, a4 MOV a4, r0 BL CopyErrorString SUB a2, a2, #1 MOV a1, v5 BL HexOut MOV a1, #0 STRB a1, [a2] ADD r0, v6, #O_fastEventStack ] B FatalError str1 = ": ", 0 str2 = ", pc = ", 0 [ :DEF:DEFAULT_TEXT str3 = ": registers at ", 0 ] str3tok = "C56", 0 ALIGN ; a1 = Hex value to 
convert ; a2 = Buffer HexOut MOV a4, #8 01 MOV a1, a1, ROR #28 AND a3, a1, #15 CMP a3, #10 ADDLT a3, a3, #"0" ADDGE a3, a3, #"A"-10 STRB a3, [a2], #+1 SUBS a4, a4, #1 BNE %B01 [ SharedLibrary STRB a4, [a2] ] MOVS pc, r14 |_kernel_exittraphandler| LoadStaticBase ip, a1 MOV a1, #0 STRB a1, [ip, #O_inTrapHandler] MOV a1, #1 STRB a1, [ip, #O_callbackInactive] MOVS pc, r14 FindAndCallHandlers Keep ; Finds a handler of type offset v2 in a language description, ; responsible for the current pc value. If one (non-zero) is found, ; calls it, and resumes the program if it so requests. ; Otherwise, unwinds the stack and repeats the operation. ; r0 addresses the register dump ; v1 is the first argument for the handler (fault / event code) ; v2 is the handler offset ; v6 is my static data base ; set up an intial unwind block. For our purposes, we don't care ; about the values of callee-save registers (ARM or FP). STMFD sp!, {r0, r14} MOV v3, fp MOV v4, sp LDR v5, [r0, #pc*4] MOV ip, sl MOV r1, v5 SUB sp, sp, #uwb_size+4 ADD v4, sp, #uwb_fp+4 STMIA v4, {v3, v4, v5, ip} FCH_NextFrame BIC r1, r1, #PSRBits LDMIB v6, {v4, v5} FCH_NextLanguage Keep ; find to which language the current pc corresponds (between which ; language's code bounds it lies). CMP v4, v5 BGE FCH_NoHandlerFound LDMIA v4, {r0, r2, r3} CMP r1, r2 CMPGE r3, r1 ADDLT v4, v4, r0 BLT FCH_NextLanguage ; If the language has a handler procedure of the right type, call it ; (it may not have one either by not having a large enough area, or ; a zero entry). 
; FindAndCallHandlers (continued): r0 = size of the language description
; at v4, v2 = handler offset (lang_Trap etc.), v1 = fault/event code,
; v6 = static data base.
        CMP     r0, v2                          ; description too small to hold this handler?
        BLE     FCH_Unwind
        LDR     r2, [v4, v2]
        CMP     r2, #0                          ; zero entry: no handler of this type
        BEQ     FCH_Unwind
; arguments for the handler are
;   fault or event code
;   pointer to dumped registers
; return value is non-zero to resume; otherwise, search continues
        LDR     a1, =|RTSK$$Data$$Limit|
        CMP     v5, a1                          ; EQ: searching the shared library's own list
        MOV     a1, v1
        LDR     a2, [sp, #uwb_size+4]           ; a2 = pointer to dumped registers (saved r0)
        MOVEQ   lr, pc                          ; library handler: plain call,
        MOVEQ   pc, r2                          ; no calling-standard change
        BEQ     FCH_ClientCalled
        CallClient r2                           ; client handler: may need APCS switch
FCH_ClientCalled
        CMP     v2, #lang_Event                 ; event handlers are only allowed to pass escape
        BNE     FCH_NotEvent
        CMP     v1, #-1
        BNE     FCH_Exit
FCH_NotEvent
        CMP     a1, #0                          ; zero return: keep searching
        BEQ     FCH_Unwind
; if resuming after a trap, clear the 'in trap handler' status
FCH_Exit
        SUBS    r0, v2, #lang_Trap
        SUBNES  r0, v2, #lang_UncaughtTrap
        STREQB  r0, [v6, #O_inTrapHandler]      ; r0 is 0 whenever EQ holds here
        LDR     r0, [sp, #uwb_size+4]
        B       ReloadUserState

FCH_NoHandlerFound Keep
; pc is not within a known code area in the image.  Perhaps it is
; within a library area (if the image uses a shared library).
 [ SharedLibrary
        LDR     r0, =|RTSK$$Data$$Limit|
        CMP     v5, r0
        MOVNE   v5, r0                          ; not yet tried: repeat the search over
        LDRNE   v4, =|RTSK$$Data$$Base|         ; the library's own language descriptions
        BNE     FCH_NextLanguage
 ]
FCH_Unwind Keep
        ADD     a1, sp, #4
        MOV     a2, sp
        BL      |_kernel_unwind|
        CMP     a1, #0
        LDRGT   r1, [sp, #uwb_pc+4]             ; unwound a frame: search again from its pc
        BGT     FCH_NextFrame
        ADD     sp, sp, #uwb_size+4+4           ; no more frames: drop unwind block
        LDMFD   sp!, {pc}^

        ErrorBlock UncaughtTrap, "Uncaught trap", C57
        ErrorBlock RecursiveTrap, "Trap while in trap handler", C58

RestoreOSHandlers Keep
; (there may not be a valid sp)
        STR     lr, [v6, #O_lk_RestoreOSHandlers] ; static link save, since sp may be junk
        BL      InstallCallersHandlers
        LDRB    r1, [v6, #O_underDesktop]
        CMP     r1, #0
        LDRNE   r1, [v6, #O_knownSlotSize]
        LDRNE   r0, [v6, #O_initSlotSize]
        CMPNE   r1, r0                          ; desktop task whose slot size changed?
        LDR     lr, [v6, #O_lk_RestoreOSHandlers]
        BNE     SetWimpSlot                     ; then put the slot back to its initial size
        MOVS    pc, lr

SetWimpSlot_Save_r4r5
        STMFD   sp!, {r4, r5}
SetWimpSlot Keep
; Set the wimp slot to r0 - ApplicationBase.
; Destroys r4, r5
; May need to set MemoryLimit temporarily to the slot size,
; in order that Wimp_SlotSize not refuse.
; (Note that preservation of r4 is required anyway because of
; fault in Wimp_SlotSize which corrupts it).
; Returns the slot size set.
; SetWimpSlot (continued): r4 = requested limit (absolute address).
        MOV     r4, r0
        MOV     r0, #Env_MemoryLimit
        MOV     r1, #0                          ; read, don't change
        SWI     ChangeEnv
        MOV     r5, r1                          ; r5 = caller's MemoryLimit
        MOV     r0, #Env_ApplicationSpace
        MOV     r1, #0
        SWI     ChangeEnv
        CMP     r1, r5                          ; MemoryLimit below application space top?
        MOVNE   r0, #Env_MemoryLimit            ; then raise it temporarily so that
        SWINE   ChangeEnv                       ; Wimp_SlotSize won't refuse
        SUB     r0, r4, #Application_Base
        MOV     r1, #-1                         ; leave the next slot alone
        SWI     Wimp_SlotSize
        MOVNE   r4, r0                          ; NE still from the CMP above
                                                ; NOTE(review): relies on the SWIs preserving Z
        MOVNE   r0, #Env_MemoryLimit
        MOVNE   r1, r5
        SWINE   ChangeEnv                       ; put the caller's MemoryLimit back
        MOVNE   r0, r4
        MOVS    pc, r14                         ; return the slot size set in r0

; Our own exit handler, which restores our parent's environment then exits.
; Just in case a C program manages to (call something which) does a SWI Exit.
; Necessary otherwise because of Obey.
; The register state prevailing when exit was called is completely undefined;
; all we can rely on is that r12 addresses our static data.  The stack
; description may be junk so we reset the stack to its base.
ExitHandler Keep
        MOV     v6, r12
        LDR     sl, [v6, #O_rootStackChunk]
        LDR     sp, [sl, #SC_size]
        ADD     sp, sl, sp                      ; sp = top of the root stack chunk
        ADD     sl, sl, #SC_SLOffset
        MOV     fp, #0
; so the user can get to hear about faults in his atexit procs, unset
; inTrapHandler.  This is safe (will terminate) because all finalisation
; is one-shot.
        STRB    fp, [v6, #O_inTrapHandler]
        STMFD   sp!, {r0-r2}
        BL      Finalise
        LDMIA   sp!, {r0-r2}
        SWI     Exit

UpCallHandler
        CMP     r0, #256                        ; only UpCall 256 is of interest
        MOVNE   pc, r14
; Entered in SWI mode.  A new application is starting (not started by system,
; for which there's a different UpCall handler).  It has the same MemoryLimit
; as this application, so is free to overwrite it.  We'd better close ourselves
; down.
; The register state is undefined, except that r13 must be the SWI stack
; pointer.
        STMFD   sp!, {r0-r3, v1-v6, fp, sl, lr}
        TEQP    pc, #0                          ; to user mode, interrupts on
        NOOP
        MOV     v6, r12
        LDR     sl, [v6, #O_rootStackChunk]     ; reset the stack to its base, as for exit
        LDR     sp, [sl, #SC_size]
        ADD     sp, sl, sp
        ADD     sl, sl, #SC_SLOffset
        MOV     fp, #0
        BL      Finalise
        SWI     EnterSVC
        LDMFD   sp!, {r0-r3, v1-v6, fp, sl, lr}
        MOVS    pc, lr

InstallHandlers Keep
; r0 is zero for the initial call (previous values for the handlers
; to be saved).
; If non-zero, it is the value memoryLimit should be set to.
; InstallHandlers (continued): r8 = r0 on entry (0 on the initial call,
; in which case the displaced handlers are saved at O_oldAbortHandlers).
        STMFD   sp!, {r0}
        MOV     r8, r0
; Install the four exception handler stubs held in our static data.
        MOV     r0, #0
        MOV     r1, #0
        MOV     r2, #0
        MOV     r3, #0
        ADD     r4, v6, #O_IIHandlerInData
        ADD     r5, v6, #O_PAHandlerInData
        ADD     r6, v6, #O_DAHandlerInData
        ADD     r7, v6, #O_AEHandlerInData
        SWI     SetEnv
        CMP     r8, #0                          ; initial call?
        ADDEQ   r8, v6, #O_oldAbortHandlers
        STMEQIA r8!, {r4-r7}                    ; then save the previous handlers
; NOTE(review): the conditional saves below rely on the SWIs leaving
; the Z flag from the CMP above intact - confirm against the PRM.
        MOV     r0, #Env_ExitHandler
        ADR     r1, ExitHandler
        MOV     r2, v6
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2}                   ; previous handler + workspace
        MOV     r0, #Env_MemoryLimit
        LDMFD   sp!, {r1}                       ; the requested MemoryLimit value
        SWI     ChangeEnv
        STMEQIA r8!, {r1}
        MOV     r0, #Env_ErrorHandler
        ADR     r1, ErrorHandler
        MOV     r2, v6
        ADD     r3, v6, #O_errorBuffer
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2, r3}
; callback, escape and event handlers must be updated atomically
        SWI     IntOff
        MOV     r0, #Env_CallBackHandler
        ADR     r1, CallBackHandler
        MOV     r2, v6
        ADD     r3, v6, #O_registerDump
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2, r3}
        MOV     r0, #Env_EscapeHandler
        ADR     r1, EscapeHandler
        MOV     r2, v6
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2}
        MOV     r0, #Env_EventHandler
        ADR     r1, EventHandler
        MOV     r2, v6
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2}
        SWI     IntOn
        MOV     r0, #Env_UpCallHandler
        ADR     r1, UpCallHandler
        MOV     r2, v6
        SWI     ChangeEnv
        STMEQIA r8!, {r1, r2}
        MOVS    pc, r14

InstallCallersHandlers Keep
; Reinstate the handler set saved at O_oldAbortHandlers by the initial
; InstallHandlers call (exact reverse of the saves above).
        ADD     r8, v6, #O_oldAbortHandlers
        MOV     r0, #0
        MOV     r1, #0
        MOV     r2, #0
        MOV     r3, #0
        LDMIA   r8!, {r4-r7}                    ; the caller's exception handlers
        SWI     SetEnv
        MOV     r0, #Env_ExitHandler
        LDMIA   r8!, {r1, r2}
        SWI     ChangeEnv
        MOV     r0, #Env_MemoryLimit
        LDMIA   r8!, {r1}
        SWI     ChangeEnv
        MOV     r4, r1                          ; r4 = the MemoryLimit just displaced
        MOV     r0, #Env_ErrorHandler
        LDMIA   r8!, {r1, r2, r3}
        SWI     ChangeEnv
; callback, escape and event handlers must be updated atomically
        SWI     IntOff
        MOV     r0, #Env_CallBackHandler
        LDMIA   r8!, {r1, r2, r3}
        SWI     ChangeEnv
        MOV     r0, #Env_EscapeHandler
        LDMIA   r8!, {r1, r2}
        SWI     ChangeEnv
        MOV     r0, #Env_EventHandler
        LDMIA   r8!, {r1, r2}
        SWI     ChangeEnv
        SWI     IntOn
        MOV     r0, #Env_UpCallHandler
        LDMIA   r8!, {r1, r2}
        SWI     ChangeEnv
        MOVS    pc, r14

;*-------------------------------------------------------------------*
;* Error handler                                                      *
;*-------------------------------------------------------------------*

ErrorHandler Keep
; Now Brazil compatibility is discarded and all SWI calls in the
library ; are X-bit set (other than _kernel_swi if non-X bit set has been explicitly ; asked for), any error is treated as fatal here. ; ; Entry with static data base in r0. User mode, interrupts on ; Since it would be nice to preserve as much as possible for the FP fault case ; we switch back to SWI mode to save the registers. SWI EnterSVC ADD r14, r0, #O_registerDump [ SASTMhatbroken STMIA r14!, {r0-r12} STMIA r14, {r13,r14}^ NOP SUB r14, r14, #13*4 | STMIA r14, {r0-r14}^ ] MOV r12, r0 ADD r0, r0, #O_errorNumber LDMDA r0, {r1, r2} ; r1 is error pc, r2 error number BIC r1, r1, #&0c000003 ; Sanitize PC value CMP r2, #Error_BranchThroughZero MOVEQ r1, #0 STR r1, [r12, #O_registerDump+pc*4] B AbortFindHandler ;*-------------------------------------------------------------------* ;* Escape and event handling * ;*-------------------------------------------------------------------* |_kernel_escape_seen| MOV a4, r14 LoadStaticBase a3, ip MOV a2, #0 TST r14, #PSRSVCMode SWINE EnterSVC TEQP pc, #PSRIBit+PSRSVCMode ; interrupts off, for atomic read and update LDRB a1, [a3, #O_escapeSeen] STRB a2, [a3, #O_escapeSeen] MOVS pc, a4 EventHandler Keep TEQ r0, #4 MOVEQS pc, lr STMFD r13!, {r11, r14} STR r0, [r12, #O_eventCode] ADD r11, r12, #O_eventRegisters STMIA r11, {r0-r10, r13} STMDB r11, {r13}^ MOV v6, r12 MOV v2, r11 LDMIB r12, {v4, v5} 02 CMP v4, v5 BGE EndFastEventHandlers LDR v1, [v4] CMP v1, #lang_FastEvent LDRGT r1, [v4, #lang_FastEvent] CMPGT r1, #0 BLE %F01 MOV a1, v2 MOV v1, r12 MOV fp, #0 ; nb fp is NOT r13 ; SL not set up - handlers must not have stack checking on, ; and may not require relocation of data references. MOV r14, pc MOV pc, r1 MOV r12, v1 01 ADD v4, v4, v1 B %B02 EndFastEventHandlers ; If callback is possible (not already in a callback), set r12 to 1 to ; request it. 
LDRB r12, [v6, #O_callbackInactive] CMP r12, #0 ADD v6, v6, #O_eventRegisters LDMIA v6, {r0-r10} LDMFD r13!, {r11, pc} EscapeHandler Keep TSTS r11, #&40 MOVEQ pc, r14 ; ignore flag going away ; In Arthur, it is NEVER safe to call a handler now: we always have to ; wait for CallBack. STMFD r13!, {r0} MOV r0, #-1 STRB r0, [r12, #O_hadEscape] STRB r0, [r12, #O_escapeSeen] STR r0, [r12, #O_eventCode] LDRB r11, [r12, #O_callbackInactive] CMP r11, #0 MOVNE r12, #1 LDMFD r13!, {r0} MOV pc, r14 ; Callback handler - entered in either SWI or IRQ mode, interrupts disabled, ; just before OS return to user mode with interrupts on. CallBackHandler Keep ; Set 'in callback' to prevent callback being reentered before it finishes ; when we enable interrupts later on. MOV r0, #0 STRB r0, [r12, #O_callbackInactive] MOV v6, r12 ; get SB into our standard place ; Copy the register set from our static callback buffer, onto the stack ; (If we appear to have a valid stack pointer). Otherwise, we just ; ignore the event. ADD r11, v6, #O_registerDump+16*4 LDR r10, [v6, #O_registerDump+sl*4] LDR r1, [v6, #O_heapBase] LDR r2, [v6, #O_heapLimit] LDR r3, =IsAStackChunk [ SharedLibrary ; Shared Library, with the possibility that the event occurred ; when APCS_A was in force. In order to sort this mess out, it ; is necessary that sl at the time of the fault was a valid ; stackchunk handle. In the non-shared case, this may not be true - ; it won't be within Fortran code, for example. ; There is a complicating factor, namely that the event may have ; happened during APCS change. This has the forms ; 200 -> 300 mov[ne] fp, fp_a ; mov[ne] sl, sl_a ; mov[ne] sp, sp_a ; ; 300 -> 200 mov[ne] sp_a, sp ; mov[ne] sl_a, sl ; mov[ne] fp_a, fp ; ; This code cares about the values of fp, sp and sl, so possible ; problems are: ; after either mov sl_x, sl_y we appear to be in APCS_R, but ; sp_r holds the value of SL. In both cases, fp_r holds the ; value of FP. 
; MOV r12, #-1 CMP r10, r1 CMPGE r2, r10 BLT Event_NotAPCSR LDR r4, [r10, #SC_mark-SC_SLOffset] CMP r4, r3 BNE Event_NotAPCSR LDR v5, [v6, #O_registerDump+fp*4] LDR r0, [v6, #O_registerDump+pc*4] BIC r14, r0, #PSRBits LDR r14, [r14, #-4] LDR r12, movne_sl_sla EORS r12, r12, r14 BNE %F01 TST r0, #PSRZBit BNE IsntExecutedSLChange 01 BICS r12, r12, #&F0000000 ; lose the condition mask BEQ IsExecutedSLChange LDR r12, movne_sla_sl EORS r12, r12, r14 BNE %F02 TST r0, #PSRZBit BNE IsntExecutedSLChange 02 BICS r12, r12, #&F0000000 BEQ IsExecutedSLChange IsntExecutedSLChange LDR r12, [v6, #O_registerDump+r13*4] B Event_CallingStandardKnown IsExecutedSLChange LDR r12, [v6, #O_registerDump+r12*4] B Event_CallingStandardKnown Event_NotAPCSR LDR r10, [v6, #O_registerDump+r13*4] CMP r10, r1 CMPGE r2, r10 BLT Event_NoStackForHandler LDR r4, [r10, #SC_mark-SC_SLOffset] CMP r4, r3 BNE Event_NoStackForHandler LDR r12, [v6, #O_registerDump+r12*4] LDR v5, [v6, #O_registerDump+r10*4] ; Now r12 is the SP value in the interrupted code ; v5 FP ; r10 SL Event_CallingStandardKnown CMP r12, r10 CMPGT r2, r12 MOVLE r12, #-1 BLE Event_NoStackForHandler | LDR r12, [v6, #O_registerDump+sp*4] LDR v5, [v6, #O_registerDump+fp*4] CMP r12, r1 CMPGT r2, r12 ; sp within heap and ... CMPGT r10, r1 CMPGT r2, r10 ; sl within heap and ... CMPGT r12, r10 ; sp > sl and ... BLE Event_BadStack LDR r4, [r10, #SC_mark-SC_SLOffset] CMP r4, r3 ; sl points at stack chunk BEQ Event_StackOK Event_BadStack MOV r12, #-1 B Event_NoStackForHandler Event_StackOK ] LDMDB r11!, {r0-r7} STMDB r12!, {r0-r7} LDMDB r11!, {r0-r7} STMDB r12!, {r0-r7} Event_NoStackForHandler TEQP pc, #PSRIBit+PSRSVCMode ; we want the testing for an escape and MOV r1, #1 ; allowing callbacks to be indivisible LDRB r0, [v6, #O_hadEscape] ; (otherwise an escape may be lost). 
STRB r1, [v6, #O_callbackInactive] MOV r1, #0 STRB r1, [v6, #O_hadEscape] LDR v1, [v6, #O_eventCode] CMP r0, #0 MOVNE r0, #&7e SWINE Byte MOVNE v1, #-1 ; escape overrides everything else TEQP pc, #PSRIBit ; to user mode, with interrupts off NOOP CMP r12, #0 BGE CallEventHandlers ADD r0, v6, #O_registerDump B ReloadUserState CallEventHandlers ; now find a handler MOV sp, r12 MOV sl, r10 MOV fp, v5 SWI IntOn MOV r0, sp MOV v2, #lang_Event BL FindAndCallHandlers MOV r0, sp MOV v2, #lang_UnhandledEvent BL FindAndCallHandlers MOV r0, sp ReloadUserState ; Here we must unset the 'callback active' flag and restore our state ; from the callback buffer atomically. 3u ARMs unsupported now ; User r13, r14 must be reloaded from user mode. SWI EnterSVC TEQP pc, #PSRIBit+PSRSVCMode NOOP ADD r14, r0, #pc*4 LDMDB r14, {r0-r14}^ NOOP LDMIA r14, {pc}^ LTORG ;*-------------------------------------------------------------------* ;* Debugging support * ;*-------------------------------------------------------------------* IsClientHandler EQU &80000000 FindHandler Keep ; find to which language the r1 corresponds (between which ; language's code bounds it lies). LDMIB v6, {v4, v5} 01 CMP v4, v5 BGE FH_NoHandlerFound LDMIA v4, {r0, r2, r3} CMP r1, r2 CMPGE r3, r1 ADDLT v4, v4, r0 BLT %B01 ; If the language has a handler procedure of the right type, return ; it in r2 (otherwise 0) CMP r0, v2 LDRGT r2, [v4, v2] MOVLE r2, #0 ; If the handler is within the shared library, we must mark it. 
; (continuation of FindHandler) r2 = handler address, or 0 if the owning
; language has no handler of the requested kind.  If a handler was found
; while searching a list other than the shared library's own RTS data
; (v5 != RTSK$$Data$$Limit), tag it with IsClientHandler so callers know
; a calling-standard change (CallClient) is needed to invoke it.
        CMP     r2, #0
        LDRNE   r0, =|RTSK$$Data$$Limit|
        CMPNE   v5, r0
        ORRNE   r2, r2, #IsClientHandler
        MOVS    pc, r14

FH_NoHandlerFound
 [ SharedLibrary
        ; Nothing matched in the list just searched: if that was not the
        ; shared library's own language list, retry the search over it.
        LDR     r0, =|RTSK$$Data$$Limit|
        CMP     v5, r0
        MOVNE   v5, r0
        LDRNE   v4, =|RTSK$$Data$$Base|
        BNE     %B01
 ]
        MOV     r2, #0                          ; no handler anywhere
        MOVS    pc, r14

; char *_kernel_language(int pc);
; Name of the language whose code region contains pc (marker bit removed).
|_kernel_language|
        STMFD   sp!, {v2, v4-v6, r14}
        LoadStaticBase v6, ip
        MOV     v2, #lang_name                  ; select the 'name' field
        BIC     r1, a1, #PSRBits                ; strip PSR bits from 26-bit pc
        BL      FindHandler
        BIC     a1, r2, #IsClientHandler        ; drop the client-handler marker
        LDMFD   sp!, {v2, v4-v6, pc}^

; char *_kernel_procname(int pc);
; Delegates to the owning language's ProcName handler; returns 0 if the
; language has none.
|_kernel_procname|
        STMFD   sp!, {v2, v4-v6, r14}
        LoadStaticBase v6, ip
        MOV     v2, #lang_ProcName
        BIC     r1, a1, #PSRBits
        BL      FindHandler
        CMP     r2, #0
        MOVEQ   a1, #0                          ; no handler -> NULL
        LDMEQFD sp!, {v2, v4-v6, pc}^
        MOV     a1, r1                          ; pass the sanitized pc
        TST     r2, #IsClientHandler
        LDMEQFD sp!, {v2, v4-v6, r14}
        MOVEQ   pc, r2                          ; library handler: tail-call it
        CallClient r2                           ; client handler: change standard
        LDMFD   sp!, {v2, v4-v6, pc}^

; int _kernel_unwind(_kernel_unwindblock *inout, char **language);
; Unwinds one frame, via the owning language's Unwind handler if it has
; one, otherwise via default_unwind_handler below.
|_kernel_unwind|
        STMFD   sp!, {a1, a2, v2, v4-v6, r14}
        LoadStaticBase v6, ip
        LDR     r1, [a1, #uwb_pc]
        BIC     r1, r1, #PSRBits
        MOV     v2, #lang_Unwind
        BL      FindHandler
        CMP     r2, #0
        BEQ     call_default_unwind_handler
        TST     r2, #IsClientHandler
        LDMEQFD sp!, {a1, a2, v2, v4-v6, r14}
        MOVEQ   pc, r2                          ; library handler: tail-call
        LDMFD   sp!, {a1, a2}                   ; restore args for the client call
        CallClient r2
        LDMFD   sp!, {v2, v4-v6, pc}^

call_default_unwind_handler
        LDMFD   sp!, {a1, a2, v2, v4-v6, r14}

; Default frame unwinder: walks one APCS frame, updating the unwindblock.
; Returns (in v5, copied to a1 at duh_exit) >0 = unwound, 0 = end of call
; chain, -1 = stack corrupt.
default_unwind_handler Keep
        STMFD   sp!, {v1-v6, r14}
        LoadStaticBase v6
        MOV     v2, #1
        STRB    v2, [v6, #O_unwinding]          ; flag: unwind in progress
        LDR     a4, [a1, #uwb_fp]
        BICS    a4, a4, #APCSChange+ChunkChange
        MOVEQ   v5, #0                          ; fp == 0 -> end of call chain
        BEQ     duh_exit
        ; a minimal sensibleness check on the FP's value
        ; (top bits used to mark stack extension & APCS change).
        TST     a4, #&3c000003
        BNE     duh_corrupt
        LDR     a3, [a4, #frame_entrypc]
        BIC     a3, a3, #PSRBits
 [ StrongARM
        ; The save-mask STM sits at entrypc-12 or entrypc-8 depending on
        ; whether STR pc stores pc+12 or pc+8; ask OS_PlatformFeatures.
        STMFD   sp!, {a1-a2}
        MOV     a1, #0
        SWI     XOS_PlatformFeatures
        MOVVS   a1, #0                          ; old OS: assume feature absent
        TST     a1, #8
        LDREQ   v1, [a3, #-12]
        LDRNE   v1, [a3, #-8]
        LDMFD   sp!, {a1-a2}
 |
        LDR     v1, [a3, #-12]
 ]
        ; check that the save mask instruction is indeed the right sort of STM
        ; If not, return indicating stack corruption.
MOV ip, v1, LSR #16 EOR ip, ip, #&e900 EOR ip, ip, #&002c BICS ip, ip, #1 ; STMFD sp!, ... (sp = r12 or r13) BNE duh_corrupt ; update register values in the unwindblock which the save mask says ; were saved in this frame. MOV ip, #1 ADD v2, a4, #frame_prevfp MOV v3, #v6 ADD v4, a1, #uwb_r4-r4*4 01 TST v1, ip, ASL v3 LDRNE r14, [v2, #-4]! STRNE r14, [v4, v3, ASL #2] SUB v3, v3, #1 CMP v3, #v1 BGE %B01 ; now look for floating point stores immediately after the savemask ; instruction, updating values in the saveblock if they are there. SUB a3, a3, #8 LDR v4, =&ed6c0103 02 LDR v1, [a3], #+4 BIC r14, v1, #&7000 CMP r14, v4 BNE UnwindEndFP MOV v1, v1, LSR #10 AND v1, v1, #&1c ADD v1, v1, v1, ASL #1 ADD v1, a1, v1 LDR r14, [v2, #-4]! STR r14, [v1, #uwb_r4-r4*4*3+8] LDR r14, [v2, #-4]! STR r14, [v1, #uwb_r4-r4*4*3+4] LDR r14, [v2, #-4]! STR r14, [v1, #uwb_r4-r4*4*3] B %B02 UnwindEndFP LDMDB a4, {a3, a4, v1} ; saved fp, sp, link ; if the new fp is in a different stack chunk, must amend sl ; in the unwind block. TST a3, #ChunkChange BIC a3, a3, #ChunkChange+APCSChange LDR v3, [a1, #uwb_sl] LDRNE v3, [v3, #SC_prev-SC_SLOffset] ADDNE v3, v3, #SC_SLOffset ADD ip, a1, #uwb_fp STMIA ip, {a3, a4, v1, v3} MOV v3, a2 BIC r1, v1, #PSRBits MOV v2, #lang_name BL FindHandler BIC r2, r2, #IsClientHandler STR r2, [v3] MOV v5, #1 duh_exit MOV a1, #0 STRB a1, [v6, #O_unwinding] MOV a1, v5 LDMFD sp!, {v1-v6, pc}^ duh_abort LDR r12, [v6, #O_registerDump+r12*4] ; abort handling trampled this. 
; (end of duh_abort) reload the dumped pc and restore the pre-abort
; mode/interrupt state held in its PSR bits.
        LDR     r14, [v6, #O_registerDump+pc*4]
        TEQP    r14, #PSRBits                   ; Back to the mode and interrupt
                                                ; status before we got the abort
        NOOP
duh_corrupt
        MOV     v5, #-1                         ; unwind result: stack corrupt
        B       duh_exit

;*-------------------------------------------------------------------*
;* SWI interfaces                                                     *
;*-------------------------------------------------------------------*

; int _kernel_hostos(void) - OS_Byte 0,1: host OS identifier (from r1).
|_kernel_hostos|
        EnterLeafProcContainingSWI
        MOV     r0, #0
        MOV     r1, #1
        SWI     Byte
        MOV     a1, r1
        ExitLeafProcContainingSWI

; _kernel_swi_c: as _kernel_swi but also returns the carry flag.
; a1 = swi number (bit 31 set => caller wants the non-X form),
; a2 -> input register block, a3 -> output block, a4 -> carry word.
|_kernel_swi_c|
 [ StrongARM
        MOV     ip, sp
        STMFD   sp!, {a3, a4, v1-v6, fp, ip, r14, pc}
        SUB     fp, ip, #4
        BIC     r12, a1, #&80000000
        TST     a1, #&80000000                  ; non-X bit requested?
        ORREQ   r12, r12, #X
        LDMIA   r1, {r0-r9}                     ; load caller's r0-r9
        SWI     XOS_CallASWIR12                 ; call the SWI in r12
        LDMFD   sp!, {ip, lr}                   ; ip = out block, lr = carry ptr
        STMIA   ip, {r0 - r9}
        MOV     ip, #0
        MOVCS   ip, #1                          ; carry flag as 0/1...
        MOVVS   ip, #0                          ; ...but 0 if the SWI errored
        STR     ip, [lr]
        BLVS    CopyError
        MOVVC   a1, #0
        LDMDB   fp, {v1-v6, fp, sp, pc}^
 |
        ; Set up a proper frame here, so if an error happens (and not X)
        ; a sensible traceback can be given.
        MOV     ip, sp
        STMFD   sp!, {a3, a4, v1-v6, fp, ip, r14, pc}
        SUB     fp, ip, #4
        ; be kind to fault handler if there is an error.
        ; Synthesize "SWI n" + "B AfterSWI" on the stack and jump to it.
        ADR     a4, AfterSWI-4
        SUB     a4, a4, sp
        MOV     a4, a4, LSR #2                  ; branch offset in words
        BIC     a4, a4, #&ff000000
        ADD     a4, a4, #&ea000000              ; B always
        TST     a1, #&80000000                  ; non-X bit requested?
        ORR     a1, a1, #&EF000000              ; SWI + Always
        ORREQ   a1, a1, #X
        STMFD   sp!, {a1, a4}                   ; plant the two instructions
        LDMIA   a2, {r0 - r9}
        MOV     pc, sp                          ; execute them
AfterSWI
        ADD     sp, sp, #8                      ; discard synthesized code
        LDMFD   sp!, {ip, lr}                   ; ip = out block, lr = carry ptr
        STMIA   ip, {r0 - r9}
        MOV     ip, #0
        MOVCS   ip, #1
        MOVVS   ip, #0
        STR     ip, [lr]
        BLVS    CopyError
        MOVVC   a1, #0
        LDMDB   fp, {v1-v6, fp, sp, pc}^
 ]
swi_ret_inst
        MOV     pc, ip                          ; template copied onto the stack
                                                ; by _kernel_swi below

; _kernel_oserror *_kernel_swi(int no, _kernel_swi_regs *in, _kernel_swi_regs *out);
; Bit 31 of 'no' set => call the non-X form; otherwise the X form is forced.
|_kernel_swi|
 [ StrongARM
        STMDB   sp!, {a3, v1-v6, lr}
        BIC     r12, a1, #&80000000
        TST     a1, #&80000000
        ORREQ   r12, r12, #X
        LDMIA   r1, {r0-r9}
        SWI     XOS_CallASWIR12
        LDR     ip, [sp]                        ; out block
        STMIA   ip, {r0-r9}
        BLVS    CopyError
        MOVVC   a1, #0
        LDMIA   sp!, {a3, v1-v6, pc}^
 |
        STMDB   sp!, {a3, v1-v6, lr}
        LDR     a4, swi_ret_inst                ; "MOV pc, ip" return template
        TST     a1, #&80000000
        ORR     a1, a1, #&ef000000              ; make a SWI-always instruction
        ORREQ   a1, a1, #X
        STMDB   sp!, {a1, a4}                   ; plant SWI + return on the stack
        LDMIA   a2, {r0-r9}
        MOV     ip, pc                          ; resume address for MOV pc, ip
        MOV     pc, sp                          ; execute the synthesized pair
        LDR     ip, [sp, #8]!                   ; pop code; ip = out block
; (end of _kernel_swi, pre-StrongARM path) write results to the out block.
        STMIA   ip, {r0-r9}
        BLVS    CopyError
        MOVVC   a1, #0
        LDMIA   sp!, {a3, v1-v6, pc}^
 ]

; char *_kernel_command_string(void) - the saved command line.
|_kernel_command_string|
        LoadStaticBase a1
        LDR     a1, [a1, #O_ArgString]
        MOVS    pc, r14

; int _kernel_osbyte(int op, int x, int y);
; Result packs X in bits 0-7, Y in bits 8-15, carry in bit 16; -2 on error.
|_kernel_osbyte|
        STMFD   sp!, {v6, r14}
        SWI     Byte
        BVS     ErrorExitV6Stacked
        AND     a1, a2, #&ff                    ; X result
        ORR     a1, a1, a3, ASL #8              ; Y result in bits 8-15
        MOV     a1, a1, ASL #16
        ADC     a1, a1, #0                      ; OS_Byte's carry into bit 0...
        MOV     a1, a1, ROR #16                 ; ...rotated up to bit 16
        LDMFD   sp!, {v6, pc}^

; int _kernel_osrdch(void) - read a character; negative on escape/error.
|_kernel_osrdch|
        STMFD   sp!, {v6, r14}
        SWI     ReadC
        BVS     ErrorExitV6Stacked
        LDMCCFD sp!, {v6, pc}^                  ; C clear: plain character
        CMPS    a1, #27                         ; escape
        MOVEQ   a1, #-27
        MOVNE   a1, #-1                         ; other error, EOF etc
        LDMFD   sp!, {v6, pc}^

; int _kernel_oswrch(int c) - write a character.
; Falls through into the shared error tail; on success a1 is unchanged.
|_kernel_oswrch|
        STMFD   sp!, {v6, r14}
        SWI     WriteC
; Common exit for the wrappers above/below: on V set, record the OS error
; and return -2; {v6, lr} must already be stacked.
ErrorExitV6Stacked
        BLVS    CopyError
        MOVVS   a1, #-2
        LDMFD   sp!, {v6, pc}^

; int _kernel_osbget(unsigned handle) - byte from file; -1 at EOF.
|_kernel_osbget|
        STMFD   sp!, {v6, r14}
        MOV     r1, a1                          ; handle goes in r1 for OS_BGet
        SWI     BGet
        BVS     ErrorExitV6Stacked
        MOVCS   a1, #-1                         ; C set: end of file
        LDMFD   sp!, {v6, pc}^

; int _kernel_osbput(int c, unsigned handle) - byte to file.
|_kernel_osbput|
        STMFD   sp!, {v6, r14}
        SWI     BPut
        LDMVCFD sp!, {v6, pc}^
        BVS     ErrorExitV6Stacked

|_kernel_osgbpb|
        ; typedef struct {
        ;     void * dataptr;
        ;     int nbytes, fileptr;
        ;     int buf_len;
        ;     char * wild_fld;
        ; } _kernel_osgbpb_block;
        ; int _kernel_osgbpb(int op, unsigned handle, _kernel_osgbpb_block *inout);
        STMFD   sp!, {r4, r5, r6, r7, v6, r14}
        MOV     r7, a3
        LDMIA   a3, {r2 - r6}                   ; block -> r2-r6 for OS_GBPB
        SWI     Multiple
        STMIA   r7, {r2 - r6}                   ; write back the updated block
        BLVS    CopyError
        MOVCS   a1, #-1                         ; C set: transfer incomplete
        MOVVS   a1, #-2                         ; error
        LDMFD   sp!, {r4, r5, r6, r7, v6, pc}^

; int _kernel_osword(int op, int *data).
|_kernel_osword|
        STMFD   sp!, {v6, r14}
        SWI     Word
        BVS     ErrorExitV6Stacked
        MOV     a1, r1
        LDMFD   sp!, {v6, pc}^

; unsigned _kernel_osfind(int op, const char *name) - open/close a file.
|_kernel_osfind|
        STMFD   sp!, {v6, r14}
        SWI     Open
        LDMVCFD sp!, {v6, pc}^
        BVS     ErrorExitV6Stacked

|_kernel_osfile|
        ; typedef struct {
        ;     int load, exec;
        ;     int start, end;
        ; } _kernel_osfile_block;
        ; int _kernel_osfile(int op, const char *name, _kernel_osfile_block *inout);
        STMFD   sp!, {r4, r5, r6, v6, r14}
        MOV     r6, a3
        LDMIA   a3, {r2 - r5}                   ; block -> r2-r5 for OS_File
        SWI     File
        STMIA   r6, {r2 - r5}                   ; write back the updated block
        BLVS    CopyError
        MOVVS   a1, #-2
        LDMFD   sp!, {r4, r5, r6, v6, pc}^

; int _kernel_osargs(int op, unsigned handle, int arg);
|_kernel_osargs|
        STMFD   sp!, {v6, r14}
        MOV     ip, a1
        ORR     ip, ip, a2                      ; op==0 && handle==0: result is r0
        SWI     Args
        BVS     ErrorExitV6Stacked
        CMP     ip, #0
        MOVNE   a1, r2                          ; otherwise the result is r2
        LDMFD   sp!, {v6, pc}^

; int _kernel_oscli(const char *s) - execute a * command.
|_kernel_oscli|
        STMFD   sp!, {v6, r14}
        SWI     CLI
        BVS     ErrorExitV6Stacked
        MOV     a1, #1
        ; return 1 if OK
        LDMFD   sp!, {v6, pc}^

; _kernel_oserror *_kernel_last_oserror(void);
; Returns the most recently recorded OS error block and clears the
; "error pending" flag, or 0 if none has occurred since the last call.
|_kernel_last_oserror|
        LoadStaticBase ip, a1
        LDR     a1, [ip, #O_errorBuffer]
        CMP     a1, #0
        ADDNE   a1, ip, #O_errorNumber          ; -> saved {errno, message}
        MOVNE   a2, #0
        STRNE   a2, [ip, #O_errorBuffer]        ; mark as consumed
        MOVS    pc, r14

|_kernel_system|
        ; Execute the string a1 as a command; if a2 is zero, as a subprogram,
        ; otherwise a replacement.
        ;
        STMFD   sp!, {v1-v6, r14}
        LoadStaticBase v6, ip
        LDRB    v5, [v6, #O_fpPresent]
        CMPS    v5, #0
        STFNEE  f7, [sp, #-12]!                 ; save f4-f7 and the FP status
        STFNEE  f6, [sp, #-12]!                 ; word, but only if an FPU is
        STFNEE  f5, [sp, #-12]!                 ; present
        STFNEE  f4, [sp, #-12]!
        RFSNE   v5
        STMFD   sp!, {a1, v5}
        LDR     v5, [v6, #O_heapLimit]
        LDR     v4, [v6, #O_heapTop]
        LDR     v3, [v6, #O_imageBase]
        ; if the heap has been extended, copying the image is futile at best
        ; (and maybe harmful if it has a hole)
        LDR     v2, [v6, #O_initSlotSize]
        CMP     v5, v2
        MOVGT   v4, v5                          ; so pretend top = limit.
        ; Calculate len of image and size of gap. We will not bother copying at all if gap
        ; is too small (but can't fault, because the command may not be an application)
        SUB     r0, v4, v3                      ; Len = heapTop - imageBase
        ADD     r0, r0, #15
        BIC     v2, r0, #15                     ; rounded Len, multiple of 16
        SUB     r14, v5, v3                     ; heapLimit - imageBase
        SUB     r14, r14, v2                    ; Gap = (heapLimit - imageBase) - Len
        ; if gap is too small, don't bother with copy. 1024 is an arbitrary
        ; small number, but what this is mainly aiming to avoid is the
        ; (otherwise possible) case of v6 < 0.
        CMP     r14, #1024
        MOVLT   r14, #0                         ; Gap = 0 => no image copy
        STMFD   sp!, {a2, v2-v5, r14, r15}      ; save them away
                                                ; subr/chain, len, base, top, limit, gap
                                                ; hole for memoryLimit (the r15 slot)
        BL      InstallCallersHandlers          ; sets r4 to current memoryLimit
        STR     r4, [sp, #24]                   ; fill the hole
        ADD     r14, sp, #16
        LDMIA   r14, {v5, r14}                  ; recover limit, gap
        LDRB    r0, [v6, #O_underDesktop]       ; if under desktop, find what the
        CMP     r0, #0                          ; Wimp slot size currently is, so we
        MOVNE   r0, #Env_ApplicationSpace       ; can reset it later on
        MOV     r1, #0
        SWINE   ChangeEnv
        STMFD   sp!, {r1}                       ; remember slot size
        ; All registers must be preserved whose values are wanted afterwards.
        ; v1 to v6 are already on the stack.
ADD ip, sp, r14 ADD r5, v6, #O_languageEnvSave STMIA r5, {fp, ip, sl} ; save ptr to moved stack ADD v6, v6, r14 ; now points to the to be copied data ; The following loop copies the image up memory. It avoids overwriting ; itself by jumping to its copied copy as soon as it has copied itself, ; unless, of course, it's running in the shared C library... ; The image is copied in DECREASING address order. CopyUp CMP r14, #0 BEQ CopyUpDone LDR v2, [sp, #8] ; image base LDR v3, [sp, #12] ; Len ADD r1, v3, v2 ; imageBase + Len = initial src BIC v4, pc, #&FC000003 ; where to copy down to before jumping CMP v4, v5 ; copy code > limit? ADDGT v4, v3, #16 ; yes => in shared lib so fake v4 MOVGT v2, #0 ; and don't jump to non-copied code MOVLE v2, r14 ; else jump to copied code 01 LDMDB r1!, {r0,r2-r4} STMDB v5!, {r0,r2-r4} CMP r1, v4 ; r1 < %B01 ? BGT %B01 ; no, so keep going... [ StrongARM ;in case we are jumping to code we have just copied here (ie not shared Clib)... CMP v2, #0 MOVNE r0, #0 SWINE XOS_SynchroniseCodeAreas ] ADD r0, pc, v2 ; ... go to moved image MOV pc, r0 ; and continue copying up... 01 LDMDB r1!, {r0,r2-r4} STMDB v5!, {r0,r2-r4} CMP r1, v3 ; src > imageBase ? BGT %B01 ; yes, so continue ;StrongARM - no need to synchronise for rest of copied code here, since we will not ;be executing it (we have to synchronise later, after copying down) CopyUpDone ; ip is the relocated sp. LDR r0, [ip, #4] ; chain/subr CMP r0, #0 MOVEQ r0, #Env_MemoryLimit LDREQ r1, [v6, #O_imageBase] ADDEQ r1, r1, r14 SWIEQ ChangeEnv MOVEQ r0, #Env_ErrorHandler ADREQ r1, s_ErrHandler MOVEQ r2, v6 ADDEQ r3, v6, #O_errorBuffer SWIEQ ChangeEnv MOVEQ r0, #Env_ExitHandler ADREQ r1, s_ExitHandler MOVEQ r2, v6 SWIEQ ChangeEnv MOVEQ r0, #Env_UpCallHandler ; We don't really want one of these, ... ADREQ r1, s_UpCallHandler ; but RISCOS rules say we must have it MOVEQ r2, v6 SWIEQ ChangeEnv LDR r0, [ip, #32] ; the CLI string to execute ADD r0, r0, r14 ; ... suitably relocated... 
SWI CLI:AND::NOT:X ; force non-X variant B s_Exit s_UpCallHandler MOV pc, r14 s_ErrHandler MOV v6, r0 MOV r0, #-2 B s_Exit2 s_ExitHandler MOV v6, r12 s_Exit MOV r0, #0 s_Exit2 ADD r5, v6, #O_languageEnvSave LDMIA r5, {fp, sp, sl} LDMFD sp!, {a2, a3, v1-v5} ; slotsize, ;subr/chain, Len, Base, Top, Limit, Gap STR r0, [sp, #4] ; ... over prev saved r0... CMP a3, #0 SWINE Exit MOVS a1, a2 BLNE SetWimpSlot_Save_r4r5 ; set slot size back to value before CLI LDMNEFD sp!, {r4, r5} SUB sp, sp, v5 ; and relocate sp... SUB v6, v6, v5 ; ...and the static data ptr ; The following loop copies the image down memory. It avoids overwriting ; itself by jumping to its copied copy as soon as it has copied itself, ; unless of course, this code is running in the shared C library... ; The image is copied in ASCENDING address order. CMP v5, #0 BEQ CopyDnDone CopyDn SUB r0, v4, v1 ; limit - L = init src BIC v3, pc, #&FC000003 ADD v3, v3, #%F02-.-4 ; where to copy to before jumping CMP v3, v4 ; copy code > limit? SUBGT v3, v4, #16 ; yes => in shared lib so fake v3 MOVGT v1, #0 ; and don't jump to not copied code... MOVLE v1, v5 ; else jump... 01 LDMIA r0!, {r1-r3,ip} STMIA v2!, {r1-r3,ip} CMP r0, v3 ; copied the copy code? BLT %B01 ; no, so continue... [ StrongARM ;in case we are jumping to code we have just copied here (ie not shared Clib)... MOV r1, r0 CMP v1, #0 MOVNE r0, #0 SWINE XOS_SynchroniseCodeAreas MOV r0, r1 ] SUB ip, pc, v1 ; yes => copied this far ... MOV pc, ip ; ... so branch to copied copy loop 01 LDMIA r0!, {r1-r3,ip} STMIA v2!, {r1-r3,ip} CMP r0, v4 ; finished copying? BLT %B01 ; no, so continue... 
02
CopyDnDone
 [ StrongARM
        ; you've guessed it - flush the freshly copied-down code
        MOV     r0, #0
        SWI     XOS_SynchroniseCodeAreas
 ]
        LDMFD   sp!, {r0}                       ; old memoryLimit
        BL      InstallHandlers
        LDMFD   sp!, {a1, v5}
        LDRB    v4, [v6, #O_fpPresent]
        CMPS    v4, #0
        WFSNE   v5                              ; restore FP status word and
        LDFNEE  f4, [sp], #12                   ; f4-f7, if an FPU is present
        LDFNEE  f5, [sp], #12                   ; (matches the saves on entry to
        LDFNEE  f6, [sp], #12                   ; _kernel_system)
        LDFNEE  f7, [sp], #12
        LDMFD   sp!, {v1-v6, pc}^

CopyError Keep
        ; a1 is the address of an error block (may be ours)
        ; we want to copy its contents into our error block,
        ; so _kernel_last_oserror works.
        LoadStaticBase v6, a2
CopyErrorV6OK
        MOV     a4, a1
        ADD     a2, v6, #O_errorNumber
CopyError2
        STR     pc, [a2, #-4]                   ; mark as valid (any non-zero will do)
        LDMIA   a4!, {a3}                       ; copy the error number word
        STMIA   a2!, {a3}
CopyErrorString
01
        LDRB    a3, [a4], #+1                   ; copy message bytes, stopping
        STRB    a3, [a2], #+1                   ; after the first control char
        CMP     a3, #' '
        BCS     %B01
        MOVS    pc, r14

|_kernel_getenv|
        ; _kernel_oserror *_kernel_getenv(const char *name, char *buffer, unsigned size);
        MOV     ip, sp
        STMFD   sp!, {v1, v2, v6, fp, ip, r14, pc}
        SUB     fp, ip, #4
        LoadStaticBase v6, ip
        MOV     r3, #0
        MOV     r4, #3                          ; convert value to string form
        SWI     X:OR:ReadVarVal
        BLVS    CopyError
        MOVVC   a1, #0
        STRVCB  a1, [r1, r2]                    ; NUL-terminate the value
        LDMDB   fp, {v1, v2, v6, fp, sp, pc}^

|_kernel_setenv|
        ; _kernel_oserror *_kernel_setenv(const char *name, const char *value);
        STMFD   sp!, {v1, v6, r14}
        LoadStaticBase v6, ip
        ; Apparently, we need to say how long the value string is as well
        ; as terminating it.
; (in _kernel_setenv) measure strlen(value) into a3, then set the variable.
        MOV     a3, #0
01
        LDRB    ip, [a2, a3]
        CMP     ip, #0
        ADDNE   a3, a3, #1
        BNE     %B01
        MOV     r3, #0
        MOV     r4, #0                          ; plain (string) variable
        SWI     X:OR:SetVarVal
        BLVS    CopyError
        MOVVC   a1, #0
        LDMFD   sp!, {v1, v6, pc}^

;*-------------------------------------------------------------------*
;* Storage management                                                 *
;*-------------------------------------------------------------------*

|_kernel_register_allocs|
        ; void _kernel_register_allocs(allocproc *malloc, freeproc *free);
        LoadStaticBase ip, a3
        ADD     ip, ip, #O_allocProc
        STMIA   ip, {a1, a2}                    ; record both procs together
        MOVS    pc, r14

; Register a heap (slot) extension handler; returns the previous handler.
; A zero argument just reads the current one without replacing it.
|_kernel_register_slotextend|
        LoadStaticBase ip, a3
        MOVS    a2, a1
        LDR     a1, [ip, #O_heapExtender]
        STRNE   a2, [ip, #O_heapExtender]
        MOVS    pc, r14

|_kernel_alloc|
        ; unsigned _kernel_alloc(unsigned minwords, void **block);
        ; Tries to allocate a block of sensible size >= minwords. Failing that,
        ; it allocates the largest possible block of sensible size. If it can't do
        ; that, it returns zero.
        ; *block is returned a pointer to the start of the allocated block
        ; (NULL if none has been allocated).
        LoadStaticBase ip, a3
        CMP     r0, #2048                       ; enforce a minimum request of
        MOVLT   r0, #2048                       ; 2048 words
        ADD     ip, ip, #O_heapTop
        LDMIA   ip, {r2, r3}                    ; r2 = heapTop, r3 = heapLimit
        SUB     r3, r3, r0, ASL #2              ; room for a block of this size?
        CMP     r3, r2                          ; if so, ...
        BGE     alloc_return_block
        ; There's not going to be room for the amount required.  See if
        ; we can extend our workspace.
        LDRB    r3, [ip, #O_underDesktop-O_heapTop]
        CMP     r3, #0
 [ SharedLibrary
        ; See if we are allowed to extend the wimp-slot - depends on the stub vintage
        ; if running under the shared library. If not shared, we can do it provided
        ; we're running under the desktop.
        LDRNEB  r3, [ip, #O_kallocExtendsWS-O_heapTop]
        CMPNE   r3, #0
 ]
        BEQ     alloc_cant_extend               ; not under desktop or old stubs
        STMFD   sp!, {r0, r1, lr}
        LDR     r3, [ip, #O_heapExtender-O_heapTop]
        CMP     r3, #0
        BEQ     alloc_no_extender
        LDR     r0, [sp], #-4                   ; ask for the amount we were asked for
                                                ; (sp moves DOWN a word: a slot for
                                                ; the extender's result pointer)
        MOV     r0, r0, ASL #2                  ; (since what we are given may well
                                                ; not be contiguous with what we had
                                                ; before).
; Set to a silly value, guaranteed not to be equal to initSlotSize, to ensure ; reset on exit. STR r3, [ip, #O_knownSlotSize-O_heapTop] MOV r1, sp MOV lr, pc MOV pc, r3 LoadStaticBase ip, a3 ; restore our static base ADD ip, ip, #O_heapTop LDR r1, [sp], #+4 ; base of area acquired CMP r0, #0 ; size (should be 0 or big enough) BEQ alloc_cant_extend_0 LDMIA ip, {r2, lr} CMP lr, r1 SUBNE r3, lr, r2 ; if not contiguous with old area, amount free ADD lr, r1, r0 ; adjust heapLimit MOVNE r0, r2 ; if not contiguous, remember old heapTop MOVNE r2, r1 ; and adjust STMIA ip, {r2, lr} CMPNE r3, #0 ; if contiguous, or old area had no free space, BEQ alloc_cant_extend_0 ; return from new area ADD sp, sp, #4 ; otherwise, return block from top of old area LDMFD sp!, {r1, lr} ; first (malloc will try again and get from STR r0, [r1] ; new area). MOV r0, r3, ASR #2 MOVS pc, r14 alloc_no_extender ; if current slotsize = heap limit, try to extend the heap by the ; amount required (or perhaps by just enough to allow allocation ; of the amount required) ADD lr, r2, r0, ASL #2 ; heaptop if request could be granted MOV r0, #Env_ApplicationSpace ; find current slotsize MOV r1, #0 SWI ChangeEnv LDR r0, [ip, #O_knownSlotSize-O_heapTop] CMP r1, r0 BNE alloc_cant_extend_0 ; If the extension will be contiguous with the current heap top, ; then we need just enough to allow the requested allocation. LDMIA ip, {r2, r3} CMP r3, r0 BEQ alloc_extend_slot ; Otherwise, we must extend by the amount requested. If there's still ; some space left in the previous area, give that back first. CMP r2, r3 BNE alloc_cant_extend_0 STR r0, [ip, #O_heapTop-O_heapTop] LDR r2, [sp] ADD lr, r0, r2, ASL #2 alloc_extend_slot ; lr holds the slot size we want. r1 is the current memory limit. ; Now if memory limit is not slot size, we must reset memory limit ; temporarily over the call to Wimp_SlotSize (or it will refuse). 
; (in _kernel_alloc) grow the Wimp slot to lr bytes; r1 is the current
; memory limit, which SetWimpSlot_Save_r4r5 restores around the call.
        MOV     r0, lr
        BL      SetWimpSlot_Save_r4r5
        LDMFD   sp!, {r4, r5}
        ADD     r0, r0, #Application_Base       ; slot size -> address limit
        STR     r0, [ip, #O_knownSlotSize-O_heapTop]
        STR     r0, [ip, #O_heapLimit-O_heapTop]
alloc_cant_extend_0
        LDMFD   sp!, {r0, r1, lr}
alloc_cant_extend
        LDMIA   ip, {r2, r3}                    ; r2 = heapTop, r3 = heapLimit
        SUB     r3, r3, r0, ASL #2              ; room for a block of this size?
        CMP     r3, r2                          ; if so, ...
alloc_return_block
        STRGE   r2, [r1]                        ; return it above the previous heapTop
        ADDGE   r2, r2, r0, ASL #2              ; and update heapTop
        STRGE   r2, [ip]
        MOVGES  pc, r14
        ADD     r0, r0, r3, ASR #2              ; otherwise, return whatever is free
        SUB     r0, r0, r2, ASR #2              ; (r0 += (r3 - r2) / 4 words)
        CMP     r0, #0
        BGT     alloc_return_block              ; flags here satisfy the GEs above
        STR     r0, [r1]                        ; (if none, returned block is NULL,
        MOVS    pc, r14                         ; and don't update heapTop)

|_kernel_malloc|
        ; void * _kernel_malloc(int numbytes);
        ; Allocates numbytes bytes (rounded up to number of words), and returns
        ; the block allocated.  If it can't, returns NULL.
        ; Normally, this will be replaced by the real malloc very early in the
        ; startup procedure.
        LoadStaticBase ip, a2
        ADD     ip, ip, #O_heapTop
        LDMIA   ip, {r2, r3}                    ; r2 = heapTop, r3 = heapLimit
        SUB     r3, r3, r0                      ; room for a block of this size?
        CMP     r3, r2                          ; if so, ...
        ADDGE   r3, r2, r0
        MOVGE   r0, r2                          ; return it above heapTop
        STRGE   r3, [ip]
        MOVLT   r0, #0                          ; else NULL
        MOVS    pc, r14

|_kernel_free|
        ; void free(void *);
        ; Frees the argument block.
        ; Normally, this will be replaced by the real free very early in the
        ; startup procedure.
        ; I don't think there's much point in providing a real procedure for this;
        ; if I do, it complicates malloc and alloc above.
        MOVS    pc, r14

;*-------------------------------------------------------------------*
;* Stack chunk handling                                               *
;*-------------------------------------------------------------------*

; Current stack chunk = sl minus the fixed sl-to-chunk offset.
|_kernel_current_stack_chunk|
        SUB     a1, sl, #SC_SLOffset
        MOVS    pc, r14

;*-------------------------------------------------------------------*
;* Stack overflow handling                                            *
;*-------------------------------------------------------------------*

|_kernel_stkovf_split_0frame|
        ; Run out of stack.
; Before doing anything else, we need to acquire some work registers ; as only ip is free. ; We can save things on the stack a distance below fp which allows the ; largest possible list of saved work registers (r0-r3, r4-r9 inclusive, ; plus fp, sp, lr, entry pc, = 14 regs in total) plus a minimal stack ; frame for return from StkOvfExit (a further 4 words, giving 18 in total) ; plus 4 extended floating point registers (a further 3*4 words) MOV ip, sp |_kernel_stkovf_split| SUB ip, sp, ip ; size required SUB sp, fp, #29*4 STMFD sp!, {a1, a2, v1-v6, lr}; to save a1-a2, v1-v6 [ 0 = 1 LoadStaticAddress disable_stack_extension, a1, lr LDR a1, [a1] CMP a1, #0 BNE StackOverflowFault ] ADD v4, ip, #SC_SLOffset ; required size + safety margin SUBS v1, fp, #30*4 ; save area ptr, clear V flag BL GetStackChunk BVS StackOverflowFault ; Get here with v2 pointing to a big enough chunk of size v3 ; (Not yet marked as a stack chunk) ADD sl, v2, #SC_SLOffset ; make the new sl ADD sp, v2, v3 ; and initial sp LDR a1, =IsAStackChunk STR a1, [sl, #SC_mark-SC_SLOffset] ; v1 is save area in old frame... will be temp sp in old frame ADD a1, v1, #4*4 ; temp fp in old frame LDMDA fp, {v3-v6} ; old fp, sp,lr, pc [ StrongARM STMFD sp!,{a1-a2} MOV a1,#0 SWI XOS_PlatformFeatures MOVVS a1,#0 TST a1,#8 ADREQ v6, StkOvfPseudoEntry+12 ADRNE v6, StkOvfPseudoEntry+8 LDMFD sp!,{a1-a2} | ADR v6, StkOvfPseudoEntry+12 ] STMDA a1, {v3-v6} ; new return frame in old chunk... ADR lr, StackOverflowExit MOV a2, sp ; saved sp in old frame = NEW sp ; (otherwise exit call is fatal) STMDB fp, {a1, a2, lr} ; pervert old frame to return here... LDMDA v1, {a1, a2, v1-v6, pc}^ |_kernel_stkovf_copy0args| ; Run out of stack. ; Before doing anything else, we need to acquire some work registers ; (IP is free in the StkOvf case, but not in the StkOvfN case). 
; We can save things on the stack a distance below FP which allows the ; largest possible list of saved work registers (R0-R3, R4-9 inclusive, ; plus FP, SP, LR, entry PC, = 14 regs in total) plus 4 extended floating ; point registers, a further 3*4 words MOV ip, #0 ; STKOVF not STKOVFN |_kernel_stkovf_copyargs| ; the (probable) write below sp here is inevitable - there are no registers ; free. The only way to make this safe against events is to pervert sl ; temporarily. ADD sl, sl, #4 STR lr, [fp, #-26*4] ; save LR SUB lr, fp, #26*4 ; & use as temp SP STMFD lr!, {a1, a2, v1-v6} ; to save A1-A2, V1-V6 [ 0 = 1 LoadStaticAddress disable_stack_extension, a1, lr LDR a1, [a1] CMP a1, #0 BNE StackOverflowFault ] SUB v4, fp, sp ; needed frame size ADD v4, v4, #SC_SLOffset ; + safety margin MOV sp, lr ; got an SP now... SUB sl, sl, #4 ; We do not drop SL here : the code that gets called to acquire a new ; stack chunk had better not check for stack overflow (and also had ; better not use more than the minimum that may be available). SUBS v1, fp, #26*4 ; save area ptr, clear V flag BL GetStackChunk BVS StackOverflowFault ; out of stack ; Get here with V2 pointing to a big enough chunk of size V3 ADD sl, v2, #SC_SLOffset ; make the new SL ADD sp, v2, v3 ; and initial SP LDR a1, =IsAStackChunk STR a1, [sl, #SC_mark-SC_SLOffset] ; Copy over 5th and higher arguments, which are expected on the stack... CMP ip, #0 BLE DoneArgumentCopy 01 LDR v5, [fp, ip, ASL #2] ; copy args in high->low STR v5, [sp, #-4]! ; address order SUBS ip, ip, #1 BNE %B01 DoneArgumentCopy ; Now create a call frame in the new stack chunk by copying ; over stuff saved in the frame in the old stack chunk to the ; new, perverting LR so that, on return, control comes back to ; this code and perverting SP and FP to give us a save area ; containing none of the V registers. 
MOV v1, fp ; old chunk's frame pointer SUB ip, v4, #SC_SLOffset ; needed frame size, no margin LDMDA v1!, {a1, a2, v2-v6} ; 1st 7 of possible 14 saved regs ADR v5, StackOverflowExit ; return address... MOV v4, sp ; SP in NEW chunk ORR v3, fp, #ChunkChange ; new FP in old chunk SUB fp, sp, #4 ; FP in new chunk STMFD sp!, {a1, a2, v2-v6} ; 1st 7 copied frame regs LDMDA v1!, {a1, a2, v2-v6} ; and the 2nd 7 regs STMFD sp!, {a1, a2, v2-v6} ; copied to the new frame ; Now adjust the PC value saved in the old chunk to say "no registers" [ StrongARM MOV a1,#0 SWI XOS_PlatformFeatures MOVVS a1,#0 TST a1,#8 ADREQ v2, StkOvfPseudoEntry+12 ADRNE v2, StkOvfPseudoEntry+8 | ADR v2, StkOvfPseudoEntry+12 ] STR v2, [v1, #26*4] ; Set the SP to be FP - requiredFrameSize and return by reloading regs ; from where they were saved in the old chunk on entry to STKOVF/N SUB sp, fp, ip LDMDA v1, {a1, a2, v1-v6, pc}^ StkOvfPseudoEntry STMFD sp!, {fp, ip, lr, pc} ; A register save mask StackOverflowExit Keep ; We return here when returning from the procedure which caused the ; stack to be extended. FP is in the old chunk SP and SL are still ; in the new one. ; The Z flag is set (through magic in the compatibility stubs) if stack ; extension happened in APCS_A, in which case we must permute the ; stack-description registers here and permute them back at the end. ; We do not do the expected ..EQ operations here, in order to keep the ; event handler happy - APCS change expected to be unconditional or NE. MOV r14, pc TST r14, #PSRZBit MOVNE fp, r10 MOVNE sl, r13 MOVNE sp, r12 ; We need to move sp and sl back into the old chunk. Since this happens ; in two operations, we need precautions against events while we're ; doing it. ADD sl, sl, #4 ; (an invalid stack-chunk handle) SUB sp, fp, #3*4 ; get a sensible sp in the old chunk STMFD sp!, {a1, v1-v4} ; Save some work regs MOV v4, r14 ; Remember if register permute needed ; Now see if the new chunk has a next chunk and deallocate it if it has. 
; (in StackOverflowExit) undo the +4 perversion of sl to recover the
; abandoned chunk's handle, step back to the previous chunk, and free
; any chunks still chained after it.
        SUB     v1, sl, #4                      ; v1 = abandoned chunk's sl handle
        LDR     v2, [v1, #SC_next-SC_SLOffset]
        LDR     sl, [v1, #SC_prev-SC_SLOffset]
        LDR     a1, [sl, #SC_mark]              ; make not a stack chunk (before making
        EOR     a1, a1, #&40000000              ; sl a proper stackchunk handle).
        STR     a1, [sl, #SC_mark]
        ADD     sl, sl, #SC_SLOffset
        SUB     sp, sp, #4
DeallocateChunkLoop
        CMP     v2, #0                          ; is there a next next chunk?
        BEQ     DoneDeallocateChunks            ; No! - do nothing
        LDR     v3, [v2, #SC_next]              ; next chunk
        MOV     a1, v2
        LDR     ip, [v2,#SC_deallocate]         ; deallocate proc for this chunk
        CMPS    ip, #0                          ; if there is one...
        MOV     lr, pc
        MOVNE   pc, ip                          ; then call it...
                                                ; NOTE(review): the NE conditions
                                                ; below rely on the callee's MOVS
                                                ; pc,lr return restoring the flags
                                                ; captured by MOV lr,pc - confirm
        MOVNE   a2, #0
        STRNE   a2, [v1, #SC_next-SC_SLOffset]  ; and unhook next chunk
        MOVEQ   v1, v2                          ; no deallocate proc: try next chunk
        MOV     v2, v3
        B       DeallocateChunkLoop
DoneDeallocateChunks
        ; Clear the chunk change bit, and return to caller by reloading the saved work
        ; regs and the frame regs that were adjusted at the time the stack was extended
        LDR     a1, =IsAStackChunk
        STR     a1, [sl, #SC_mark-SC_SLOffset]  ; re-mark sl's chunk as valid
        BIC     fp, fp, #ChunkChange
        ; Return, changing APCS if necessary
        TST     v4, #PSRZBit                    ; v4 remembered "was APCS_A"
        LDMDB   fp, {a1, v1-v4, fp, sp, lr}
        MOVNE   r12, sp                         ; permute back for APCS_A callers
        MOVNE   r13, sl
        MOVNE   r10, fp
        MOVS    pc, r14

GetStackChunk Keep
        ; Naughty procedure with non-standard argument conventions.
        ; On entry, V1 = save area ptr in case of error return; V4 = needed size;
        ; On exit, V1, and V4 are preserved, V2 points to the newly inserted chunk
        ; and V3 is the chunk's size.  In case of error, V is set.
StkOvfGetChunk
        TST     r14, #PSRSVCMode                ; in SWI mode, stack overflow is fatal
        ORRNES  pc, r14, #PSRVBit               ; (could have been detected earlier,
                                                ; but deferral to now is simpler).
        ; Check that the current chunk really is a stack chunk...
        STMFD   sp!, {a3, a4, ip, lr}           ; save args not saved before
        LDR     v2, =IsAStackChunk              ; magic constant...
        LDR     v3, [sl, #SC_mark-SC_SLOffset]
        CMP     v2, v3                          ; matches magic in chunk?
        BNE     StkOvfError                     ; No!
- die horribly EOR v3, v3, #&80000000 ; make not a stack chunk, so recursive STR v3, [sl, #SC_mark-SC_SLOffset] ; extension faults ; We have a chunk, see if there's a usable next chunk... SUB v2, sl, #SC_SLOffset 02 LDR v2, [v2, #SC_next] CMP v2, #0 BEQ StkOvfGetNewChunk ; No! - so make one LDR v3, [v2, #SC_size] CMP v4, v3 ; is it big enough? BGT %B02 ; No! so try next chunk ; unlink the usable chunk from the chain... LDR a1, [v2, #SC_prev] ; previous chunk LDR a2, [v2, #SC_next] ; next chunk STR a2, [a1, #SC_next] ; prev->next = next CMPS a2, #0 ; next == NULL ? STRNE a1, [a2, #SC_prev] ; next->prev = prev B StkOvfInsertChunk StkOvfGetNewChunk Keep ; Now we swap to the special extension chunk (to give a reasonable ; stack size to malloc). LoadStaticBase v2, ip LDR a2, [v2, #O_extendChunk] LDR a3, [a2, #SC_size] ADD a3, a2, a3 ; new sp STMFD a3!, {sl, fp, sp} ; save old stack description MOV sp, a3 ADD sl, a2, #SC_SLOffset MOV fp, #0 MOV a1, #RootStackSize; new chunk is at least this big CMP a1, v4 ; but may be bigger if he wants a huge frame MOVLT a1, v4 LDR ip, [v2, #O_allocProc] CMPS ip, #0 BEQ %F01 ; (restore stack chunk, then error) LDR v2, [v2, #O_freeProc] STMFD sp!, {a1, v2} ; chunk size in bytes, dealloc proc MOV lr, pc MOV pc, ip MOVS v2, a1 LDMFD sp!, {v3, ip} ; size in bytes, dealloc 01 LDMFD sp, {sl, fp, sp} ; back to old chunk BEQ StkOvfError STR v3, [v2, #SC_size] STR ip, [v2, #SC_deallocate] LDR a1, [sl, #SL_Lib_Offset] STR a1, [v2, #SL_Lib_Offset+SC_SLOffset] LDR a1, [sl, #SL_Client_Offset] STR a1, [v2, #SL_Client_Offset+SC_SLOffset] ; and re-link it in its proper place... StkOvfInsertChunk SUB a1, sl, #SC_SLOffset ; chunk needing extension... 
LDR a2, =IsAStackChunk STR a2, [a1, #SC_mark] ; remark as stack chunk LDR a2, [a1, #SC_next] ; its next chunk STR a2, [v2, #SC_next] ; this->next = next STR v2, [a1, #SC_next] ; prev->next = this STR a1, [v2, #SC_prev] ; this->prev = prev CMPS a2, #0 STRNE v2, [a2, #SC_prev] ; next->prev = this STR pc, [v2, #SC_mark] ; Not a stack chunk (for safe non-atomic ; update of sp and sl). LDMFD sp!, {a3, a4, ip, pc}^ ; restore extra saved regs StkOvfError LDMFD sp!, {a3, a4, ip, lr} ORRS pc, lr, #PSRVBit ; return with V set StackOverflowFault Keep LoadStaticBase ip, a1 MOV sp, v1 LDMDA v1, {a1, a2, v1-v6, lr} ADD ip, ip, #O_registerDump STMIA ip, {a1 - r14} ADR r0, E_StackOverflow BL |_kernel_copyerror| SWI GenerateError ErrorBlock StackOverflow, "Stack overflow", C45 ;*-------------------------------------------------------------------* ;* Arithmetic * ;*-------------------------------------------------------------------* |_kernel_udiv| ; Unsigned divide of a2 by a1: returns quotient in a1, remainder in a2 ; Destroys a3, a4 and ip MOVS a3, a1 BEQ dividebyzero MOV a4, #0 MOV ip, #&80000000 CMP a2, ip MOVLO ip, a2 u_loop CMP ip, a3, ASL #0 BLS u_shifted0mod8 CMP ip, a3, ASL #1 BLS u_shifted1mod8 CMP ip, a3, ASL #2 BLS u_shifted2mod8 CMP ip, a3, ASL #3 BLS u_shifted3mod8 CMP ip, a3, ASL #4 BLS u_shifted4mod8 CMP ip, a3, ASL #5 BLS u_shifted5mod8 CMP ip, a3, ASL #6 BLS u_shifted6mod8 CMP ip, a3, ASL #7 MOVHI a3, a3, ASL #8 BHI u_loop u_loop2 u_shifted7mod8 CMP a2, a3, ASL #7 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #7 u_shifted6mod8 CMP a2, a3, ASL #6 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #6 u_shifted5mod8 CMP a2, a3, ASL #5 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #5 u_shifted4mod8 CMP a2, a3, ASL #4 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #4 u_shifted3mod8 CMP a2, a3, ASL #3 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #3 u_shifted2mod8 CMP a2, a3, ASL #2 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #2 u_shifted1mod8 CMP a2, a3, ASL #1 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #1 u_shifted0mod8 CMP a2, a3, 
ASL #0 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #0 CMP a1, a3, LSR #1 MOVLS a3, a3, LSR #8 BLS u_loop2 MOV a1, a4 MOVS pc, r14 ; Unsigned remainder of a2 by a1: returns remainder in a1 ; Could be faster (at expense in size) by duplicating code for udiv, ; but removing the code to generate a quotient. As it is, a sensible ; codegenerator will call udiv directly and use the result in a2 ; Unfortunately, udiv doesn't preserve any (but the callee-save) registers, ; so we need to stack our link over the call. |_kernel_urem| STMFD sp!, {r14} BL |_kernel_udiv| MOV a1, a2 LDMFD sp!, {pc}^ ; Fast unsigned divide by 10: dividend in a1 ; Returns quotient in a1, remainder in a2 ; ; Calculate x / 10 as (x * 2**32/10) / 2**32. ; That is, we calculate the most significant word of the double-length ; product. In fact, we calculate an approximation which may be 1 off ; because we've ignored a carry from the least significant word we didn't ; calculate. We correct for this by insisting that the remainder < 10 ; and by incrementing the quotient if it isn't. |__rt_udiv10| |_kernel_udiv10| MOV a2, a1 MOV a1, a1, LSR #1 ADD a1, a1, a1, LSR #1 ADD a1, a1, a1, LSR #4 ADD a1, a1, a1, LSR #8 ADD a1, a1, a1, LSR #16 MOV a1, a1, LSR #3 ADD a3, a1, a1, ASL #2 SUB a2, a2, a3, ASL #1 CMP a2, #10 ADDGE a1, a1, #1 SUBGE a2, a2, #10 MOVS pc, r14 |_kernel_sdiv| ; Signed divide of a2 by a1: returns quotient in a1, remainder in a2 ; Quotient is truncated (rounded towards zero). ; Sign of remainder = sign of dividend. ; Destroys a3, a4 and ip ; Negates dividend and divisor, then does an unsigned divide; signs ; get sorted out again at the end. ; Code mostly as for udiv, except that the justification part is slightly ; simplified by knowledge that the dividend is in the range [0..#x80000000] ; (one register may be gained thereby). 
MOVS ip, a1 BEQ dividebyzero RSBMI a1, a1, #0 ; absolute value of divisor EOR ip, ip, a2 ANDS a4, a2, #&80000000 ORR ip, a4, ip, LSR #1 ; ip bit 31 sign of dividend (= sign of remainder) ; bit 30 sign of dividend EOR sign of divisor (= sign of quotient) RSBNE a2, a2, #0 ; absolute value of dividend MOV a3, a1 MOV a4, #0 s_loop CMP a2, a3, ASL #0 BLS s_shifted0mod8 CMP a2, a3, ASL #1 BLS s_shifted1mod8 CMP a2, a3, ASL #2 BLS s_shifted2mod8 CMP a2, a3, ASL #3 BLS s_shifted3mod8 CMP a2, a3, ASL #4 BLS s_shifted4mod8 CMP a2, a3, ASL #5 BLS s_shifted5mod8 CMP a2, a3, ASL #6 BLS s_shifted6mod8 CMP a2, a3, ASL #7 MOVHI a3, a3, ASL #8 BHI s_loop s_loop2 CMP a2, a3, ASL #7 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #7 CMP a2, a3, ASL #6 s_shifted6mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #6 CMP a2, a3, ASL #5 s_shifted5mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #5 CMP a2, a3, ASL #4 s_shifted4mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #4 CMP a2, a3, ASL #3 s_shifted3mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #3 CMP a2, a3, ASL #2 s_shifted2mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #2 CMP a2, a3, ASL #1 s_shifted1mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #1 CMP a2, a3, ASL #0 s_shifted0mod8 ADC a4, a4, a4 SUBHS a2, a2, a3, ASL #0 CMP a1, a3, LSR #1 MOVLS a3, a3, LSR #8 BLS s_loop2 MOV a1, a4 TST ip, #&40000000 RSBNE a1, a1, #0 TST ip, #&80000000 RSBNE a2, a2, #0 MOVS pc, r14 ; Signed remainder of a2 by a1: returns remainder in a1 |_kernel_srem| STMFD sp!, {r14} BL |_kernel_sdiv| MOV a1, a2 LDMFD sp!, {pc}^ ; Fast signed divide by 10: dividend in a1 ; Returns quotient in a1, remainder in a2 ; Quotient is truncated (rounded towards zero). 
; Fast signed divide by 10: dividend in a1.
; Returns quotient in a1 (truncated towards zero), remainder in a2 (carries
; the sign of the dividend).  Same reciprocal-multiply method as
; _kernel_udiv10, with sign fix-up around it.
|__rt_sdiv10|
|_kernel_sdiv10|
        MOVS    a4, a1                          ; a4 remembers the dividend (for its sign)
        RSBMI   a1, a1, #0                      ; work on the absolute value
        MOV     a2, a1
        MOV     a1, a1, LSR #1
        ADD     a1, a1, a1, LSR #1
        ADD     a1, a1, a1, LSR #4
        ADD     a1, a1, a1, LSR #8
        ADD     a1, a1, a1, LSR #16
        MOV     a1, a1, LSR #3                  ; a1 ~= |dividend| / 10
        ADD     a3, a1, a1, ASL #2
        SUB     a2, a2, a3, ASL #1              ; a2 = |dividend| - 10*quotient
        CMP     a2, #10                         ; fix possible off-by-one
        ADDGE   a1, a1, #1
        SUBGE   a2, a2, #10
        CMP     a4, #0                          ; dividend negative?
        RSBMI   a1, a1, #0                      ; then negate both results
        RSBMI   a2, a2, #0
        MOVS    pc, r14

dividebyzero
; Dump all registers, then enter the abort code.
; We need to discover whether we were doing a divide (in which case,
; r14 is a valid link), or a remainder (in which case, we must retrieve
; the link from the stack).
        LoadStaticBase ip, a3
        ADD     ip, ip, #O_registerDump
        STMIA   ip, {r0-r13}
        BIC     r0, r14, #PSRBits               ; r0 = return address without PSR bits
        SUBS    r1, pc, r0
        ADRGE   r1, |_kernel_udiv|
        CMPGE   r0, r1                          ; NOTE(review): address test distinguishing a
        LDMGEFD sp!, {r14}                      ; BL from [us]rem (link on stack) - confirm
        STR     r14, [ip, #r14*4]               ; record the faulting link in the dump
        ADR     r0, E_DivideByZero
10      SUB     r14, r14, #4
        STR     r14, [ip, #pc*4]                ; faulting "pc" = link - 4
        BL      |_kernel_copyerror|             ; r0 := internationalised error block
        SWI     EnterSVC
        LDR     r14, [ip, #pc * 4]
        LDMIB   ip, {r1-r14}^                   ; reload user registers from the dump
        NOOP                                    ; (no banked-register access straight after ^)
        STMDB   sp!, {r10, r11, r12}
        STMDB   sp!, {r14}
        SWI     GenerateError
 [ 0 = 1
; Disabled alternative sequence (conditional "0 = 1" never assembles).
        ADD     ip, ip, #pc*4
        LDMIA   ip, {r14}^
        SUB     ip, ip, #pc*4-4
        LDMIA   ip, {r1-r13}
        TEQP    pc, #0
        NOOP
        SWI     GenerateError
 ]

        EXPORT |_kernel_fault|
|_kernel_fault|
; r0 points to an error block;
; original r0 is on the stack.
; r14 is the place to pretend the fault happened
        STMFD   sp!, {r1, ip}
        LoadStaticBase ip, r1
        ADD     ip, ip, #O_registerDump
        STMIA   ip, {r0-r14}
        LDMFD   sp!, {r1, r2, r3}               ; r1 = saved r1, r2 = saved ip, r3 = orig r0
        STR     r3, [ip]                        ; patch dump: original r0...
        STR     r2, [ip, #ip*4]                 ; ...caller's ip...
        STR     r1, [ip, #r1*4]                 ; ...and caller's r1
        B       %B10                            ; join dividebyzero's reporting code
        ErrorBlock DivideByZero, "Divide by zero", C06

; --- International message lookup routines ----------------------------
        EXPORT |_kernel_copyerror|
        EXPORT |_kernel_getmessage|
 [ SharedLibrary
; Only works with module for the moment
XMessageTrans_OpenFile EQU &61501
XMessageTrans_Lookup EQU &61502
XMessageTrans_CloseFile EQU &61504
XMessageTrans_ErrorLookup EQU &61506
MessageTrans_OpenFile EQU &41501
MessageTrans_Lookup EQU &41502
MessageTrans_CloseFile EQU &41504
MessageTrans_ErrorLookup EQU &41506
n_module_claim EQU 6
n_module_lookupname EQU 18

; Lookup an error message
;
; On entry:
;   R0 = Pointer to "international" error block.
;        +--------------------+
;        | Default error no.  |  - Default error no and str are used if message file
;        +--------------------+    cannot be opened or an error occurs trying to read
;        | Default error str. |    the message file.
;        +--------------------+
;        | Pad. to word align |
;        +--------------------+
;        | Error no.          |  - Real error numbers (may be same as default)
;        +--------------------+
;        | Error message tag  |    Message tag in message file
;        +--------------------+
; Return:
;   R0 = Pointer to selected error block (default or from message file)
;
|_kernel_copyerror|
        STMDB   sp!, {r1-r7, r12, lr}
        BL      open_messagefile                ; r1 := message file descriptor (0 if none)
        MOV     r2, #0                          ; no caller-supplied buffer
        ADR     r4, module_name                 ; parameter %0 for the message
        MOV     r5, #0
        MOV     r6, #0
        MOV     r7, #0
        SWI     XMessageTrans_ErrorLookup
        LDMIA   sp!, {r1-r7, r12, pc}^

; Try to get a message from the message file
;
; On entry:
;   R0 = Message to use if failed to get message from message file
;   R1 = Message tag
;
; Return:
;   R0 = Message
;
|_kernel_getmessage|
        STMDB   sp!, {r0-r7, r12, lr}
        BL      open_messagefile
        MOV     r0, r1                          ; r0 := file descriptor for the SWI
 [ :DEF:DEFAULT_TEXT
        LDR     r1, [sp, #4]                    ; r1 := saved r1 (the tag)
 |
        LDR     r1, [sp]                        ; r1 := saved r0 (the fallback message)
 ]
        MOV     r2, #0
        MOV     r4, #0
        MOV     r5, #0
        MOV     r6, #0
        MOV     r7, #0
        SWI     XMessageTrans_Lookup
        STRVC   r2, [sp]                        ; success: looked-up text replaces saved r0
        LDMIA   sp!, {r0-r7, r12, pc}^

message_filename
        DCB     "SharedCLibrary:Messages", 0
        ALIGN
module_name
        DCB     "SharedCLibrary", 0
        ALIGN

; Try to open the message file.
; Returns R1 = MessageTrans file descriptor (module workspace), or 0 on failure.
open_messagefile
        STMDB   sp!, {r0, r2-r5, lr}
        MOV     r5, #0
        LDR     r1, [r5, #CLibWorkSpace]        ; cached descriptor, held in page zero
        CMP     r1, #0
        LDMNEIA sp!, {r0, r2-r5, pc}            ; already open: return cached descriptor
                                                ; NOTE(review): no ^ on this exit, unlike
                                                ; the others - flags not restored; confirm
        MOV     r0, #Module_Claim               ; claim RMA workspace for the descriptor
        MOV     r3, #Module_WorkSpace
        SWI     Module
        LDMVSIA sp!, {r0, r2-r5, pc}^           ; NB R1 = 0
        STR     r2, [r5, #CLibWorkSpace]        ; cache the workspace pointer
        MOV     r0, r2
        ADR     r1, message_filename
        MOV     r2, #0
        SWI     XMessageTrans_OpenFile
        MOVVC   r1, r0
        LDMVCIA sp!, {r0, r2-r5, pc}^
; Open failed: free the workspace again and return r1 = 0.
; NOTE(review): CLibWorkSpace is not cleared here, so the next call will
; return the freed pointer via the cached-descriptor fast path - confirm.
        MOV     r0, #Module_Free
        LDR     r2, [r5, #CLibWorkSpace]
        SWI     Module
        MOV     r1, #0
        LDMIA   sp!, {r0, r2-r5, pc}^
 |
; Non-module build: no MessageTrans, so both lookups are no-ops.
|_kernel_copyerror|
        MOVS    pc, lr
|_kernel_getmessage|
        MOVS    pc, lr
 ]

; Return (in r0) the current value of the byte counter at CLibCounter and
; post-increment it; IRQs are disabled around the read-modify-write.
|__counter|
        STMDB   sp!, {r0,lr}
        LDR     r0, =CLibCounter
        BL      |_kernel_irqs_off|              ; Disable IRQs round update.
        LDRB    lr, [r0]
        STR     lr, [sp]                        ; old value becomes the returned r0
        ADD     lr, lr, #1
        STRB    lr, [r0]
        BL      |_kernel_irqs_on|
        LDMIA   sp!, {r0,pc}
        END