diff --git a/Dev/HeapTest/Makefile b/Dev/HeapTest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0bc4f42e3bc3e1b938ba3e566384a4e4131fff10 --- /dev/null +++ b/Dev/HeapTest/Makefile @@ -0,0 +1,20 @@ +# Copyright 2011 Castle Technology Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +COMPONENT = HeapTest +OBJS = asm testbed + +include CApp + +# Dynamic dependencies: diff --git a/Dev/HeapTest/c/testbed b/Dev/HeapTest/c/testbed new file mode 100644 index 0000000000000000000000000000000000000000..420ee1367d566bcb96ff186ef50ab06496f63f8d --- /dev/null +++ b/Dev/HeapTest/c/testbed @@ -0,0 +1,500 @@ +/* Copyright 2011 Castle Technology Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* OS_Heap testbed code. Creates a heap and randomly allocates/deallocates/ + resizes blocks of memory to test for any bugs. 
Tests can either be performed + using the OS_Heap SWI or by compiling a special version of the heap code + from the kernel source folder (see USE_LOCAL_OSHEAP #define and s.asm) +*/ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <stdbool.h> +#include <time.h> + +#include "kernel.h" +#include "swis.h" + +#include "Global/Heap.h" +#include "Global/NewErrors.h" + +/* Whether to use the XOS_Heap SWI or the local copy of the heap code */ +#define USE_LOCAL_OSHEAP + +/* Maximum number of allocations to make */ +#define MAX_ALLOCS 1024 + +#define VBIT (1<<28) + +#ifdef USE_LOCAL_OSHEAP +extern _kernel_oserror *CallHeap(_kernel_swi_regs *r); +#else +#define CallHeap(R) _kernel_swi(OS_Heap,R,R) +#endif + +/* Workspace */ +static uint32_t *heap=NULL; /* Main heap */ +static uint32_t *backup=NULL; /* Backup copy of heap */ +static uint32_t allocsize=0; /* Heap memory block size */ +static uint32_t *usedspace=NULL; /* Bitmap of used space; 1 bit per word */ +static uint32_t seed=0; /* RNG seed */ +static uint32_t sequence=0; /* Number of ops performed */ +static uint32_t numblocks=0; /* Number of blocks currently allocated */ +static uint32_t blocks[MAX_ALLOCS]; /* Offsets of blocks within heap */ +static uint32_t currentop = 0; /* Current heap operation */ +static uint32_t opsleft = 0; /* Number of ops left */ +static _kernel_swi_regs r; +static _kernel_swi_regs last; + +/* Utility functions */ + +static void init(void) +{ + srand(seed); + printf("Seed %08x alloc size %08x\n",seed,allocsize); + /* Make heap 4K aligned */ + heap = (uint32_t *) (((uint32_t) malloc(allocsize+4096)+4095)&0xfffff000); + /* Same for backup */ + backup = (uint32_t *) (((uint32_t) malloc(allocsize+4096)+4095)&0xfffff000); + /* Used space map */ + usedspace = (uint32_t *) malloc(((allocsize+31*4)>>7)<<2); + memset(usedspace,0,((allocsize+31*4)>>7)<<2); + memset(heap,0,allocsize); + memset(backup,0,allocsize); +} + +static uint32_t getrand(uint32_t max) +{ + uint64_t temp = ((uint64_t) max)*rand(); + return (uint32_t) (temp/RAND_MAX); +} + +static void dumpheap(uint32_t *h) +{ + fprintf(stderr,"heap @ %p:\nmag %x\nfree %x\nbase %x\nend %x\n",h,h[0],h[1],h[2],h[3]); + uint32_t free = h[1]; + uint32_t next = 16; + if(free) + free += 4; + while(free) + { + if(free > next) + { + fprintf(stderr,"allocs between %x and %x:\n",next,free); + do { + fprintf(stderr,"%x: alloc size %x\n",next,h[next>>2]); + if((h[next>>2] > h[2]) || (h[next>>2]+next > h[2]) || (h[next>>2]&3) || !h[next>>2]) + { + fprintf(stderr,"bad block, skipping rest\n"); + break; + } + next += h[next>>2]; + } while(free>next); + if(free!=next) + fprintf(stderr,"alloc mismatch! 
next=%x\n",next); + } + fprintf(stderr,"%x: free size %x next %x\n",free,h[(free+4)>>2],h[free>>2]); + if(h[(free+4)>>2] == h[free>>2]) + fprintf(stderr,"consecutive free blocks!\n"); + next = free+h[(free+4)>>2]; + if((h[free>>2] & 3) || (h[free>>2] >= h[2]) || (h[free>>2]+free >= h[2])) + { + fprintf(stderr,"bad next ptr\n"); + return; + } + if((h[(free+4)>>2] & 3) || (h[(free+4)>>2] >= h[2]) || (h[(free+4)>>2]+free >= h[2])) + { + fprintf(stderr,"bad size\n"); + return; + } + if(!h[free>>2]) + { + fprintf(stderr,"end of free list\n"); + break; + } + free = free+h[free>>2]; + if(free<next) + { + fprintf(stderr,"next free is inside current?\n"); + return; + } + } + if(free > h[2]) + { + fprintf(stderr,"free list extends beyond heap end\n"); + } + if(next > h[2]) + { + fprintf(stderr,"next ptr beyond heap end\n"); + } + fprintf(stderr,"end allocs:\n"); + while(next < h[2]) + { + fprintf(stderr,"%x: alloc size %x\n",next,h[next>>2]); + if((h[next>>2] > h[2]) || (h[next>>2]+next > h[2]) || (h[next>>2]&3) || !h[next>>2]) + { + fprintf(stderr,"bad block, skipping rest\n"); + return; + } + next += h[next>>2]; + } + fprintf(stderr,"end\n"); +} + +static bool heapvalid(uint32_t *h) +{ + uint32_t free = h[1]; + uint32_t next = 16; + if(free) + free += 4; + while(free) + { + if(free > next) + { + do { + if((h[next>>2] > h[2]) || (h[next>>2]+next > h[2]) || (h[next>>2]&3) || !h[next>>2]) + { + return false; + } + next += h[next>>2]; + } while(free>next); + if(free!=next) + return false; + } + if(h[(free+4)>>2] == h[free>>2]) + return false; + next = free+h[(free+4)>>2]; + if((h[free>>2] & 3) || (h[free>>2] >= h[2]) || (h[free>>2]+free >= h[2])) + { + return false; + } + if((h[(free+4)>>2] & 3) || (h[(free+4)>>2] >= h[2]) || (h[(free+4)>>2]+free >= h[2])) + { + return false; + } + if(!h[free>>2]) + { + break; + } + free = free+h[free>>2]; + if(free<next) + { + return false; + } + } + if(free > h[2]) + { + return false; + } + if(next > h[2]) + { + return false; + } + while(next < h[2]) + { + if((h[next>>2] > h[2]) || (h[next>>2]+next > h[2]) || (h[next>>2]&3) || !h[next>>2]) + { + return false; + } + next += h[next>>2]; + } + return true; +} + +static void fail(void) +{ + fprintf(stderr,"Failed on sequence %d\n",sequence); + fprintf(stderr,"Last op registers:\n"); + for(int i=0;i<5;i++) + fprintf(stderr,"r%d = %08x\n",i,last.r[i]); + fprintf(stderr,"Result registers:\n"); + for(int i=0;i<5;i++) + fprintf(stderr,"r%d = %08x\n",i,r.r[i]); + fprintf(stderr,"Heap before op:\n"); + dumpheap(backup); + fprintf(stderr,"Heap after op:\n"); + dumpheap(heap); + fprintf(stderr,"Allocated blocks:\n"); + for(uint32_t i=0;i<numblocks;i++) + { + fprintf(stderr,"%08x\n",blocks[i]); + } + exit(1); +} + +static uint32_t blocksize(uint32_t offset) +{ + return heap[(offset-4)>>2]; +} + +static void tryuse(uint32_t offset) +{ + uint32_t len = blocksize(offset); + if((len-4 > allocsize-offset) || (len & 3) || (len<4)) + { + fprintf(stderr,"tryuse: Bad block at %08x\n",offset); + fail(); + } + offset >>= 2; + while(len) + { + if(usedspace[offset>>5] & (1<<(offset&31))) + { + fprintf(stderr,"tryuse: Overlapping block at %08x\n",offset<<2); + fail(); + } + usedspace[offset>>5] |= 1<<(offset&31); + offset++; + len -= 4; + } +} + +static void tryfree(uint32_t offset) +{ + uint32_t len = blocksize(offset); + if((len-4 > allocsize-offset) || (len & 3) || (len<4)) + { + fprintf(stderr,"tryfree: Bad block at %08x\n",offset); + fail(); + } + offset >>= 2; + while(len) + { + if(!(usedspace[offset>>5] & (1<<(offset&31)))) + { + 
fprintf(stderr,"tryfree: Block at %08x already freed\n",offset<<2); + fail(); + } + usedspace[offset>>5] -= 1<<(offset&31); + offset++; + len -= 4; + } +} + +/* Main function */ + +int main(int argc,char **argv) +{ + _kernel_oserror *err; + + /* TODO - Take parameters from command line */ + _swix(OS_ReadMonotonicTime,_OUT(0),&seed) + allocsize = 8*1024; + + init(); + + /* Initialise heap */ + r.r[0] = HeapReason_Init; + r.r[1] = (int) heap; + r.r[3] = allocsize; + err = CallHeap(&r); + if(err) + { + fprintf(stderr,"Heap initialise failed! %s\n",err->errmess); + exit(1); + } + usedspace[0] = 0xf; + + /* Begin tests */ + uint32_t temp,temp2,temp3,temp4; + while(heapvalid(heap)) + { + if(!opsleft) + { + opsleft = getrand(128); + switch(getrand(4)) + { + case 0: + currentop = HeapReason_Get; + break; + case 1: + currentop = HeapReason_Free; + break; + case 2: + currentop = HeapReason_ExtendBlock; + break; + default: + currentop = HeapReason_GetAligned; + break; + } + } + if(!(sequence&0xffff)) + { +// printf("."); + dumpheap(heap); + } + sequence++; + r.r[0] = currentop; + memcpy(backup,heap,allocsize); + switch(currentop) + { + case HeapReason_Get: + if(numblocks == MAX_ALLOCS) + { + opsleft = 0; + break; + } + r.r[3] = temp = getrand(allocsize>>5)+1; + last = r; + err = CallHeap(&r); + if(err) + { + if(err->errnum != ErrorNumber_HeapFail_Alloc) + { + fprintf(stderr,"Failed allocating %08x bytes: %s\n",temp,err->errmess); + fail(); + } + } + else + { + temp2 = blocks[numblocks++] = r.r[2]-((uint32_t)heap); + if(blocksize(temp2) < temp+4) + { + fprintf(stderr,"Failed to allocate requested block size: %08x bytes at %08x\n",temp,temp2); + fail(); + } + tryuse(temp2); + } + break; + case HeapReason_Free: + if(!numblocks) + { + opsleft = 0; + break; + } + temp = getrand(numblocks); + r.r[2] = blocks[temp]+((uint32_t) heap); + tryfree(blocks[temp]); /* Must free beforehand */ + last = r; + err = CallHeap(&r); + if(err) + { + fprintf(stderr,"Failed freeing block at %08x: %s\n",blocks[temp],err->errmess); + fail(); + } + blocks[temp] = blocks[--numblocks]; + break; + case HeapReason_ExtendBlock: + if(!numblocks) + { + opsleft = 0; + break; + } + temp = getrand(numblocks); + r.r[2] = blocks[temp]+((uint32_t) heap); + temp2 = getrand(allocsize>>4)-(allocsize>>5); + r.r[3] = temp2; + temp3 = blocksize(blocks[temp]); + tryfree(blocks[temp]); /* Must free beforehand */ + last = r; + err = CallHeap(&r); + if(err) + { + if(err->errnum != ErrorNumber_HeapFail_Alloc) + { + fprintf(stderr,"Failed resizing block at %08x by %08x bytes: %s\n",blocks[temp],(int) temp2,err->errmess); + fail(); + } + if(blocksize(blocks[temp]) != temp3) + { + fprintf(stderr,"Resize failed but block size changed\n"); + fail(); + } + tryuse(blocks[temp]); + } + else + { + if(r.r[2] && (r.r[2] != 0xffffffff)) + { + if((int) (temp3+temp2) <= 4) + { + fprintf(stderr,"Resized block was kept when it should have been freed: block %08x by %08x\n",blocks[temp],(int) temp2); + fail(); + } + blocks[temp] = r.r[2]-((uint32_t)heap); + tryuse(blocks[temp]); + if((blocksize(blocks[temp])-(temp3+temp2)) > 7) + { + fprintf(stderr,"Failed to resize block by required amount: block %08x by %08x\n",blocks[temp],(int) temp2); + fail(); + } + } + else + { + if((int) (temp3+temp2) > 4) + { + fprintf(stderr,"Resized block was freed when it should have remained: block %08x by %08x\n",blocks[temp],(int) temp2); + fail(); + } + blocks[temp] = blocks[--numblocks]; + } + } + break; + case HeapReason_GetAligned: + if(numblocks == MAX_ALLOCS) + { + opsleft = 0; + 
break; + } + r.r[3] = temp = getrand(allocsize>>4)+1; + temp2 = 4<<getrand(9); /* Max 2K alignment (heap 4K aligned) */ + temp3 = temp2*(1<<getrand(5)); + if(temp3 > 4096) /* Max 2K boundary (heap 4K aligned) */ + temp3 = 2048; + if(temp3 < temp) + temp3 = 0; + r.r[2] = temp2; + r.r[4] = temp3; + last = r; + err = CallHeap(&r); + if(err) + { + if(err->errnum != ErrorNumber_HeapFail_Alloc) + { + fprintf(stderr,"Failed allocating %08x bytes at alignment %08x boundary %08x: %s\n",temp,temp2,temp3,err->errmess); + fail(); + } + } + else + { + temp4 = blocks[numblocks++] = r.r[2]-((uint32_t) heap); + if(blocksize(temp4) < temp+4) + { + fprintf(stderr,"Failed to allocate requested block size: %08x bytes at alignment %08x boundary %08x at %08x\n",temp,temp2,temp3,temp4); + fail(); + } + if(temp4 & (temp2-1)) + { + fprintf(stderr,"Block allocated at wrong alignment: %08x bytes at alignment %08x boundary %08x at %08x\n",temp,temp2,temp3,temp4); + fail(); + } + if(temp3 && ((temp4 & ~(temp3-1)) != ((temp4+temp-1) & ~(temp3-1)))) + { + fprintf(stderr,"Block crosses boundary: %08x bytes at alignment %08x boundary %08x at %08x\n",temp,temp2,temp3,temp4); + fail(); + } + tryuse(temp4); + } + break; + } + if(opsleft) + opsleft--; + } + fprintf(stderr,"Heap corruption detected!\n"); + fail(); + return 0; +} diff --git a/Dev/HeapTest/s/asm b/Dev/HeapTest/s/asm new file mode 100644 index 0000000000000000000000000000000000000000..f136f9eb9abe01e51266a157cdf006b69402681e --- /dev/null +++ b/Dev/HeapTest/s/asm @@ -0,0 +1,113 @@ +; Copyright 2011 Castle Technology Ltd +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. +; + +; Assembler gubbins to allow a local copy of the heap manager to be used by +; the testbed application + + + GET Hdr:ListOpts + GET Hdr:Macros + GET Hdr:System + GET Hdr:CPU.Arch + GET Hdr:Machine.<Machine> + GET Hdr:Heap + GET Hdr:Proc + GET Hdr:FSNumbers + GET Hdr:HighFSI + GET Hdr:NewErrors + +; Disable internationalisation + GBLL International +International SETL {FALSE} + +; Indicate we're compiling the testbed + GBLL HeapTestbed +HeapTestbed SETL {TRUE} + +; Heap debugging disabled for now + GBLL DebugHeaps +DebugHeaps SETL {FALSE} + +; Fake zero page workspace + + ^ 0 +IRQsema # 4 +HeapSavedReg_R0 # 4 +HeapSavedReg_R1 # 4 +HeapSavedReg_R2 # 4 +HeapSavedReg_R3 # 4 +HeapSavedReg_R4 # 4 +HeapSavedReg_R13 # 4 +HeapReturnedReg_R0 # 4 +HeapReturnedReg_R1 # 4 +HeapReturnedReg_R2 # 4 +HeapReturnedReg_R3 # 4 +HeapReturnedReg_R4 # 4 +HeapReturnedReg_R13 # 4 +HeapReturnedReg_PSR # 4 +ZeroPageSize * @ + +; Macros and other bits + + MACRO + assert $condition + [ :LNOT: ($condition) + ! 1,"Assert failed: $condition" + ] + MEND + +SVC2632 * SVC32_mode + + + AREA testbeddata, DATA + +ZeroPage % ZeroPageSize + +HeapBackgroundError % 256 + + AREA testbedcode, CODE + +; C interface + + EXPORT CallHeap +CallHeap ROUT + ; r0 = _kernel_swi_regs ptr for input/output + ; Returns error ptr in r0 + Push "r0,r4-r11,lr" + LDMIA r0,{r0-r9} + SWI OS_EnterOS ; Call in SVC mode? 
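+        ; r0-r9 now hold the caller's _kernel_swi_regs contents; the heap is
+        ; entered via DoCallXOSHeap below, i.e. the locally assembled copy of
+        ; s.HeapMan rather than the real XOS_Heap SWI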
+ BL DoCallXOSHeap + MOVVS r10,r0 + MOVVC r10,#0 + SWI OS_LeaveOS + Pull "r11" + STMIA r11,{r0-r9} + MOV r0,r10 + Pull "r4-r11,pc" + +; Assembler bits for use by heap code + +DoCallXOSHeap + ; Fake an XOS_Heap SWI + ; Preserve r10-r12 and enter with PSR in lr + Push "r10-r12,lr" + MRS lr,CPSR + B HeapEntry + +; Main heap manager code + + GET ^.^.s.HeapMan + + END diff --git a/VersionASM b/VersionASM index 8c4cd11dd0225e989d3ef403a1dca0513c9caefe..5648e2f32561a413343275c67ed2708fee76ce56 100644 --- a/VersionASM +++ b/VersionASM @@ -13,11 +13,11 @@ GBLS Module_ComponentPath Module_MajorVersion SETS "5.35" Module_Version SETA 535 -Module_MinorVersion SETS "4.79.2.127" -Module_Date SETS "27 Nov 2011" -Module_ApplicationDate SETS "27-Nov-11" +Module_MinorVersion SETS "4.79.2.128" +Module_Date SETS "10 Dec 2011" +Module_ApplicationDate SETS "10-Dec-11" Module_ComponentName SETS "Kernel" Module_ComponentPath SETS "castle/RiscOS/Sources/Kernel" -Module_FullVersion SETS "5.35 (4.79.2.127)" -Module_HelpVersion SETS "5.35 (27 Nov 2011) 4.79.2.127" +Module_FullVersion SETS "5.35 (4.79.2.128)" +Module_HelpVersion SETS "5.35 (10 Dec 2011) 4.79.2.128" END diff --git a/VersionNum b/VersionNum index 01410f17cc5f0a775e03340882ff66ee11e93461..bb184aefcccb2b3e5e6e258deab41658ca730677 100644 --- a/VersionNum +++ b/VersionNum @@ -5,19 +5,19 @@ * */ #define Module_MajorVersion_CMHG 5.35 -#define Module_MinorVersion_CMHG 4.79.2.127 -#define Module_Date_CMHG 27 Nov 2011 +#define Module_MinorVersion_CMHG 4.79.2.128 +#define Module_Date_CMHG 10 Dec 2011 #define Module_MajorVersion "5.35" #define Module_Version 535 -#define Module_MinorVersion "4.79.2.127" -#define Module_Date "27 Nov 2011" +#define Module_MinorVersion "4.79.2.128" +#define Module_Date "10 Dec 2011" -#define Module_ApplicationDate "27-Nov-11" +#define Module_ApplicationDate "10-Dec-11" #define Module_ComponentName "Kernel" #define Module_ComponentPath "castle/RiscOS/Sources/Kernel" -#define Module_FullVersion "5.35 (4.79.2.127)" -#define Module_HelpVersion "5.35 (27 Nov 2011) 4.79.2.127" +#define Module_FullVersion "5.35 (4.79.2.128)" +#define Module_HelpVersion "5.35 (10 Dec 2011) 4.79.2.128" #define Module_LibraryVersionInfo "5:35" diff --git a/hdr/KernelWS b/hdr/KernelWS index f32be4fec086e1de3d18b5ea67a169a089b6a913..5e413f912b830a4297d535ba9acdfac0ab956037 100644 --- a/hdr/KernelWS +++ b/hdr/KernelWS @@ -1980,6 +1980,8 @@ ROMBuildDate # 128 NewFX0Error # 64 ] +HeapBackgroundError # 256 ; For storing errors generated in the background by the forced completion of a foreground heap op + KbuffsEnd # 0 KbuffsSize * KbuffsEnd - KbuffsBaseAddress ;size of Kernel buffers area diff --git a/s/Arthur3 b/s/Arthur3 index 76644e91eb3a0be01cd524f7d9b113ae1cb932df..12bef037dbc321157b4cfd39863c566148e69140 100644 --- a/s/Arthur3 +++ b/s/Arthur3 @@ -1545,6 +1545,8 @@ ReadNumAuto Entry "r1,r3,r4" AutoString = "Auto", 0 +dotstring + = ".", 0 ALIGN ReadSizeParm ROUT diff --git a/s/HeapMan b/s/HeapMan index 839f49e25938f7cdb6a51209c29192d8e912f365..794e7a4020d43294a79934a62343e66e5feb9587 100644 --- a/s/HeapMan +++ b/s/HeapMan @@ -33,6 +33,11 @@ TubeInfo SETL {FALSE} GBLL debheap debheap SETL 1=0 + [ :LNOT: :DEF: HeapTestbed + GBLL HeapTestbed +HeapTestbed SETL {FALSE} + ] + [ DebugHeaps FreeSpaceDebugMask * &04000000 UsedSpaceDebugMask * &08000000 @@ -43,11 +48,11 @@ Nil * 0 hpd RN r1 ; The punter sees these addr RN r2 size RN r3 +work RN r4 HpTemp RN r10 ; But not these tp RN r11 bp RN r12 -work RN r4 ; This is the only one we have to save. 
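+; work (r4) is now supplied by the caller too - it carries the boundary
+; argument for the new GetAligned reason code - hence it has moved up to
+; join the punter-visible registers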
; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ; + H E A P O R G A N I S A T I O N + @@ -103,7 +108,7 @@ freblksize # 0 ; The link field is Nil (0) for the last block in the list -; Block sizes must be forced to a multiple of 8 bytes for subsequent link and +; Block sizes must be forced to a minimum of 8 bytes for subsequent link and ; size information to be stored in them if they are disposed of by the user. ; They must also be capable of storing a 4 byte size field while allocated. @@ -124,6 +129,17 @@ $label BL ValidateHpdSubr MEND +; Call XOS_Heap SWI + + MACRO + CallXOSHeap + [ HeapTestbed + BL DoCallXOSHeap + | + SWI XOS_Heap + ] + MEND + ;**************************************************************************** ; These bits of ExtendBlock are outside the IRQ HeapOp range because they @@ -157,7 +173,7 @@ ReallocateInSafeZone Push addr ; save for later freeing MOV R0, #HeapReason_Get - SWI XOS_Heap + CallXOSHeap Pull addr, VS BVS SafeNaffExtension @@ -192,11 +208,11 @@ CopyForExtension MOV R0, #HeapReason_Free Pull addr ; heap block addr - SWI XOS_Heap + CallXOSHeap - MOV R0, #HeapReason_ExtendBlock + MOVVC R0, #HeapReason_ExtendBlock WritePSRc SVC_mode + I_bit,work ; disable IRQs before we venture back - B GoodExtension ; into danger zone + BVC GoodExtension ; into danger zone SafeNaffExtension WritePSRc SVC_mode + I_bit,work ; disable IRQs before we venture back @@ -236,6 +252,14 @@ heapopdoneinbackground ROUT ; clear the interlock TST R11, #V_bit ; look at returned error BEQ GoodHeapExit + ; Recover the error from our buffer + LDR R0,=HeapBackgroundError + LDR R10,[R0] + SWI XMessageTrans_CopyError + ; Check that it worked - MessageTrans may be dead + LDR R11,[R0] + TEQ R10,R11 + LDRNE R0,=HeapBackgroundError ; Just return our internal buffer if MessageTrans couldn't provide one B NaffHeapExit ; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -247,8 +271,9 @@ heapopdoneinbackground ROUT ; In r0 = heap action requested ; r1(hpd) -> heap block -; r2(addr) -> start of block +; r2(addr) -> start of block, or required alignment ; r3(size) = size of block +; r4(work) = boundary limitation ; Out VClear -> Action performed ; VSet -> Something terrible has happened, error set @@ -282,7 +307,7 @@ inspect_IRQ_stack ORR R10, R10, #I32_bit:OR:SVC2632 STR R10, [R11, #4*6] ; return into SVC26/32 mode with IRQs disabled - Push "R0-R3, lr" + Push "R0-R4, lr" LDR R10, =ZeroPage+HeapSavedReg_R0 @@ -292,15 +317,30 @@ inspect_IRQ_stack ; CMP R12, #0 ; BNE HeapInUse - LDMIA R10, {R0-R3, R10, R11} + LDMIA R10, {R0-R4, R11} SWI XOS_Heap ; with interrupts off! LDR R12, =ZeroPage+HeapReturnedReg_R0 ; Could we poke these into the IRQ stack too...? ; would allow interruptible IRQ processes to do heap ops!!! MRS lr, CPSR - STMIA R12, {R0-R3, R10, R11, lr} - Pull "R0-R3, lr" + STMIA R12, {R0-R4, R11, lr} +; Any errors that were generated by the foreground operation may have ended up +; using one of MessageTrans' IRQ buffers. Trouble is, any number of IRQ errors +; could occur between now and when the foreground task gets the error. Avoid +; the error getting clobbered by copying it into a special kernel buffer, and +; then copy it back to a MessageTrans buffer once we're back in the foreground. 
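+; HeapBackgroundError is a 256 byte buffer in kernel workspace (hdr.KernelWS);
+; the loop below copies the error block into it 16 bytes at a time, and
+; heapopdoneinbackground later hands it back to the foreground caller via
+; XMessageTrans_CopyError (falling back to the buffer itself if that fails)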
+ BVC noheapbackgrounderror + LDR R1,=HeapBackgroundError + MOV LR,#256 +heapbackgrounderrorloop + LDMIA R0!,{R2-R4,R12} + SUBS LR,LR,#16 + STMIA R1!,{R2-R4,R12} + BNE heapbackgrounderrorloop + +noheapbackgrounderror + Pull "R0-R4, lr" iis_end ; store the registers in the info block LDR R12, =ZeroPage+HeapSavedReg_R0 @@ -334,6 +374,8 @@ HeapJumpTable ; Check reason codes against Hdr:Heap defs B ExtendHeap assert ((.-HeapJumpTable) :SHR: 2) = HeapReason_ReadBlockSize B ReadBlockSize + assert ((.-HeapJumpTable) :SHR: 2) = HeapReason_GetAligned + B GetAreaAligned [ debheap B ShowHeap ] @@ -352,7 +394,12 @@ GoodHeapExit ; V cleared on entry to SWI dispatch Pull lr ORRVS lr, lr, #V_bit ; VSet Exit + [ HeapTestbed + MSR CPSR_cxsf, lr ; Fake exit for testbed + Pull "r10-r12,pc" + | ExitSWIHandler ; Like all good SWI handlers + ] ;HeapInUse ; $HeapBadAsModuleBRA @@ -592,8 +639,8 @@ GetArea ROUT BLE garfailed_zero ; And -ve is invalid as well! ; note sizes of many megabytes thrown out by looking. - ADD size, size, #(freblksize-1)+4 ; Make block size granular - BIC size, size, #(freblksize-1) ; with size field added + ADD size, size, #3+4 ; Make block size multiple of 4 + BIC size, size, #3 ; including header ADR addr, hpdfree-frelink ; addr:= @(hpd!free)-frelink @@ -610,7 +657,11 @@ garloop ; If we have an exact fit (or as close as the granularity of the free list will ; allow), unlink this block and return it - BNE SplitFreeBlock + CMP HpTemp, #freblksize + BGE SplitFreeBlock + +; Increase allocation size if there wasn't enough space to split the free block + ADD size, size, HpTemp [ debheap LDR HpTemp, hpddebug @@ -660,6 +711,7 @@ ResultIsAddrPlus4 STR size, [addr], #4 ; Store block size and increment addr Pull "size" ; Return original value to the punter ; Note : real size got would be an option! + CLRV B GoodHeapExit ; RESULTIS addr @@ -689,7 +741,7 @@ garmore ] garfailed - ADR R0, ErrorBlock_HeapFail_Alloc + ADRL R0, ErrorBlock_HeapFail_Alloc [ International BL TranslateError ] @@ -705,7 +757,7 @@ garfailed_badhpd [ debheap STRIM "Invalid heap descriptor" ] - ADR R0, ErrorBlock_HeapFail_BadDesc + ADRL R0, ErrorBlock_HeapFail_BadDesc [ International BL TranslateError ] @@ -719,6 +771,298 @@ garfailed_zero garfailed_zero * garfailed ] +; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +; +; GetAreaAligned. Top level HeapEntry +; ============== +; +; Allocate an aligned block of memory from the heap + +; This is the same as GetArea, except it will only allocate areas with the given +; (power-of-two) alignment. +; Fails if requesting size = 0 + +; In : hpd -> heap pointer +; size = size of block required +; addr = alignment (power of 2) +; work = boundary (power of 2, 0 for none) + +; Out : VClear : addr -> got a block +; VSet : addr = 0, couldn't get block +; Rest of universe ok + +GetAreaAligned ROUT + Push "size,work" + ValidateHpd garafailed + + [ debheap +; HpTemp not critical + LDR HpTemp, hpddebug + CMP HpTemp, #0 + BEQ %FT00 + Push "r0, link" + MOV r0, size + DREG r0, "GetAreaAligned " + MOV r0, addr + DREG r0, "alignment " + MOV r0, work + DREG r0, "boundary " + BL iShowHeap + Pull "r0, link" +00 + ] + + CMP size, #0 ; Can't deallocate 0, so there! + BLE garafailed_zero ; And -ve is invalid as well! + ; note sizes of many megabytes thrown out by looking. + + ADD size, size, #3 ; Make block size multiple of 4 + BIC size, size, #3 ; excluding header + + SUB bp, addr, #1 ; Store alignment-1 in bp + TST bp, addr + BNE garafailed_align ; Must be power of 2! 
+ CMP bp, #3 + MOVLT bp, #3 ; Minimum alignment is 4 + + SUB r0, work, #1 ; Store boundary-1 in r0 + TST r0, work + BNE garafailed_boundary ; Must be power of 2! + + ADR addr, hpdfree-frelink ; addr:= @(hpd!free)-frelink + + ; If we have a boundary, it must be >= alignment, and >= size + CMP r0, #-1 + BEQ garaloop + CMP r0, bp + CMPHS work, size + BLO garafailed_boundary2 + +garaloop + LDR tp, [addr, #frelink] ; tp := addr!fre.link + CMP tp, #Nil ; Is this the end of the chain ? + BEQ garamore ; - so try main blk + ADD addr, addr, tp ; convert offset + LDR HpTemp, [addr, #fresize] + +; Calculate start and end addresses as if we were to allocate from this block + ADD work,addr,#4 ; 4 bytes for storing block size + ADD HpTemp,HpTemp,addr ; End of free block + ADD work,work,bp +garaloop2 + BIC work,work,bp ; work = start of user block + SUB lr,work,addr + CMP lr,#4 + BEQ garastartok ; Start alignment is exact + CMP lr,#freblksize+4 + BGE garastartok ; Enough space to fit a free block at the start + +; We need a free block, but there isn't enough space for it. +; Shift 'work' up by one unit of alignment and try again. + + ADD work,work,bp,LSL #1 + B garaloop2 + +garastartok +; Calculate block end address + ADD lr,work,size ; End of user block + SUBS lr,HpTemp,lr ; Gap after user block + BLO garaloop ; Not big enough + +; Check boundary requirement + CMP r0,#-1 + BEQ garaboundaryok + AND lr,work,r0 ; Start offset within boundary + ADD lr,lr,size + SUB lr,lr,#1 ; Last byte of allocation + CMP lr,r0 + BLS garaboundaryok + +; This allocation crosses a boundary. Shift 'work' up to be boundary aligned. + ADD work,work,r0 + BIC work,work,r0 + B garaloop2 ; Loop back round to recheck everything (with small boundary sizes, we may have created a situation where we can't fit an initial free block) + +garaboundaryok + +; We have a suitable space to allocate from. + ADD size,size,#4 ; Correct size to store + SUB work,work,#4 ; Correct block start + + [ debheap + LDR lr, hpddebug + CMP lr, #0 + BEQ %FT60 + WRLN "Using existing free block" +60 + ] + +; Note: bp now being used as scratch + + ADD bp,work,size ; End of user block + SUB bp,HpTemp,bp ; Gap after user block + + WritePSRc SVC_mode+I_bit, lr + +; Work out if we need a new free block afterwards + CMP bp, #freblksize + ADDLT size, size, bp ; Not enough space, so enlarge allocated block + BLT %FT10 + +; Create a new free block that will lie after our allocated block + SUB HpTemp, HpTemp, bp + STR bp, [HpTemp, #fresize] ; Write size + LDR bp, [addr, #frelink] + CMP bp, #Nil + ADDNE bp, bp, addr + SUBNE bp, bp, HpTemp + STR bp, [HpTemp, #frelink] ; Write next ptr + SUB HpTemp, HpTemp, addr + STR HpTemp, [addr, #frelink] ; Fix up link from previous block +10 + +; Shrink this free block to take up the space preceeding the allocated block. + SUBS bp,work,addr + STRNE bp, [addr, #fresize] + BNE ResultIsWorkPlus4 + +; No space for an initial free block. Get rid of it. + ASSERT frelink=0 ; otherwise LDR bp,[addr,#frelink]! 
+ LDR bp, [addr] + CMP bp, #0 + ADDNE bp, bp, tp + STR bp, [addr, -tp] + B ResultIsWorkPlus4 + +; Got no more free blocks of length >= size, so try to allocate more heap space +; out of the block described by hpd + +garamore + [ debheap + LDR work, hpddebug + CMP work, #0 + BEQ %FT80 + WRLN "Trying to get more from main block" +80 + ] + LDR work, hpdbase + ADD work, work, hpd + ADD tp, work, #4 + ADD tp, tp, bp +garamoreloop + BIC tp, tp, bp ; tp = pointer to return to user + +; Make sure there's enough space for a free block if necessary + SUB HpTemp, tp, work ; HpTemp = tp-(hpd+hpdbase) + CMP HpTemp, #4 + BEQ garamoreok + CMP HpTemp, #freblksize+4 + ADDLT tp, tp, bp, LSL #1 ; Not enough space for free block + BLT garamoreloop + +garamoreok +; Boundary check + CMP r0, #-1 + BEQ garamoreboundaryok + AND HpTemp, tp, r0 + ADD HpTemp, HpTemp, size + SUB HpTemp, HpTemp, #1 + CMP HpTemp, r0 + BLS garamoreboundaryok + +; Shift 'tp' up to be boundary aligned + ADD tp, tp, r0 + BIC tp, tp, r0 + B garamoreloop + +garamoreboundaryok + ADD HpTemp, tp, size ; New heap end + SUB HpTemp, HpTemp, hpd ; New heap size + LDR lr, hpdend + CMP HpTemp, lr + BGT garafailed + + WritePSRc SVC_mode+I_bit, lr + +; Set up the block to return to the user + ADD size, size, #4 + STR size, [tp, #-4]! + +; Grow the heap + STR HpTemp, hpdbase + +; Create preceeding free block if necessary + SUBS HpTemp, tp, work + BEQ ResultIsTpPlus4 + +; Write the free block + STR HpTemp, [work, #fresize] + MOV HpTemp, #Nil + STR HpTemp, [work, #frelink] + +; Patch up the preceeding block + SUB HpTemp, work, addr + STR HpTemp, [addr, #frelink] + +ResultIsTpPlus4 +; Block size is already stored + ADD addr, tp, #4 + Pull "size,work" + MOV r0,#HeapReason_GetAligned + CLRV + B GoodHeapExit + +ResultIsWorkPlus4 + STR size, [work] ; Store block size + ADD addr, work, #4 ; Move to correct return reg & add offset + Pull "size,work" + MOV r0,#HeapReason_GetAligned + CLRV + B GoodHeapExit + +garafailed + ADRL R0, ErrorBlock_HeapFail_Alloc + [ International + BL TranslateError + ] + [ debheap + WRLN " : GetAreaAligned failed" + ] +garafail_common + MOV addr, #0 ; addr := 0 if we couldn't allocate + Pull "size,work" ; RESULTIS 0 + B NaffHeapExit ; VSet Exit + +garafailed_badhpd + [ debheap + STRIM "Invalid heap descriptor" + ] + ADRL R0, ErrorBlock_HeapFail_BadDesc + [ International + BL TranslateError + ] + B garafail_common + + [ debheap +garafailed_zero + STRIM "Can't allocate 0 or less bytes" + B garafailed +garafailed_align + STRIM "Alignment not power of 2" + B garafailed +garafailed_boundary + STRIM "Boundary not power of 2" + B garafailed +garafailed_boundary2 + STRIM "Boundary too small" + B garafailed + | +garafailed_zero * garafailed +garafailed_align * garafailed +garafailed_boundary * garafailed +garafailed_boundary2 * garafailed + ] + ; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ; ; FreeArea. Top level HeapEntry @@ -787,7 +1131,7 @@ ExtendBlock CMP HpTemp, #0 BEQ %FT00 Push "r0, link" - DREG size, "ExtendBlock by ",concat + DREG size, "ExtendBlock by ",cc STRIM " block at " SUB r0, addr, hpd SUB r0, r0, #4 @@ -799,8 +1143,8 @@ ExtendBlock BL FindHeapBlock BVS NaffExtension - ADD size, size, #freblksize-1 ; round size as appropriate : - BICS size, size, #freblksize-1 ; round up to nearest 8 + ADD size, size, #3 ; round size as appropriate : + BICS size, size, #3 ; round up to nearest 4 BEQ GoodExtension ; get the easy case done. 
BPL MakeBlockBigger @@ -808,7 +1152,8 @@ ExtendBlock RSB size, size, #0 LDR bp, [addr, hpd] ; get block size WritePSRc SVC_mode+I_bit, R14 - SUBS bp, bp, size ; size of block left + SUB bp, bp, size ; size of block left + CMP bp, #4 [ debheap ; HpTemp not critical, GE/LT critical @@ -818,22 +1163,40 @@ ExtendBlock BEQ %FT01 WRLN "Freeing part of block" 01 - CMP bp, #0 ; restore GE/Lt + CMP bp, #4 ; restore GE/Lt ] MOVLE HpTemp, #-1 ; if discarding block, then STRLE HpTemp, [stack] ; make pointer really naff. + BLE GoodShrink - STRGT bp, [addr, hpd] ; update size of block left - ADDGT addr, addr, bp ; offset of block to free - STRGT size, [addr, hpd] ; construct block for freeing - + ; If we're only shrinking 4 bytes, only allow the shrink to go ahead + ; if there's a free block (or hpdbase) after us + CMP size, #4 + BGT DoShrink + LDR HpTemp, [hpd, tp] + CMP HpTemp, #Nil + ADDNE HpTemp, HpTemp, tp + LDREQ HpTemp, hpdbase + ADD HpTemp, HpTemp, hpd ; Next free block ptr + SUB HpTemp, HpTemp, addr ; Offset from start of this block + SUB HpTemp, HpTemp, size ; Apply shrink amount to match bp + CMP HpTemp, bp + MOVGT size, #0 ; Used block after us. Deny shrink. + BGT GoodExtension + BLT CorruptExtension ; Heap corrupt! + ; Else there's a free block (or hpdbase) directly after us +DoShrink + STR bp, [addr, hpd] ; update size of block left + ADD addr, addr, bp ; offset of block to free + STR size, [addr, hpd] ; construct block for freeing + +GoodShrink BL FreeChunkWithConcatenation ; work still set from block lookup GoodExtension Pull "addr, size, work" [ DebugHeaps - ADD lr, size, #freblksize-1 ; work out how much we actually extended by - BICS lr, lr, #freblksize-1 + MOVS lr, size ; work out how much we actually extended by BEQ %FT99 ; if zero or negative BMI %FT99 ; then nothing to do LDR HpTemp, [addr, #-4] ; get new block size @@ -847,6 +1210,7 @@ GoodExtension BNE %BT98 99 ] + CLRV B GoodHeapExit MakeBlockBigger @@ -871,7 +1235,7 @@ MakeBlockBigger LDRNE HpTemp, [HpTemp, #fresize] LDREQ HpTemp, hpdend SUBEQ HpTemp, HpTemp, bp - BICEQ HpTemp, HpTemp, #(freblksize-1) + BICEQ HpTemp, HpTemp, #3 ; force it to a sensible blocksize MRS lr, CPSR ; save EQ/NE state @@ -890,7 +1254,7 @@ MakeBlockBigger STRIM "Extending block into " 02 Pull "HpTemp,lr" - msr ,CPSR_f, lr + msr CPSR_f, lr ] LDR work, [addr, hpd] ; get size back @@ -925,8 +1289,13 @@ IntoFreeEntry Pull HpTemp ] - CMP HpTemp, size - BNE SplitFreeBlockForExtend + SUB HpTemp, HpTemp, size ; new freblk size + CMP HpTemp, #4 + BGT SplitFreeBlockForExtend + +; Not enough space for a free block. Increase the grow amount a bit. 
+ ADDEQ work, work, #4 + STREQ work, [addr, hpd] ; free entry just right size : remove from free list LDR HpTemp, [bp, hpd] ; free link @@ -942,7 +1311,6 @@ SplitFreeBlockForExtend STR work, [tp, hpd] ; prevnode points at right place ADD work, work, tp ; offset of new free entry ADD work, work, hpd - SUB HpTemp, HpTemp, size ; new freblk size STR HpTemp, [work, #fresize] LDR HpTemp, [bp, hpd] CMP HpTemp, #Nil @@ -992,13 +1360,15 @@ hack_preceder ; work is prevprevfree offset ; size is amount block grows by ; addr is block offset - CMP bp, #0 - ADDNE HpTemp, tp, hpd - STRNE bp, [HpTemp, #fresize] ; prevblock shrunk - BNE copy_backwards + CMP bp, #freblksize + ADDGE HpTemp, tp, hpd + STRGE bp, [HpTemp, #fresize] ; prevblock shrunk + BGE copy_backwards ; free freblk: work is still prevprevblk pointer LDR HpTemp, [tp, hpd] + ADDNE size, size, bp ; Increase grow amount by any remainder + MOVNE bp, #0 ; And make sure the block does die CMP HpTemp, #Nil ADDNE HpTemp, HpTemp, tp ; offset from heap start SUBNE HpTemp, HpTemp, work @@ -1015,7 +1385,7 @@ copy_backwards LDR r0, hpddebug CMP r0, #0 BEQ %FT06 - DREG HpTemp, "copying -4+",concat + DREG HpTemp, "copying -4+",cc STRIM " from " SUB R0, addr, hpd BL PrintOffset @@ -1074,7 +1444,7 @@ try_add_preceding_block BNE ext_delink LDR work, hpdend SUB work, work, bp ; get back real size - BIC work, work, #(freblksize-1) + BIC work, work, #3 ADD work, work, bp STR work, hpdbase ; all free allocated B ext_hack @@ -1111,9 +1481,15 @@ got_to_reallocate B ReallocateInSafeZone +CorruptExtension + ADRL R0,ErrorBlock_HeapFail_BadLink + [ International + BL TranslateError + ] + NaffExtension Pull "addr, size, work" - B NaffHeapExit + B NaffHeapExit ; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -1522,7 +1898,7 @@ iShowHeap ROUT ; Internal entry point for debugging heap BL PrintOffsetLine SUB r0, work, bp ; hpdend-hpdbase - DREG r0,"Bytes free: ",concat, Word + DREG r0,"Bytes free: ",cc, Word SUB r0, bp, addr ; hpdbase-hpdsize DREG r0,", bytes used: ",, Word SWI XOS_NewLine @@ -1605,7 +1981,7 @@ PrintOffset DREG r0 CMP R0, #0 ADDNE R0, R0, hpd - DREG r0," (",concat + DREG r0," (",cc STRIM ")" GRAB "R0, PC" diff --git a/s/Oscli b/s/Oscli index e12ed74b5c6296f64ada81af43816443f8469216..2f6473c29ae1e0cc84ebd69e9ef4eb17e0260d11 100644 --- a/s/Oscli +++ b/s/Oscli @@ -547,8 +547,7 @@ AliasOscliTooLong AliasStr_QA = "ALIAS$", 0 ] AliasStr = "Alias$*", 0 -AliasDot = "Alias$" -dotstring = ".", 0 +AliasDot = "Alias$.", 0 ALIGN [ Oscli_QuickAliases diff --git a/s/vdu/vdudecl b/s/vdu/vdudecl index c7607147a444c74ed36c27fc750030c32e2f4375..9a77f9aec077ad3fb247c2314b23e4b7325c9f0d 100644 --- a/s/vdu/vdudecl +++ b/s/vdu/vdudecl @@ -68,7 +68,10 @@ ScreenModeReason_SelectMode * 0 ScreenModeReason_ReturnMode * 1 ScreenModeReason_EnumerateModes * 2 ScreenModeReason_SelectMonitorType * 3 -ScreenModeReason_Limit * 4 +ScreenModeReason_ConfigureAcceleration * 4 +ScreenModeReason_CleanCache * 5 +ScreenModeReason_ForceCleanCache * 6 +ScreenModeReason_Limit * 7 ; Mode selector format diff --git a/s/vdu/vduswis b/s/vdu/vduswis index 37575ad7bd8c5b0356f4f5ed170ab12ddabb3328..ce7493d44e9f4043bb1ce7eac36f645cfba5d4ec 100644 --- a/s/vdu/vduswis +++ b/s/vdu/vduswis @@ -1933,7 +1933,10 @@ ScreenModeSWI Entry ASSERT ScreenModeReason_ReturnMode = 1 ASSERT ScreenModeReason_EnumerateModes = 2 ASSERT ScreenModeReason_SelectMonitorType = 3 - ASSERT ScreenModeReason_Limit = 4 + ASSERT ScreenModeReason_ConfigureAcceleration = 4 + ASSERT ScreenModeReason_CleanCache = 
5 + ASSERT ScreenModeReason_ForceCleanCache = 6 + ASSERT ScreenModeReason_Limit = 7 ScreenModeSub CMP r0, #ScreenModeReason_Limit @@ -1943,6 +1946,9 @@ ScreenModeSub B ScreenMode_ReturnMode B ScreenMode_EnumerateModes B ScreenMode_SelectMonitorType + B ScreenMode_ConfigureAcceleration + B ScreenMode_CleanCache + B ScreenMode_ForceCleanCache ; unknown OS_ScreenMode reason code @@ -2063,6 +2069,52 @@ ScreenMode_SelectMonitorType Entry "r0" STR r1, [WsPtr, #CurrentMonitorType] ; update current value EXIT +;************************************************************************** +; +; ScreenMode_ConfigureAcceleration - Configure screen memory cacheability +; +; Internal routine called by ScreenModeSWI +; +; in: r0 = reason code (4) +; r1 = flags: +; bit 0 : set to suspend cached screen until mode change +; bit 1 : set to suspend screen cleaning +; bit 2 : set to disable hardware acceleration +; other : reserved, must be 0 +; or -1 to read current value +; r2 = number of VSyncs between automatic screen cleaning (1-3), or -1 +; to read current value +; +; out: r1 = new flag state +; r2 = new number of VSyncs between automatic screen cleaning +; r10-r12 may be corrupted +; All other registers preserved +; + +ScreenMode_ConfigureAcceleration + ; Screen caching isn't supported yet. Just return dummy values. + MOV r1,#1 + MOV r2,#1 + MOV pc,lr + +;************************************************************************** +; +; ScreenMode_CleanCache - Clean screen memory from cache, if cache enabled +; ScreenMode_ForceCleanCache - Force clean of screen memory from cache +; +; Internal routine called by ScreenModeSWI +; +; in: r0 = reason code (5 or 6) +; +; out: r10-r12 may be corrupted +; All other registers preserved +; + +ScreenMode_CleanCache +ScreenMode_ForceCleanCache + ; Screen caching isn't supported yet. Just do nothing. + MOV pc,lr + ;;;mjsHAL - VIDCDividerSWI is horrible VIDC specific API, compiled out ;;; ; Should not cause any problems on any machine. STB flag just to be safe though.
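
A minimal usage sketch, not part of the change itself: calling the new OS_Heap
reason code from C via the swis.h veneer, assuming HeapReason_GetAligned takes
the value after ReadBlockSize (7) as asserted in the jump table above. Register
usage follows the GetAreaAligned header comments: r2 = alignment on entry,
r3 = size, r4 = boundary (0 for none), and r2 -> claimed block on exit.

    #include "kernel.h"
    #include "swis.h"

    /* Claim 'size' bytes from 'heap', aligned to 'align' (a power of two) and
       not crossing a 'boundary' (a power of two, or 0 for no limit).
       Returns NULL if the heap manager returns an error. */
    static void *heap_claim_aligned(void *heap, unsigned size,
                                    unsigned align, unsigned boundary)
    {
        void *block = NULL;
        if (_swix(OS_Heap, _INR(0,4)|_OUT(2),
                  7 /* HeapReason_GetAligned (assumed value) */,
                  heap, align, size, boundary, &block))
            return NULL;
        return block;
    }

Similarly, the OS_ScreenMode reason code 4 documented above could be exercised
from C (again a sketch only; passing -1 in r1 and r2 reads the current
settings, which the present implementation reports as dummy values):

    static void read_screen_acceleration(int *flags, int *vsyncs)
    {
        /* -1 in r1 and r2 means "read", per the ConfigureAcceleration docs */
        _swix(OS_ScreenMode, _INR(0,2)|_OUTR(1,2),
              4 /* ScreenModeReason_ConfigureAcceleration */,
              -1, -1, flags, vsyncs);
    }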