; Copyright 1996 Acorn Computers Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; > s.memmap
; low level memory mapping

; ----------------------------------------------------------------------------------
;
; AMB_SetMemMapEntries_MapIn_Lazy:
;
; entry:
;       R1  = first page index in PMP/AMBNode
;       R5  = no. of pages (must be >0)
;       R7  = logical offset in pages - must be zero for this variant of routine
;       R10 = DANode
;
; Lazy task swapping variant of SetMemMapEntries_MapIn
; Performs a lazy map in if lazy task swapping is enabled: when laziness is
; active, nothing is mapped here (pages are faulted in later, one at a time,
; by AMB_LazyFixUp); otherwise falls through to the full map in below.
;
AMB_SetMemMapEntries_MapIn_Lazy ROUT
 [ AMB_LazyMapIn
        LDR     r7, AMBFlags
        ANDS    r7, r7, #AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
        MOVEQ   pc, lr                  ; Just exit if laziness enabled (neither disable nor suspend set)
        MOV     r7, #0                  ; laziness off: r7 = logical offset 0 for the standard routine
 ]
        ; Fall through...

; ----------------------------------------------------------------------------------
;
; AMB_SetMemMapEntries_MapIn:
;
; entry:
;       R1  = first page index in PMP/AMBNode
;       R5  = no. of pages (must be >0)
;       R7  = logical offset in pages (zero for standard map in)
;       R10 = DANode
;
; Must not be used if current slot is mapped lazily
;
AMB_SetMemMapEntries_MapIn ROUT
        Entry   "r3,r8,r9,r10"
 [ AMB_Debug
        DebugReg r1, "MapIn phys start "
        DebugReg r5, "count "
        DebugReg r7, "log offset "
        DebugReg r10, "node "
 ]
        ; Update DA logical size prior to R10 being clobbered
        LDR     r3,[r10,#DANode_Size]
        ADD     r3,r3,r5,LSL #Log2PageSize
        STR     r3,[r10,#DANode_Size]
        ; Get correct parameters for the low-level calls:
        ; r3 = logical address of first page, r8 = page count,
        ; r9 = access flags (PPL) for the CAM, r10 -> first page list entry
        ADD     r3,r1,r7
        MOV     r3,r3,LSL #Log2PageSize
        ADD     r3,r3,#ApplicationStart
        MOV     r8, r5
        LDR     r9,[r10,#DANode_Flags]
        LDR     lr,=DynAreaFlags_AccessMask
        AND     r9,r9,lr
        LDR     r10,[r10,#DANode_PMP]
        ADD     r10,r10,r1,LSL #2       ; page list entries are one word each
        ; Map in the pages (assuming area currently unmapped)
        PTOp    AMB_movepagesin_L2PT
        BL      AMB_movepagesin_CAM
 [ PMPParanoid
        BL      ValidatePMPs
 ]
 [ AMB_Debug
        DebugTX "<MapIn"
 ]
        EXIT

; ----------------------------------------------------------------------------------
;
; AMB_SetMemMapEntries_MapOut:
;
; entry:
;       R1 = first page index in PMP/AMBNode
;       R5 = no.
; of pages (must be >0)
;       R7 = logical offset in pages (zero for standard map out)
;       R10 = DANode
;
; Must not be used if current slot is mapped lazily
;
AMB_SetMemMapEntries_MapOut ROUT
        Entry   "r3,r4,r8,r9,r10"
 [ AMB_Debug
        DebugReg r1, "MapOut phys start "
        DebugReg r5, "count "
        DebugReg r7, "log offset "
        DebugReg r10, "node "
 ]
        ; Update DA logical size prior to R10 being clobbered
        LDR     r3,[r10,#DANode_Size]
        SUB     r3,r3,r5,LSL #Log2PageSize
        STR     r3,[r10,#DANode_Size]
        ; Get correct parameters for the low-level calls:
        ; r4 = logical address of first page, r8 = page count,
        ; r9 = access flags (PPL) for the CAM, r10 -> first page list entry
        LDR     r3,[r10,#DANode_Flags]
        LDR     lr,=DynAreaFlags_AccessMask
        AND     r3,r3,lr
        ADD     r4,r1,r7
        MOV     r4,r4,LSL #Log2PageSize
        ADD     r4,r4,#ApplicationStart
        MOV     r8, r5
        MOV     r9, r3
        LDR     r10,[r10,#DANode_PMP]
        ADD     r10,r10,r1,LSL #2       ; page list entries are one word each
        ; Map out the pages (assuming area currently fully mapped)
        PTOp    AMB_movecacheablepagesout_L2PT
        BL      AMB_movepagesout_CAM
 [ PMPParanoid
        BL      ValidatePMPs
 ]
 [ AMB_Debug
        DebugTX "<MapOut"
 ]
        EXIT

 [ AMB_LazyMapIn
; Generate one copy of the lazy map out routine for each supported page table
; format; suffix $pt is "ShortDesc" or "LongDesc".
ptcounter SETA 0
        WHILE ptcounter < 2
 [ ptcounter = 0 :LAND: ShortDesc
pt      SETS "ShortDesc"
 ELIF ptcounter = 1 :LAND: LongDesc
pt      SETS "LongDesc"
 |
pt      SETS ""
 ]
 [ "$pt" <> ""
; ----------------------------------------------------------------------------------
;
; AMB_SetMemMapEntries_MapOut_Lazy:
;
; entry:
;       R1 = first page index in PMP/AMBNode
;       R5 = no. of pages (must be >0)
;       R7 = logical offset in pages - must be zero for this variant of routine
;       R10 = DANode
;
; Lazy task swapping variant of SetMemMapEntries_MapOut
; Performs a sparse map out if lazy task swapping is enabled
;
AMB_SetMemMapEntries_MapOut_Lazy_$pt ROUT
        LDR     r7, AMBFlags
        TST     r7, #AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
        MOV     r7, #0                  ; (MOV doesn't disturb the TST flags)
        BNE     AMB_SetMemMapEntries_MapOut ; laziness off: do a full map out instead
        ; Perform a sparse map out via MappedInRegister
        ; Keep track of count of mapped in pages so we can early-exit if we unmap them all
        LDR     r7,[r10,#DANode_Size]
        CMP     r7,#0
        MOVEQ   pc,lr                   ; Hey, there's already nothing there!
        Entry   "r0-r6,r8-r12"
 [ AMB_Debug
        DebugReg r1, "MapOut_Lazy start "
        DebugReg r5, "count "
        DebugReg r10, "node "
        DebugReg r7, "current log size "
 ]
;to do this safely we need to do it in two passes
;first pass makes pages uncacheable
;second pass unmaps them
;n.b. like most of the AMB code, this assumes nothing will trigger an
;abort-based lazy map in operation while we're in the middle of this processing!
;
;pass one: make pages uncacheable (preserve bitmap, CAM)
;
        LDR     r2,=ZeroPage
        ;decide if we want to do TLB coherency as we go
        ARMop   Cache_RangeThreshold,,,r2 ;returns threshold (bytes) in r0
        CMP     r5,r7,LSR #Log2PageSize
        MOVLO   r6,r5
        MOVHS   r6,r7,LSR #Log2PageSize ;r6 = max number of pages we'll be unmapping
        SUB     r6,r6,r0,LSR #Log2PageSize ;r6 < 0 if doing coherency as we go
        ADD     r5,r5,r1                ;r5 = end of region to unmap
        MOV     r3,r7                   ;r3 = logical size
        ; Get MappedInRegister ptr (one bit per page of app space)
        AND     r8,r1,#31
        MOV     r9,#1
        ADR     r7,AMBMappedInRegister
        MOV     r9,r9,LSL r8            ;r9 = current bit in word
        BIC     r8,r1,#31
        LDR     r8,[r7,r8,LSR #5-2]!    ;r8 = current word, r7 = ptr
        Push    "r7-r9"                 ;save bitmap position for pass two
        LDR     r0,[r10,#DANode_Flags]
        LDR     r11,[r2,#MMU_PCBTrans]
        MOV     r4,r1                   ;r4 = current index
 [ pt = "LongDesc"
        GetTempUncache_LongDesc r12, r0, r11, lr ;r12 = temp uncache L2PT flags
        LDR     r11,=LL3PT+(ApplicationStart:SHR:(Log2PageSize-3)) ;r11 -> L3PT, offset by appspace start
 |
        GetTempUncache_ShortDesc r12, r0, r11, lr ;r12 = temp uncache L2PT flags
        LDR     r11,=L2PT+(ApplicationStart:SHR:(Log2PageSize-2)) ;r11 -> L2PT, offset by appspace start
 ]
        CMP     r9,#1
        BNE     %FT32                   ;starting mid-word: go straight to the bit scan
        B       %FT31
30      ; Advance to next word
        ADD     r4,r4,#32
        LDR     r8,[r7,#4]!
        CMP     r4,r5
        BHS     %FT39
31      ; Check register word
        TEQ     r8,#0
        BEQ     %BT30                   ;whole word clear: skip 32 pages at once
32      ; Check register bit
        TST     r8,r9
        BEQ     %FT34
        ; Mapped in page found, make uncacheable
        ; (the CMP r6,#0 below survives the flag-preserving BIC/ORR/STRD/STR,
        ;  so BGE afterwards is testing r6)
 [ pt = "LongDesc"
        ADD     lr,r11,r4,LSL #3
        LDRD    r0,[lr]                 ;Get current L3PT entry
        CMP     r6,#0
        BIC     r0,r0,#TempUncache_L3PTMask
        ORR     r0,r0,r12
        STRD    r0,[lr]                 ;Update L2PT
 |
        LDR     r0,[r11,r4,LSL #2]      ;Get current L2PT entry
        CMP     r6,#0
        BIC     r0,r0,#TempUncache_L2PTMask
        ORR     r0,r0,r12
        STR     r0,[r11,r4,LSL #2]      ;Update L2PT
 ]
        BGE     %FT33                   ;r6 >= 0: defer to one global op at the end
        ; Do cache/TLB maintenance
        MOV     r1,r4,LSL #Log2PageSize
        ADD     r0,r1,#ApplicationStart
        ARMop   MMU_ChangingEntry,,,r2
33
        SUBS    r3,r3,#PageSize
        ADDEQ   r5,r4,#1                ;mapped out all pages in slot; set end to current+1
34      ; Exit if we've just processed the last page
        ADD     r4,r4,#1
        CMP     r4,r5
        BEQ     %FT40
        ; Advance to next bit of current word
        MOVS    r9,r9,LSL #1
        BCC     %BT32
        ; Finished the word, go back to per-word loop if necessary
        LDR     r8,[r7,#4]!
        MOV     r9,#1
        CMP     r8,#0
        BEQ     %BT30                   ;Next word empty, skip it
        B       %BT32                   ;Not empty, process it
39      ; Reached end of region
        LDR     r0,[r10,#DANode_Size]
        CMP     r0,r3
        ADDEQ   sp,sp,#12               ;Junk stacked r7-r9
        BEQ     %FT95                   ;Nothing in region to map out
40      ; Do global maintenance if required
        CMP     r6,#0
        BLT     %FT41                   ;already done per-entry in the loop
        ARMop   MMU_Changing,,,r2
41
;
;pass two: unmap pages (+ clear bitmap + update CAM)
;
        Pull    "r7-r9"                 ;restore bitmap word ptr/value/bit from pass one
        FRAMLDR r4,,r1                  ;r4 = first page index again
        STR     r3,[r10,#DANode_Size]   ;Write back new logical size
        LDR     r10,[r10,#DANode_PMP]   ;r10 -> page list
        LDR     r3,[r2,#CamEntriesPointer]
        LDR     r12,=DuffEntry          ;logical address recorded for unmapped pages
        CMP     r9,#1
        BNE     %FT52
        B       %FT51
50      ; Advance to next word
        ADD     r4,r4,#32
        LDR     r8,[r7,#4]!
        CMP     r4,r5
        BHS     %FT59
51      ; Check register word
        TEQ     r8,#0
        BEQ     %BT50
52      ; Check register bit
        TST     r8,r9
        BEQ     %FT54
        ; Mapped in page found, unmap it
 [ pt = "LongDesc"
        MOV     r0,#0
        MOV     r1,#0
        ADD     lr,r11,r4,LSL #3
        STRD    r0,[lr]                 ;Zero L3PT
        LDR     lr,[r10,r4,LSL #2]      ;Get page number
 |
        MOV     r0,#0
        LDR     lr,[r10,r4,LSL #2]      ;Get page number
        STR     r0,[r11,r4,LSL #2]      ;Zero L2PT
 ]
        BIC     r8,r8,r9                ;clear this page's bit in the bitmap word
        ASSERT  CAM_LogAddr=0
        STR     r12,[r3,lr,LSL #CAM_EntrySizeLog2] ;Update CAM
        CMP     r6,#0
        BGE     %FT53                   ;deferring to one global op at the end
        ; Do TLB maintenance
        MOV     r1,r4,LSL #Log2PageSize
        ADD     r0,r1,#ApplicationStart
        ARMop   MMU_ChangingUncachedEntry,,,r2 ;flush TLB
53
54      ; Exit if we've just processed the last page
        ADD     r4,r4,#1
        CMP     r4,r5
        BEQ     %FT55
        ; Advance to next bit of current word
        MOVS    r9,r9,LSL #1
        BCC     %BT52
        ; Finished the word, go back to per-word loop if necessary
        STR     r8,[r7]                 ;Writeback new value
        LDR     r8,[r7,#4]!
        MOV     r9,#1
        CMP     r8,#0
        BEQ     %BT50                   ;Next word empty, skip it
        B       %BT52                   ;Not empty, process it
55      ; Writeback last bitmap word
        STR     r8,[r7]
59      ; Reached end of region
        ; Do global maintenance if required
        CMP     r6,#0
        BLT     %FT95
        ARMop   MMU_ChangingUncached,,,r2
95
 [ AMB_Debug
        DebugTX "<MapOut"
        FRAMLDR r10
        LDR     r11,[r10,#DANode_Size]
        DebugReg r11, "new log size "
 ]
 [ PMPParanoid
        BL      ValidatePMPs
 ]
        MOV     r7,#0
        EXIT
 ]
ptcounter SETA ptcounter + 1
        WEND
pt      SETS ""
 ]

 [ AMB_LazyMapIn
; ----------------------------------------------------------------------------------
;
;AMB_LazyFixUp
;
; *Only* for ARMs where the abort handler can restart instructions
;
; Routine to be used in abort handlers (in abort32 mode), that checks to see if abort
; is expected, and fixes things up if so, ready to restart instruction.
;
; Fix up consists of mapping in affected page, and updating AMBMappedInRegister. This
; may seem like a lot of work, but remember that the L2PT and CAM updates for each page are
; needed anyway in non-lazy scheme, so there is really only a housekeeping overhead.
;
; There is no cache clean/flush consideration here, since the map is a map in from Nowhere.
; TLB flush consideration is left to main abort handler code - in fact there may not
; be a TLB flush consideration at all, if ARM TLB can be assumed not to cache an
; entry which is a translation fault, as seems rational.
;
; entry: r0 = aborting address (data address for data abort, instruction address
;             for prefetch abort), r1-r7 trashable
;        r2 = 1 for prefetch abort, 0 for data abort
;        r6 = FSR (iff data abort)
; exit:  r0 = non-zero (NE status) if abort was expected and fixed up, zero (EQ status) if not
;        FAR,FSR,SPSR_abt,lr_abt preserved
;
AMB_LazyFixUp ROUT
        MOV     r7,r12                  ;preserve r12; restored on both exit paths
        LDR     r12,=ZeroPage+AMBControl_ws
        LDR     r12,[r12]
        CMP     r12,#0
        BEQ     %FT90                   ;not initialised!
        LDR     r1,AMBFlags
        TST     r1,#AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
        BNE     %FT90                   ;not active
        LDR     r1,AMBMappedInNode
        CMP     r1,#0
        BEQ     %FT90                   ;no current node
        TEQ     r2,#1                   ;if data abort
        ANDNE   r3,r6,#&F
        TEQNE   r3,#7                   ; and not a page translation fault
        BNE     %FT90                   ; then not a lazy abort (and FAR may be invalid anyway)
        LDR     r2,[r1,#AMBNode_DANode+DANode_PMPSize]
        SUBS    r0,r0,#ApplicationStart
        BLO     %FT90                   ;abort not in current app space
        MOV     r0,r0,LSR #Log2PageSize ;address now in terms of pages from ApplicationStart
        CMP     r0,r2
        BHS     %FT90                   ;abort not in current app space
 [ AMB_Debug
        Push    "lr"
        DebugReg r0, "Lazy "
        Pull    "lr"
 ]
;
; check/update the MappedIn bitmap
;
        ADR     r2,AMBMappedInRegister
        MOV     r5,#1
        ADD     r2,r2,r0,LSR #5-2
        BIC     r2,r2,#3                ;r2 -> bitmap word affected
        AND     r3,r0,#31
        MOV     r5,r5,LSL r3            ;mask for bit affected in bitmap word
        LDR     r3,[r2]
        LDR     r4,[r1,#AMBNode_DANode+DANode_Size] ;count it
        TST     r3,r5                   ;if page already mapped in, not a lazy abort
        BNE     %FT90
        ORR     r3,r3,r5                ;ok, mark that we are going to map this page in
        STR     r3,[r2]
        ADD     r4,r4,#PageSize         ;one more page now logically present
        STR     r4,[r1,#AMBNode_DANode+DANode_Size]
        ; Update sparse HWM
        MOV     r3,r0,LSL #Log2PageSize
        LDR     r4,[r1,#AMBNode_DANode+DANode_SparseHWM]
        ADD     r3,r3,#ApplicationStart+PageSize ;HWM candidate = address just past this page
        CMP     r3,r4
        STRHI   r3,[r1,#AMBNode_DANode+DANode_SparseHWM]
;
; now map in the the page that went pop
;
        LDR     r1,[r1,#AMBNode_DANode+DANode_PMP]
        LDR     r2,=ZeroPage+PhysRamTable
        LDR     r3,[r1,r0,LSL #2]       ;r3 = page involved
        MOV     r6,r3
10      ; Walk PhysRamTable to convert page number into a physical address
        LDMIA   r2!,{r4,r5}
        MOV     r5,r5,LSR #12           ;presumably page count of this bank - TODO confirm field layout
        CMP     r6,r5
        SUBHS   r6,r6,r5
        BHS     %BT10
        ADD     r4,r4,r6
        MOV     r4,r4,ROR #20           ;High address packed into low bits for LongDesc
        MOV     r1,#DynAreaFlags_PMP
        ;here, r0 = page index into appslot, r1 = PPL, r3 = page number of page involved
        ADD     r0,r0,#ApplicationStart:SHR:Log2PageSize ;address now in terms of pages from 0
 [ LongDesc :LAND: ShortDesc
        PTWhich r5                      ;both formats built in: select at run time
        BNE     %FT50
 ]
 [ ShortDesc
        GetPTE  r4,4K,r4,r1,ShortDesc
        LDR     r5,=L2PT
        STR     r4,[r5,r0,LSL #2]       ;update L2PT
 [ LongDesc
        B       %FT60
50
 ]
 ]
 [ LongDesc
        GetPTE  r4,4K,r4,r1,LongDesc
        LDR     r12,=LL3PT
        ADD     r12,r12,r0,LSL #3
        STRD    r4,[r12]                ;update L3PT
60
 ]
;
        LDR     r5,=ZeroPage
        LDR     r5,[r5,#CamEntriesPointer]
        ADD     r5,r5,r3,LSL #CAM_EntrySizeLog2 ;r5 -> CAM entry affected
        MOVS    r0,r0,LSL #Log2PageSize ;address is now ordinary again, and must be non-zero
        LDR     r12,[r5,#CAM_PageFlags]
        AND     r12,r12,#StickyPageFlags ;keep sticky flags, replace the rest with the new PPL
        ORR     r1,r1,r12
        ASSERT  CAM_LogAddr=0
        ASSERT  CAM_PageFlags=4
        STMIA   r5,{r0,r1}              ;update CAM entry
        MOV     r12,r7                  ;restore entry r12
        MOV     pc,lr                   ;r0 is non-zero, NE status
;
; not our abort
;
90
        MOVS    r0,#0
        MOV     r12,r7                  ;restore entry r12
        MOV     pc,lr                   ;r0 is zero, EQ status

; ----------------------------------------------------------------------------------
;
;AMB_MakeFullyHonest
;
; *Only* for ARMs where the abort handler can restart instructions
;
; Routine to be used in abort handlers (in abort32 mode), to make sure that application space
; is fully mapped in before passing control to a handler that can't cope with recursive aborts,
; e.g. when passing to the data or prefetch abort environment handlers (which are entered in
; abort32, and may be running from application space).
;
; Fix up consists of triggering recursive aborts in order to map in each missing page.
;
; entry: r0-r3, r12 trashable
;        FSR valid for data aborts, unpredictable for prefetch aborts
; exit:  r0-r3, r12 corrupt
;        DFAR,DFSR,SPSR_abt,lr_abt corrupted
;
AMB_MakeFullyHonest ROUT
        LDR     r12,=ZeroPage+AMBControl_ws
        LDR     r12,[r12]
        CMP     r12,#0
        BEQ     %FT90                   ;not initialised!
        LDR     r1,AMBFlags
        TST     r1,#AMBFlag_LazyMapIn_disable :OR: AMBFlag_LazyMapIn_suspend
        BNE     %FT90                   ;not active
        LDR     r1,AMBMappedInNode
        CMP     r1,#0
        BEQ     %FT90                   ;no current node
        MOV     r1,#ApplicationStart    ;good old page walk to provoke lazy fixups
        LDR     r2,AMBMappedInNode
        LDR     r2,[r2,#AMBNode_DANode+DANode_PMPSize] ;r2 = pages in slot
        CMP     r2,#0
        BEQ     %FT90
        MOV     r0,lr                   ;preserve lr_abort so we can return properly (!)
30      ; Touch one word in every page: a missing page aborts, and
        ; AMB_LazyFixUp maps it in before the load is restarted
        LDR     r3,[r1]                 ;bring that page in by the magic of aborts
        SUBS    r2,r2,#1
        ADD     r1,r1,#PageSize
        BNE     %BT30
        MOV     lr,r0                   ;restore return address
 [ MEMM_Type = "VMSAv6"
        myISB   ,r0                     ; Not sure if this is necessary or not; do it just in case
 ]
;
90
        MOV     pc,lr
 ] ;AMB_LazyMapIn

; ----------------------------------------------------------------------------------
 [ AMB_LazyMapIn
;
; If page of given logical address (r0) is in current app space, make sure page is
; 'honest' ie. properly mapped in. This is for things like FindMemMapEntries
; that must return sensible info (and presumably their client needs a consistent
; view of app space mapping, so that laziness is transparent)
;
AMB_MakeHonestLA ROUT
        CMP     r0,#AplWorkMaxSize      ;quick dismiss if definitely not app address
        MOVHS   pc,lr
        Push    "r1,r12,lr"
        LDR     r12,=ZeroPage+AMBControl_ws
        LDR     r12,[r12]
        CMP     r12,#0
        BEQ     %FT90                   ;we're dormant!
        SUBS    r14,r0,#ApplicationStart
        BMI     %FT90                   ;below app space
        MOV     r14,r14,LSR #Log2PageSize ;pages from ApplicationStart
        LDR     r1,AMBMappedInNode
        CMP     r1,#0
        BEQ     %FT90                   ;no node mapped in
        LDR     r1,[r1,#AMBNode_DANode+DANode_PMPSize]
        CMP     r1,r14                  ;HI if log addr is in current app space
        LDRHIB  r1, [r0,#0]             ;make honest if necessary (magic of abort fixups!)
90
        Pull    "r1,r12,pc"

; similar to AMB_MakeHonestLA, but for page of given page number (r0)
;
AMB_MakeHonestPN ROUT
        Push    "r1-r3,r12,lr"
        LDR     r14,=ZeroPage
        LDR     r12,[r14,#AMBControl_ws]
        CMP     r12,#0
        BEQ     %FT90                   ;we're dormant!
        LDR     r1,[r14,#MaxCamEntry]
        CMP     r0,r1
        BHI     %FT90                   ;invalid page number
        ; Fetch the CAM entry for this page:
        ; r1 = logical addr, r2 = page flags, r3 = PMP, r14 = PMP index
        LDR     r1,[r14,#CamEntriesPointer]
        ADD     r1,r1,r0,LSL #CAM_EntrySizeLog2
        ASSERT  CAM_LogAddr = 0
        ASSERT  CAM_PageFlags = 4
        ASSERT  CAM_PMP = 8
        ASSERT  CAM_PMPIndex = 12
        LDMIA   r1,{r1-r3,r14}
        ASSERT  CAM_LogAddr = 0
        TST     r2,#DynAreaFlags_PMP    ;can't be one of ours if not owned by a PMP
        BEQ     %FT90
        LDR     r2,=Nowhere
        TEQ     r1,r2
        BNE     %FT90                   ;only a page at Nowhere might be dishonest
        LDR     r1,AMBMappedInNode      ;let's check the current node
        ADD     r1,r1,#AMBNode_DANode
        CMP     r3,r1
        BNE     %FT90                   ;doesn't belong to current node, or there is no current node
        ; r14 is the index within the PMP, and the flat logical<->physical indexing
        ; scheme used by AMB means it will also be the logical index into application space
        MOV     r2,#ApplicationStart
        LDRB    r2,[r2,r14,LSL #Log2PageSize] ;make honest if necessary (magic of abort fixups!)
90
        Pull    "r1-r3,r12,pc"
 ] ;AMB_LazyMapIn

; ----------------------------------------------------------------------------------
;
;update CAM entry for page number in $reg
;
;entry: r11 -> CAM, r9 = logical addr of page, lr = PPL of page
;exit:  $reg = addr of CAM entry
;
        MACRO
        UpdateCAM $reg,$temp
        ADD     $reg,r11,$reg,LSL #CAM_EntrySizeLog2 ;r0 -> CAM entry for 1st page
        LDR     $temp,[$reg,#CAM_PageFlags]
        AND     $temp,$temp,#StickyPageFlags ;keep sticky flags, merge in new PPL
        ORR     $temp,$temp,lr
 [ $temp > r9
        ASSERT  CAM_LogAddr=0
        ASSERT  CAM_PageFlags=4
        STMIA   $reg,{r9,$temp}         ;store logical addr,PPL
 |
        STR     r9,[$reg,#CAM_LogAddr]
        STR     $temp,[$reg,#CAM_PageFlags]
 ]
        MEND

; ----------------------------------------------------------------------------------
;
;AMB_movepagesin_CAM
;
;updates CAM, does not update L2PT
;
; entry:
;       r3 = new logical address of 1st page
;       r8 = number of pages
;       r9 = PPL for CAM
;       r10 -> page list
;
AMB_movepagesin_CAM ROUT
        Entry   "r0-r12"
        BIC     lr,r9,#StickyPageFlags  ;lr = PPL with sticky flags masked out (for UpdateCAM)
        MOV     r9,r3                   ;r9 = logical address, stepped a page at a time
        LDR     r11,=ZeroPage
        LDR     r11,[r11,#CamEntriesPointer] ;r11 -> CAM
        CMP     r8,#8
        BLT     %FT20
10      ; Unrolled: 8 pages per iteration
        LDMIA   r10!,{r0-r7}            ;next 8 page numbers
        UpdateCAM r0,r12
        ADD     r9,r9,#PageSize         ;next logical addr
        UpdateCAM r1,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r2,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r3,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r4,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r5,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r6,r12
        ADD     r9,r9,#PageSize
        UpdateCAM r7,r12
        ADD     r9,r9,#PageSize
        SUB     r8,r8,#8
        CMP     r8,#8
        BGE     %BT10
20
        CMP     r8,#0
        EXIT    EQ
30      ; Remaining pages, one at a time
        LDR     r0,[r10],#4
        UpdateCAM r0,r12
        ADD     r9,r9,#PageSize
        SUBS    r8,r8,#1
        BNE     %BT30
        EXIT

; ----------------------------------------------------------------------------------
;
;AMB_movepagesout_CAM
;
;updates CAM, does not update L2PT
;
; entry:
;       r8 = number of pages
;       r9 = PPL for CAM
;       r10 -> page list
;
AMB_movepagesout_CAM ROUT
        Entry   "r0-r12"
        BIC     lr,r9,#StickyPageFlags  ;lr = PPL with sticky flags masked out (for UpdateCAM)
        LDR     r9,=DuffEntry           ;every page's logical addr becomes the 'nowhere' entry
        LDR     r11,=ZeroPage
        LDR     r11,[r11,#CamEntriesPointer] ;r11 -> CAM
        CMP     r8,#8
        BLT     %FT20
10      ; Unrolled: 8 pages per iteration
        LDMIA   r10!,{r0-r7}            ;next 8 page numbers
        UpdateCAM r0,r12
        UpdateCAM r1,r12
        UpdateCAM r2,r12
        UpdateCAM r3,r12
        UpdateCAM r4,r12
        UpdateCAM r5,r12
        UpdateCAM r6,r12
        UpdateCAM r7,r12
        SUB     r8,r8,#8
        CMP     r8,#8
        BGE     %BT10
20
        CMP     r8,#0
        EXIT    EQ
30      ; Remaining pages, one at a time
        LDR     r0,[r10],#4
        UpdateCAM r0,r12
        SUBS    r8,r8,#1
        BNE     %BT30
        EXIT

        LTORG

        END