diff --git a/s/AMBControl/memmap b/s/AMBControl/memmap
index c9aa51a2c8c13062944b7226cbb27bf1158ffd2f..a2ba19be94f74ae8f94fbae620bd63a4de6f7705 100644
--- a/s/AMBControl/memmap
+++ b/s/AMBControl/memmap
@@ -356,50 +356,6 @@ AMB_SetMemMapEntries_MapOut_Lazy ROUT
 AMB_SetMemMapEntries_MapOut_Lazy * AMB_SetMemMapEntries_MapOut
   ]
 
-;
-; ----------------------------------------------------------------------------------
-;
-;convert page number in $pnum to L2PT entry (physical address+protection bits),
-;using cached PhysRamTable entries for speed
-;
-;entry: $ptable -> PhysRamTable, $pbits = protection bits
-;       $cache0, $cache1, $cache2 = PhysRamTable cache
-;exit:  $temp corrupted
-;       $cache0, $cache1, $cache2 updated
-;
-
-        MACRO
-        PageNumToL2PT $pnum,$ptable,$cache0,$cache1,$cache2,$pbits,$temp
-        SUB     $temp,$pnum,$cache0 ; no. pages into block
-        CMP     $temp,$cache2
-        BLHS    PageNumToL2PTCache_$ptable._$cache0._$cache1._$cache2._$temp
-        ADD     $pnum,$cache1,$temp,LSL #Log2PageSize ; physical address of page
-        ORR     $pnum,$pbits,$pnum ; munge in protection bits
-        MEND
-
-        MACRO
-        PageNumToL2PTInit $ptable,$cache0,$cache1,$cache2
-        ASSERT  $cache2 > $cache1
-        LDR     $ptable,=ZeroPage+PhysRamTable
-        MOV     $cache0,#0
-        LDMIA   $ptable,{$cache1,$cache2}
-        MOV     $cache2,$cache2,LSR #12
-        MEND
-
-PageNumToL2PTCache_r4_r5_r6_r7_r12 ROUT
-        Entry   "r4"
-        ADD     r12,r12,r5 ; Restore page number
-        MOV     r5,#0
-10
-        LDMIA   r4!,{r6,r7} ; Get PhysRamTable entry
-        MOV     r7,r7,LSR #12
-        CMP     r12,r7
-        SUBHS   r12,r12,r7
-        ADDHS   r5,r5,r7
-        BHS     %BT10
-        EXIT    ; r5-r7 = cache entry, r12 = offset into entry
-
-
   [ AMB_LazyMapIn
 
 ; ----------------------------------------------------------------------------------
@@ -621,54 +577,6 @@ AMB_MakeHonestPN  ROUT
 
   ] ;AMB_LazyMapIn
 
-; ----------------------------------------------------------------------------------
-;
-;AMB_movepagesin_L2PT
-;
-;updates L2PT for new logical page positions, does not update CAM
-;
-; entry:
-;       r3  =  new logical address of 1st page
-;       r8  =  number of pages
-;       r9  =  page flags
-;       r10 -> page list
-;
-AMB_movepagesin_L2PT ROUT
-        Entry   "r0-r12"
-
-        MOV     r0, #0
-        GetPTE  r11, 4K, r0, r9
-
-        PageNumToL2PTInit r4,r5,r6,r7
-
-        LDR     r9,=L2PT
-        ADD     r9,r9,r3,LSR #(Log2PageSize-2) ;r9 -> L2PT for 1st new logical page
-
-        CMP     r8,#4
-        BLT     %FT20
-10
-        LDMIA   r10!,{r0-r3}         ;next 4 page numbers
-        PageNumToL2PT r0,r4,r5,r6,r7,r11,r12
-        PageNumToL2PT r1,r4,r5,r6,r7,r11,r12
-        PageNumToL2PT r2,r4,r5,r6,r7,r11,r12
-        PageNumToL2PT r3,r4,r5,r6,r7,r11,r12
-        STMIA   r9!,{r0-r3}          ;write 4 L2PT entries
-        SUB     r8,r8,#4
-        CMP     r8,#4
-        BGE     %BT10
-20
-        CMP     r8,#0
-        BEQ     %FT35
-30
-        LDR     r0,[r10],#4
-        PageNumToL2PT r0,r4,r5,r6,r7,r11,r12
-        STR     r0,[r9],#4
-        SUBS    r8,r8,#1
-        BNE     %BT30
-35
-        PageTableSync
-        EXIT
-
 ; ----------------------------------------------------------------------------------
 ;
 ;update CAM entry for page number in $reg
@@ -791,111 +699,6 @@ AMB_movepagesout_CAM ROUT
         BNE     %BT30
         EXIT
 
-; ----------------------------------------------------------------------------------
-;
-;AMB_movecacheablepagesout_L2PT
-;
-;updates L2PT for old logical page positions, does not update CAM
-;
-; entry:
-;       r3  =  old page flags
-;       r4  =  old logical address of 1st page
-;       r8  =  number of pages
-;
-AMB_movecacheablepagesout_L2PT
-        Entry   "r0-r8"
-
-        ; Calculate L2PT flags needed to make the pages uncacheable
-        ; Assume all pages will have identical flags (or at least close enough)
-        LDR     lr,=ZeroPage
-        LDR     lr,[lr, #MMU_PCBTrans]
-        GetTempUncache r0, r3, lr, r1
-        LDR     r1, =TempUncache_L2PTMask
-
-        LDR     lr,=L2PT
-        ADD     lr,lr,r4,LSR #(Log2PageSize-2)    ;lr -> L2PT 1st entry
-
-        CMP     r8,#4
-        BLT     %FT20
-10
-        LDMIA   lr,{r2-r5}
-        BIC     r2,r2,r1
-        BIC     r3,r3,r1
-        BIC     r4,r4,r1
-        BIC     r5,r5,r1
-        ORR     r2,r2,r0
-        ORR     r3,r3,r0
-        ORR     r4,r4,r0
-        ORR     r5,r5,r0
-        STMIA   lr!,{r2-r5}
-        SUB     r8,r8,#4
-        CMP     r8,#4
-        BGE     %BT10
-20
-        CMP     r8,#0
-        BEQ     %FT35
-30
-        LDR     r2,[lr]
-        BIC     r2,r2,r1
-        ORR     r2,r2,r0
-        STR     r2,[lr],#4
-        SUBS    r8,r8,#1
-        BNE     %BT30
-35
-        FRAMLDR r0,,r4                           ;address of 1st page
-        FRAMLDR r1,,r8                           ;number of pages
-        LDR     r3,=ZeroPage
-        ARMop   MMU_ChangingEntries,,,r3
-        FRAMLDR r4
-        FRAMLDR r8
-        B       %FT55 ; -> moveuncacheablepagesout_L2PT (avoid pop+push of large stack frame)
-
-; ----------------------------------------------------------------------------------
-;
-;AMB_moveuncacheablepagesout_L2PT
-;
-;updates L2PT for old logical page positions, does not update CAM
-;
-; entry:
-;       r4  =  old logical address of 1st page
-;       r8  =  number of pages
-;
-AMB_moveuncacheablepagesout_L2PT
-        ALTENTRY
-55      ; Enter here from moveuncacheablepagesout
-        LDR     lr,=L2PT
-        ADD     lr,lr,r4,LSR #(Log2PageSize-2)    ;lr -> L2PT 1st entry
-
-        MOV     r0,#0                             ;0 means translation fault
-        MOV     r1,#0
-        MOV     r2,#0
-        MOV     r3,#0
-        MOV     r4,#0
-        MOV     r5,#0
-        MOV     r6,#0
-        MOV     r7,#0
-
-        CMP     r8,#8
-        BLT     %FT70
-60
-        STMIA   lr!,{r0-r7}                       ;blam! (8 entries)
-        SUB     r8,r8,#8
-        CMP     r8,#8
-        BGE     %BT60
-70
-        CMP     r8,#0
-        BEQ     %FT85
-80
-        STR     r0,[lr],#4
-        SUBS    r8,r8,#1
-        BNE     %BT80
-85
-        FRAMLDR r0,,r4                           ;address of 1st page
-        FRAMLDR r1,,r8                           ;number of pages
-        LDR     r3,=ZeroPage
-        ARMop   MMU_ChangingUncachedEntries,,,r3 ;no cache worries, hoorah
-        EXIT
-
 
         LTORG
 
diff --git a/s/ChangeDyn b/s/ChangeDyn
index 180884e11a8d5bfcc2c8f40b7a188bb4d93471b2..c45abaddf4dd05df091de4965b02664641b51d88 100644
--- a/s/ChangeDyn
+++ b/s/ChangeDyn
@@ -4240,171 +4240,6 @@ GetNextRange Entry "r7,r8"
         ADDNE   r0, r0, #8                              ; then advance fixed ptr
         EXIT
 
-;**************************************************************************
-;
-;       AllocateBackingLevel2 - Allocate L2 pages for an area
-;
-;       Internal routine called by DynArea_Create
-;
-; in:   r3 = base address (will be page aligned)
-;       r4 = area flags (NB if doubly mapped, then have to allocate for both halves)
-;       r5 = size (of each half in doubly mapped areas)
-;
-; out:  If successfully allocated pages, then
-;         All registers preserved
-;         V=0
-;       else
-;         r0 -> error
-;         V=1
-;       endif
-
-AllocateBackingLevel2 Entry "r0-r8,r11"
-        TST     r4, #DynAreaFlags_DoublyMapped          ; if doubly mapped
-        SUBNE   r3, r3, r5                              ; then area starts further back
-        MOVNE   r5, r5, LSL #1                          ; and is twice the size
-
-; NB no need to do sanity checks on addresses here, they've already been checked
-
-; now round address range to 4M boundaries
-
-        ADD     r5, r5, r3                              ; r5 -> end
-        MOV     r0, #1 :SHL: 22
-        SUB     r0, r0, #1
-        BIC     r8, r3, r0                              ; round start address down (+ save for later)
-        ADD     r5, r5, r0
-        BIC     r5, r5, r0                              ; but round end address up
-
-; first go through existing L2PT working out how much we need
-
-        LDR     r7, =L2PT
-        ADD     r3, r7, r8, LSR #10                     ; r3 -> start of L2PT for area
-        ADD     r5, r7, r5, LSR #10                     ; r5 -> end of L2PT for area +1
-
-        ADD     r1, r7, r3, LSR #10                     ; r1 -> L2PT for r3
-        ADD     r2, r7, r5, LSR #10                     ; r2 -> L2PT for r5
-
-        TEQ     r1, r2                                  ; if no pages needed
-        BEQ     %FT30
-
-        MOV     r4, #0                                  ; number of backing pages needed
-10
-        LDR     r6, [r1], #4                            ; get L2PT entry for L2PT
-        TST     r6, #3                                  ; EQ if translation fault
-        ADDEQ   r4, r4, #1                              ; if not there then 1 more page needed
-        TEQ     r1, r2
-        BNE     %BT10
-
-; if no pages needed, then exit
-
-        TEQ     r4, #0
-        BEQ     %FT30
-
-; now we need to claim r4 pages from the free pool, if possible; return error if not
-
-        LDR     r1, =ZeroPage
-        LDR     r6, [r1, #FreePoolDANode + DANode_PMPSize]
-        SUBS    r6, r6, r4                              ; reduce free pool size by that many pages
-        BCS     %FT14                                   ; if enough, skip next bit
-
-; not enough pages in free pool currently, so try to grow it by the required amount
-
-        Push    "r0, r1"
-        MOV     r0, #ChangeDyn_FreePool
-        RSB     r1, r6, #0                              ; size change we want (+ve)
-        MOV     r1, r1, LSL #12
-        SWI     XOS_ChangeDynamicArea
-        Pull    "r0, r1"
-        BVS     %FT90                                   ; didn't manage change, so report error
-
-        MOV     r6, #0                                  ; will be no pages left in free pool after this
-14
-        STR     r6, [r1, #FreePoolDANode + DANode_PMPSize] ; if possible then update size
-
-        LDR     r0, [r1, #FreePoolDANode + DANode_PMP]  ; r0 -> free pool page list
-        ADD     r0, r0, r6, LSL #2                      ; r0 -> first page we're taking out of free pool
-
-        LDR     lr, =L1PT
-        ADD     r8, lr, r8, LSR #18                     ; point r8 at start of L1 we may be updating
-        ADD     r1, r7, r3, LSR #10                     ; point r1 at L2PT for r3 again
-        LDR     r11, =ZeroPage
-        LDR     r11, [r11, #PageTable_PageFlags]        ; access privs (+CB bits)
-20
-        LDR     r6, [r1], #4                            ; get L2PT entry again
-        TST     r6, #3                                  ; if no fault
-        BNE     %FT25                                   ; then skip
-
-        Push    "r1-r2, r4"
-        MOV     lr, #-1
-        LDR     r2, [r0]                                ; get page number to use
-        STR     lr, [r0], #4                            ; remove from PMP
-        Push    "r0"
-        BL      BangCamUpdate                           ; Map in to L2PT access window
-
-; now that the page is mapped in we can zero its contents (=> cause translation fault for area initially)
-; L1PT won't know about the page yet, so mapping it in with garbage initially shouldn't cause any issues
-
-        ADD     r0, r3, #4096
-        MOV     r1, #0
-        MOV     r2, #0
-        MOV     r4, #0
-        MOV     r6, #0
-15
-        STMDB   r0!, {r1,r2,r4,r6}                      ; store data
-        TEQ     r0, r3
-        BNE     %BT15
-
-        ; Make sure the page is seen to be clear before we update L1PT to make
-        ; it visible to the MMU
-        PageTableSync
-
-        Pull    "r0-r2, r4"
-
-        LDR     lr, =ZeroPage
-        LDR     r6, [lr, #L2PTUsed]
-        ADD     r6, r6, #4096
-        STR     r6, [lr, #L2PTUsed]
-
-; now update 4 words in L1PT (corresponding to 4M of address space which is covered by the 4K of L2)
-; and point them at the physical page we've just allocated (r1!-4 will already hold physical address+bits now!)
-
-        LDR     r6, [r1, #-4]                           ; r6 = physical address for L2 page + other L2 bits
-        MOV     r6, r6, LSR #12                         ; r6 = phys.addr >> 12
- [ MEMM_Type = "VMSAv6"
-        LDR     lr, =L1_Page
- |
-        LDR     lr, =L1_Page + L1_U                     ; form other bits to put in L1
- ]
-        ORR     lr, lr, r6, LSL #12                     ; complete L1 entry
-        STR     lr, [r8, #0]                            ; store entry for 1st MB
-        ADD     lr, lr, #1024                           ; advance L2 pointer
-        STR     lr, [r8, #4]                            ; store entry for 2nd MB
-        ADD     lr, lr, #1024                           ; advance L2 pointer
-        STR     lr, [r8, #8]                            ; store entry for 3rd MB
-        ADD     lr, lr, #1024                           ; advance L2 pointer
-        STR     lr, [r8, #12]                           ; store entry for 4th MB
-25
-        ADD     r3, r3, #4096                           ; advance L2PT logical address
-        ADD     r8, r8, #16                             ; move onto L1 for next 4M
-
-        TEQ     r1, r2
-        BNE     %BT20
-        PageTableSync
-30
-        CLRV
-        EXIT
-
-; Come here if not enough space in free pool to allocate level2
-
-90
-        ADRL    r0, ErrorBlock_CantAllocateLevel2
-  [ International
-        BL      TranslateError
-  |
-        SETV
-  ]
-        STR     r0, [sp]
-        EXIT
-
 ;**************************************************************************
 
   [ ChocolateSysHeap
@@ -6178,7 +6013,7 @@ ReplacePage_BothUnmapped
 
         ; Now map in dest
         MOV     r0, #L1_B
-        LDR     r1, [r8, #PageBlockSize+8]      ; r1 = physical address of src for copy
+        LDR     r1, [r8, #PageBlockSize+8]      ; r1 = physical address of dest for copy
         MOV     r2, #0
         MOV     r3, #0                          ; no oldp needed
         BL      RISCOS_AccessPhysicalAddress
@@ -6240,35 +6075,9 @@ ReplacePage_Done
 
 ; now check if page we're replacing is in L2PT, and if so then adjust L1PT entries (4 of these)
 
-        LDR     r2, =L2PT
-        LDR     r6, [r8, #4]                    ; look at logical address of page being replaced
-        SUBS    r6, r6, r2
-        BCC     %FT84                           ; address is below L2PT
-        CMP     r6, #4*1024*1024
-        BCS     %FT84                           ; address is above L2PT
-
-        LDR     r2, =L1PT
-        ADD     r2, r2, r6, LSR #(12-4)         ; address in L1 of 4 consecutive words to update
-        LDR     r3, [r2]                        ; load 1st word, to get AP etc bits
-        MOV     r3, r3, LSL #(31-9)             ; junk other bits
-        LDR     r4, [r8, #PageBlockSize+8]      ; load new physical address for page
-        ORR     r3, r4, r3, LSR #(31-9)         ; and merge with AP etc bits
-        STR     r3, [r2], #4
-        ADD     r3, r3, #&400
-        STR     r3, [r2], #4
-        ADD     r3, r3, #&400
-        STR     r3, [r2], #4
-        ADD     r3, r3, #&400
-        STR     r3, [r2], #4
-      [ MEMM_Type = "VMSAv6"
-        ; In order to guarantee that the result of a page table write is
-        ; visible, the ARMv6+ memory order model requires us to perform TLB
-        ; maintenance (equivalent to the MMU_ChangingUncached ARMop) after we've
-        ; performed the write. Performing the maintenance beforehand (as we've
-        ; done traditionally) will work most of the time, but not always.
-        LDR     r3, =ZeroPage
-        ARMop   MMU_ChangingUncached,,,r3
-      ]
+        LDR     r0, [r8, #4]                    ; look at logical address of page being replaced
+        LDR     r1, [r8, #PageBlockSize+8]      ; load new physical address for page
+        BL      UpdateL1PTForPageReplacement
 
 84
         Pull    "r0-r4,r7-r12"                  ; restore registers
diff --git a/s/GetAll b/s/GetAll
index aa3a767ab20a623419a2334019048025344706fa..97b74f5b9a44e3864367803609a486534416d5e0 100644
--- a/s/GetAll
+++ b/s/GetAll
@@ -98,6 +98,7 @@
         $GetUnsqueeze
         GET     s.ArthurSWIs
         $GetKernelMEMC
+        GET     s.ShortDesc
         GET     s.Exceptions
         GET     s.ChangeDyn
         GET     s.HAL
diff --git a/s/MemInfo b/s/MemInfo
index 7b80cd59f41b7a6b53e0284ba079721e89b06660..a2de7965663219479159f7dbbcc3d2df35feabcf 100644
--- a/s/MemInfo
+++ b/s/MemInfo
@@ -398,50 +398,6 @@ ppn_to_logical
         CLC
         MOV     pc, lr
 
-
-;----------------------------------------------------------------------------------------
-; logical_to_physical
-;
-;       In:     r4 = logical address
-;
-;       Out:    r5 corrupt
-;               CC => r8,r9 = physical address
-;               CS => invalid logical address, r8,r9 corrupted
-;
-;       Convert logical address to physical address.
-;
-logical_to_physical
-        LDR     r5, =L2PT
-        MOV     r9, r4, LSR #12         ; r9 = logical page number
-        ADD     r9, r5, r9, LSL #2      ; r9 -> L2PT entry for logical address
-        MOV     r8, r9, LSR #12         ; r8 = page offset to L2PT entry for logical address
-        LDR     r8, [r5, r8, LSL #2]    ; r8 = L2PT entry for L2PT entry for logical address
-      [ MEMM_Type = "ARM600"
-        ASSERT  ((L2_SmallPage :OR: L2_ExtPage) :AND: 2) <> 0
-        ASSERT  (L2_LargePage :AND: 2) = 0
-      |
-        ASSERT  L2_SmallPage = 2
-        ASSERT  L2_XN = 1               ; Because XN is bit 0, bit 1 is the only bit we can check when looking for small pages
-      ]
-        TST     r8, #2                  ; Check for valid (4K) page.
-        BEQ     meminfo_returncs
-
-        LDR     r8, [r9]                ; r8 = L2PT entry for logical address
-        TST     r8, #2                  ; Check for valid (4K) page.
-        BEQ     meminfo_returncs
-
-      [ NoARMT2
-        LDR     r9, =&FFF               ; Valid so
-        BIC     r8, r8, r9              ;   mask off bits 0-11,
-        AND     r9, r4, r9              ;   get page offset from logical page
-        ORR     r8, r8, r9              ;   combine with physical page address.
-      |
-        BFI     r8, r4, #0, #12         ; Valid, so apply offset within the page
-      ]
-        MOV     r9, #0                  ; 4K pages are always in the low 4GB
-        CLC
-        MOV     pc, lr
-
 meminfo_returncs_pullr8
         Pull    "r8"
 meminfo_returncs
diff --git a/s/MemMap2 b/s/MemMap2
index a753c3f4ef1a5ca282678e8c51cad9d6b5e58d21..19424f5cbdc1e67bd85c1a8451b6ccae3e734f55 100644
--- a/s/MemMap2
+++ b/s/MemMap2
@@ -500,61 +500,6 @@ DoTheShrink ROUT
         EXIT
 
  [ CacheablePageTables
-MakePageTablesCacheable ROUT
-        Entry   "r0,r4-r5,r8-r9"
-        BL      GetPageFlagsForCacheablePageTables
-        ; Update PageTable_PageFlags
-        LDR     r1, =ZeroPage
-        STR     r0, [r1, #PageTable_PageFlags]
-        ; Adjust the logical mapping of the page tables to use the specified page flags
-        LDR     r1, =L1PT
-        LDR     r2, =16*1024
-        BL      AdjustMemoryPageFlags
-        LDR     r1, =L2PT
-        LDR     r2, =4*1024*1024
-        BL      AdjustMemoryPageFlags
-        ; Update the TTBR
-        LDR     r4, =L1PT
-        BL      logical_to_physical
-        MOV     r0, r8 ; Assume only 32bit address
-        LDR     r1, =ZeroPage
-        BL      SetTTBR
-        ; Perform a full TLB flush to make sure the new mappings are visible
-        ARMop   TLB_InvalidateAll,,,r1
-        EXIT
-
-MakePageTablesNonCacheable ROUT
-        Entry   "r0-r1,r4-r5,r8-r9"
-        ; Flush the page tables from the cache, so that when we update the TTBR
-        ; below we can be sure that the MMU will be seeing the current page
-        ; tables
-        LDR     r0, =L1PT
-        ADD     r1, r0, #16*1024
-        LDR     r4, =ZeroPage
-        ARMop   Cache_CleanRange,,,r4
-        LDR     r0, =L2PT
-        ADD     r1, r0, #4*1024*1024
-        ARMop   Cache_CleanRange,,,r4
-        ; Update the TTBR so the MMU performs non-cacheable accesses
-        LDR     r0, =AreaFlags_PageTablesAccess :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable
-        STR     r0, [r4, #PageTable_PageFlags]
-        LDR     r4, =L1PT
-        BL      logical_to_physical
-        MOV     r0, r8 ; Assume only 32bit address
-        LDR     r1, =ZeroPage
-        BL      SetTTBR
-        ; Perform a full TLB flush just in case
-        ARMop   TLB_InvalidateAll,,,r1
-        ; Now we can adjust the logical mapping of the page tables to be non-cacheable
-        LDR     r0, [r1, #PageTable_PageFlags]
-        LDR     r1, =L1PT
-        LDR     r2, =16*1024
-        BL      AdjustMemoryPageFlags
-        LDR     r1, =L2PT
-        LDR     r2, =4*1024*1024
-        BL      AdjustMemoryPageFlags
-        EXIT
-
 ; In:
 ; R0 = new page flags
 ; R1 = base of area (page aligned)
diff --git a/s/ShortDesc b/s/ShortDesc
new file mode 100644
index 0000000000000000000000000000000000000000..a52656365b58d89193b75a421700232bba460b77
--- /dev/null
+++ b/s/ShortDesc
@@ -0,0 +1,533 @@
+; Copyright 1996 Acorn Computers Ltd
+; Copyright 2016 Castle Technology Ltd
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;     http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+;
+
+; Page table interaction - "short descriptor" format (ARMv3+ 4 bytes per entry)
+
+;----------------------------------------------------------------------------------------
+; logical_to_physical
+;
+;       In:     r4 = logical address
+;
+;       Out:    r5 corrupt
+;               CC => r8,r9 = physical address
+;               CS => invalid logical address, r8,r9 corrupted
+;
+;       Convert logical address to physical address.
+;
+logical_to_physical
+        LDR     r5, =L2PT
+        MOV     r9, r4, LSR #12         ; r9 = logical page number
+        ADD     r9, r5, r9, LSL #2      ; r9 -> L2PT entry for logical address
+        MOV     r8, r9, LSR #12         ; r8 = page offset to L2PT entry for logical address
+        LDR     r8, [r5, r8, LSL #2]    ; r8 = L2PT entry for L2PT entry for logical address
+      [ MEMM_Type = "ARM600"
+        ASSERT  ((L2_SmallPage :OR: L2_ExtPage) :AND: 2) <> 0
+        ASSERT  (L2_LargePage :AND: 2) = 0
+      |
+        ASSERT  L2_SmallPage = 2
+        ASSERT  L2_XN = 1               ; Because XN is bit 0, bit 1 is the only bit we can check when looking for small pages
+      ]
+        TST     r8, #2                  ; Check for valid (4K) page.
+        BEQ     meminfo_returncs
+
+        LDR     r8, [r9]                ; r8 = L2PT entry for logical address
+        TST     r8, #2                  ; Check for valid (4K) page.
+        BEQ     meminfo_returncs
+
+      [ NoARMT2
+        LDR     r9, =&FFF               ; Valid so
+        BIC     r8, r8, r9              ;   mask off bits 0-11,
+        AND     r9, r4, r9              ;   get page offset from logical page
+        ORR     r8, r8, r9              ;   combine with physical page address.
+      |
+        BFI     r8, r4, #0, #12         ; Valid, so apply offset within the page
+      ]
+        MOV     r9, #0                  ; 4K pages are always in the low 4GB
+        CLC
+        MOV     pc, lr
+
+ [ CacheablePageTables
+MakePageTablesCacheable ROUT
+        Entry   "r0,r4-r5,r8-r9"
+        BL      GetPageFlagsForCacheablePageTables
+        ; Update PageTable_PageFlags
+        LDR     r1, =ZeroPage
+        STR     r0, [r1, #PageTable_PageFlags]
+        ; Adjust the logical mapping of the page tables to use the specified page flags
+        LDR     r1, =L1PT
+        LDR     r2, =16*1024
+        BL      AdjustMemoryPageFlags
+        LDR     r1, =L2PT
+        LDR     r2, =4*1024*1024
+        BL      AdjustMemoryPageFlags
+        ; Update the TTBR
+        LDR     r4, =L1PT
+        BL      logical_to_physical
+        MOV     r0, r8 ; assume physical address fits in 32 bits (r9, the high word, is 0 for 4K pages)
+        LDR     r1, =ZeroPage
+        BL      SetTTBR
+        ; Perform a full TLB flush to make sure the new mappings are visible
+        ARMop   TLB_InvalidateAll,,,r1
+        EXIT
+
+MakePageTablesNonCacheable ROUT
+        Entry   "r0-r1,r4-r5,r8-r9"
+        ; Flush the page tables from the cache, so that when we update the TTBR
+        ; below we can be sure that the MMU will be seeing the current page
+        ; tables
+        LDR     r0, =L1PT
+        ADD     r1, r0, #16*1024
+        LDR     r4, =ZeroPage
+        ARMop   Cache_CleanRange,,,r4
+        LDR     r0, =L2PT
+        ADD     r1, r0, #4*1024*1024
+        ARMop   Cache_CleanRange,,,r4
+        ; Update the TTBR so the MMU performs non-cacheable accesses
+        LDR     r0, =AreaFlags_PageTablesAccess :OR: DynAreaFlags_NotCacheable :OR: DynAreaFlags_NotBufferable
+        STR     r0, [r4, #PageTable_PageFlags]
+        LDR     r4, =L1PT
+        BL      logical_to_physical
+        MOV     r0, r8 ; assume physical address fits in 32 bits (r9, the high word, is 0 for 4K pages)
+        LDR     r1, =ZeroPage
+        BL      SetTTBR
+        ; Perform a full TLB flush just in case
+        ARMop   TLB_InvalidateAll,,,r1
+        ; Now we can adjust the logical mapping of the page tables to be non-cacheable
+        LDR     r0, [r1, #PageTable_PageFlags]
+        LDR     r1, =L1PT
+        LDR     r2, =16*1024
+        BL      AdjustMemoryPageFlags
+        LDR     r1, =L2PT
+        LDR     r2, =4*1024*1024
+        BL      AdjustMemoryPageFlags
+        EXIT
+ ]
+
+;**************************************************************************
+;
+;       AllocateBackingLevel2 - Allocate L2 pages for an area
+;
+;       Internal routine called by DynArea_Create
+;
+; in:   r3 = base address (will be page aligned)
+;       r4 = area flags (NB if doubly mapped, then have to allocate for both halves)
+;       r5 = size (of each half in doubly mapped areas)
+;
+; out:  If successfully allocated pages, then
+;         All registers preserved
+;         V=0
+;       else
+;         r0 -> error
+;         V=1
+;       endif
+
+AllocateBackingLevel2 Entry "r0-r8,r11"
+        TST     r4, #DynAreaFlags_DoublyMapped          ; if doubly mapped
+        SUBNE   r3, r3, r5                              ; then area starts further back
+        MOVNE   r5, r5, LSL #1                          ; and is twice the size
+
+; NB no need to do sanity checks on addresses here, they've already been checked
+
+; now round address range to 4M boundaries
+
+        ADD     r5, r5, r3                              ; r5 -> end
+        MOV     r0, #1 :SHL: 22
+        SUB     r0, r0, #1
+        BIC     r8, r3, r0                              ; round start address down (+ save for later)
+        ADD     r5, r5, r0
+        BIC     r5, r5, r0                              ; but round end address up
+
+; first go through existing L2PT working out how much we need
+
+        LDR     r7, =L2PT
+        ADD     r3, r7, r8, LSR #10                     ; r3 -> start of L2PT for area
+        ADD     r5, r7, r5, LSR #10                     ; r5 -> end of L2PT for area +1
+
+        ADD     r1, r7, r3, LSR #10                     ; r1 -> L2PT for r3
+        ADD     r2, r7, r5, LSR #10                     ; r2 -> L2PT for r5
+
+        TEQ     r1, r2                                  ; if no pages needed
+        BEQ     %FT30
+
+        MOV     r4, #0                                  ; number of backing pages needed
+10
+        LDR     r6, [r1], #4                            ; get L2PT entry for L2PT
+        TST     r6, #3                                  ; EQ if translation fault
+        ADDEQ   r4, r4, #1                              ; if not there then 1 more page needed
+        TEQ     r1, r2
+        BNE     %BT10
+
+; if no pages needed, then exit
+
+        TEQ     r4, #0
+        BEQ     %FT30
+
+; now we need to claim r4 pages from the free pool, if possible; return error if not
+
+        LDR     r1, =ZeroPage
+        LDR     r6, [r1, #FreePoolDANode + DANode_PMPSize]
+        SUBS    r6, r6, r4                              ; reduce free pool size by that many pages
+        BCS     %FT14                                   ; if enough, skip next bit
+
+; not enough pages in free pool currently, so try to grow it by the required amount
+
+        Push    "r0, r1"
+        MOV     r0, #ChangeDyn_FreePool
+        RSB     r1, r6, #0                              ; size change we want (+ve)
+        MOV     r1, r1, LSL #12
+        SWI     XOS_ChangeDynamicArea
+        Pull    "r0, r1"
+        BVS     %FT90                                   ; didn't manage change, so report error
+
+        MOV     r6, #0                                  ; will be no pages left in free pool after this
+14
+        STR     r6, [r1, #FreePoolDANode + DANode_PMPSize] ; if possible then update size
+
+        LDR     r0, [r1, #FreePoolDANode + DANode_PMP]  ; r0 -> free pool page list
+        ADD     r0, r0, r6, LSL #2                      ; r0 -> first page we're taking out of free pool
+
+        LDR     lr, =L1PT
+        ADD     r8, lr, r8, LSR #18                     ; point r8 at start of L1 we may be updating
+        ADD     r1, r7, r3, LSR #10                     ; point r1 at L2PT for r3 again
+        LDR     r11, =ZeroPage
+        LDR     r11, [r11, #PageTable_PageFlags]        ; access privs (+CB bits)
+20
+        LDR     r6, [r1], #4                            ; get L2PT entry again
+        TST     r6, #3                                  ; if no fault
+        BNE     %FT25                                   ; then skip
+
+        Push    "r1-r2, r4"
+        MOV     lr, #-1
+        LDR     r2, [r0]                                ; get page number to use
+        STR     lr, [r0], #4                            ; remove from PMP
+        Push    "r0"
+        BL      BangCamUpdate                           ; Map in to L2PT access window
+
+; now that the page is mapped in we can zero its contents (=> cause translation fault for area initially)
+; L1PT won't know about the page yet, so mapping it in with garbage initially shouldn't cause any issues
+
+        ADD     r0, r3, #4096
+        MOV     r1, #0
+        MOV     r2, #0
+        MOV     r4, #0
+        MOV     r6, #0
+15
+        STMDB   r0!, {r1,r2,r4,r6}                      ; store data
+        TEQ     r0, r3
+        BNE     %BT15
+
+        ; Make sure the page is seen to be clear before we update L1PT to make
+        ; it visible to the MMU
+        PageTableSync
+
+        Pull    "r0-r2, r4"
+
+        LDR     lr, =ZeroPage
+        LDR     r6, [lr, #L2PTUsed]
+        ADD     r6, r6, #4096
+        STR     r6, [lr, #L2PTUsed]
+
+; now update 4 words in L1PT (corresponding to 4M of address space which is covered by the 4K of L2)
+; and point them at the physical page we've just allocated (r1 was post-incremented, so [r1, #-4] now holds the physical address+bits)
+
+        LDR     r6, [r1, #-4]                           ; r6 = physical address for L2 page + other L2 bits
+        MOV     r6, r6, LSR #12                         ; r6 = phys.addr >> 12
+ [ MEMM_Type = "VMSAv6"
+        LDR     lr, =L1_Page
+ |
+        LDR     lr, =L1_Page + L1_U                     ; form other bits to put in L1
+ ]
+        ORR     lr, lr, r6, LSL #12                     ; complete L1 entry
+        STR     lr, [r8, #0]                            ; store entry for 1st MB
+        ADD     lr, lr, #1024                           ; advance L2 pointer
+        STR     lr, [r8, #4]                            ; store entry for 2nd MB
+        ADD     lr, lr, #1024                           ; advance L2 pointer
+        STR     lr, [r8, #8]                            ; store entry for 3rd MB
+        ADD     lr, lr, #1024                           ; advance L2 pointer
+        STR     lr, [r8, #12]                           ; store entry for 4th MB
+25
+        ADD     r3, r3, #4096                           ; advance L2PT logical address
+        ADD     r8, r8, #16                             ; move onto L1 for next 4M
+
+        TEQ     r1, r2
+        BNE     %BT20
+        PageTableSync
+30
+        CLRV
+        EXIT
+
+; Come here if not enough space in free pool to allocate level2
+
+90
+        ADRL    r0, ErrorBlock_CantAllocateLevel2
+  [ International
+        BL      TranslateError
+  |
+        SETV
+  ]
+        STR     r0, [sp]
+        EXIT
+
+;**************************************************************************
+;
+;       UpdateL1PTForPageReplacement
+;
+; Updates L1PT to point to the right place, if a physical L2PT page has been
+; replaced with a substitute.
+;
+; In: r0 = log addr of page being replaced
+;     r1 = phys addr of replacement page
+;
+; Out: r0-r4, r7-r12 can be corrupted
+;
+UpdateL1PTForPageReplacement ROUT
+        LDR     r2, =L2PT
+        SUBS    r0, r0, r2              ; r0 = byte offset of the page into the L2PT window
+        MOVCC   pc, lr                          ; address is below L2PT
+        CMP     r0, #4*1024*1024                ; L2PT occupies 4MB of logical space
+        MOVCS   pc, lr                          ; address is above L2PT
+
+; Each 4K page of L2PT is described by 4 consecutive L1PT words (each L1 word
+; covers 1MB, each L2PT page covers 4MB), hence page offset (>>12) scaled by 16 (<<4)
+        LDR     r2, =L1PT
+        ADD     r2, r2, r0, LSR #(12-4)         ; address in L1 of 4 consecutive words to update
+        LDR     r3, [r2]                        ; load 1st L1PT entry
+        MOV     r3, r3, LSL #(31-9)             ; junk old phys addr, keeping control bits 9-0
+        ORR     r3, r1, r3, LSR #(31-9)         ; merge in new phys addr
+        STR     r3, [r2], #4
+        ADD     r3, r3, #&400                   ; advance by 1K for each L1PT word
+        STR     r3, [r2], #4
+        ADD     r3, r3, #&400
+        STR     r3, [r2], #4
+        ADD     r3, r3, #&400
+        STR     r3, [r2], #4
+    [ MEMM_Type = "VMSAv6"
+        ; In order to guarantee that the result of a page table write is
+        ; visible, the ARMv6+ memory order model requires us to perform TLB
+        ; maintenance (equivalent to the MMU_ChangingUncached ARMop) after we've
+        ; performed the write. Performing the maintenance beforehand (as we've
+        ; done traditionally) will work most of the time, but not always.
+        LDR     r3, =ZeroPage
+        ARMop   MMU_ChangingUncached,,tailcall,r3 ; tail call; returns to our caller
+    |
+        ; With older architectures there shouldn't be any TLB maintenance
+        ; required. But we do potentially need to drain the write buffer to make
+        ; sure the CPU actually sees the changes.
+      [ CacheablePageTables
+        LDR     r3, =ZeroPage
+        ARMop   DSB_ReadWrite,,tailcall,r3        
+      |
+        MOV     pc, lr
+      ]
+    ]
+
+;
+; ----------------------------------------------------------------------------------
+;
+;convert page number in $pnum to L2PT entry (physical address+protection bits),
+;using cached PhysRamTable entries for speed
+;
+;entry: $ptable -> PhysRamTable, $pbits = protection bits
+;       $cache0, $cache1, $cache2 = PhysRamTable cache, where
+;       $cache0 = page number of start of cached block
+;       $cache1 = physical address of start of cached block
+;       $cache2 = size of cached block in pages
+;exit:  $temp corrupted
+;       $cache0, $cache1, $cache2 updated
+;
+
+        MACRO
+        PageNumToL2PT $pnum,$ptable,$cache0,$cache1,$cache2,$pbits,$temp
+        SUB     $temp,$pnum,$cache0 ; no. pages into block
+        CMP     $temp,$cache2 ; cache miss if offset outside cached block
+        BLHS    PageNumToL2PTCache_$ptable._$cache0._$cache1._$cache2._$temp
+        ADD     $pnum,$cache1,$temp,LSL #Log2PageSize ; physical address of page
+        ORR     $pnum,$pbits,$pnum ; munge in protection bits
+        MEND
+
+; Prime the cache with the first PhysRamTable entry (pages 0 upwards)
+        MACRO
+        PageNumToL2PTInit $ptable,$cache0,$cache1,$cache2
+        ASSERT  $cache2 > $cache1 ; LDMIA loads ascending register numbers: addr, then size
+        LDR     $ptable,=ZeroPage+PhysRamTable
+        MOV     $cache0,#0
+        LDMIA   $ptable,{$cache1,$cache2}
+        MOV     $cache2,$cache2,LSR #12 ; size in bytes -> size in (4K) pages
+        MEND
+
+; Cache miss handler for PageNumToL2PT, for the one register assignment used by
+; this file. Re-walks PhysRamTable from the start to find the block containing
+; the wanted page. On entry r12 = page offset relative to the stale cache block.
+PageNumToL2PTCache_r4_r5_r6_r7_r12 ROUT
+        Entry   "r4" ; preserve PhysRamTable pointer across the walk
+        ADD     r12,r12,r5 ; Restore page number
+        MOV     r5,#0 ; running page number of current block start
+10
+        LDMIA   r4!,{r6,r7} ; Get PhysRamTable entry
+        MOV     r7,r7,LSR #12 ; block size in bytes -> pages
+        CMP     r12,r7
+        SUBHS   r12,r12,r7 ; not this block - step past it
+        ADDHS   r5,r5,r7
+        BHS     %BT10
+        EXIT    ; r5-r7 = cache entry, r12 = offset into entry
+
+; ----------------------------------------------------------------------------------
+;
+;AMB_movepagesin_L2PT
+;
+;updates L2PT for new logical page positions, does not update CAM
+;
+; entry:
+;       r3  =  new logical address of 1st page
+;       r8  =  number of pages
+;       r9  =  page flags
+;       r10 -> page list
+;
+AMB_movepagesin_L2PT ROUT
+        Entry   "r0-r12"
+
+        MOV     r0, #0
+        GetPTE  r11, 4K, r0, r9 ; r11 = L2PT protection bits for these page flags (addr 0)
+
+        PageNumToL2PTInit r4,r5,r6,r7 ; prime PhysRamTable cache in r5-r7
+
+        LDR     r9,=L2PT
+        ADD     r9,r9,r3,LSR #(Log2PageSize-2) ;r9 -> L2PT for 1st new logical page
+
+; main loop unrolled x4 for speed
+        CMP     r8,#4
+        BLT     %FT20
+10
+        LDMIA   r10!,{r0-r3}         ;next 4 page numbers
+        PageNumToL2PT r0,r4,r5,r6,r7,r11,r12
+        PageNumToL2PT r1,r4,r5,r6,r7,r11,r12
+        PageNumToL2PT r2,r4,r5,r6,r7,r11,r12
+        PageNumToL2PT r3,r4,r5,r6,r7,r11,r12
+        STMIA   r9!,{r0-r3}          ;write 4 L2PT entries
+        SUB     r8,r8,#4
+        CMP     r8,#4
+        BGE     %BT10
+; deal with remaining 0-3 pages singly
+20
+        CMP     r8,#0
+        BEQ     %FT35
+30
+        LDR     r0,[r10],#4
+        PageNumToL2PT r0,r4,r5,r6,r7,r11,r12
+        STR     r0,[r9],#4
+        SUBS    r8,r8,#1
+        BNE     %BT30
+35
+        PageTableSync ; ensure the new entries are visible to the MMU
+        EXIT
+
+; ----------------------------------------------------------------------------------
+;
+;AMB_movecacheablepagesout_L2PT
+;
+;updates L2PT for old logical page positions, does not update CAM
+;
+; entry:
+;       r3  =  old page flags
+;       r4  =  old logical address of 1st page
+;       r8  =  number of pages
+;
+AMB_movecacheablepagesout_L2PT
+        Entry   "r0-r8"
+
+        ; Calculate L2PT flags needed to make the pages uncacheable
+        ; Assume all pages will have identical flags (or at least close enough)
+        LDR     lr,=ZeroPage
+        LDR     lr,[lr, #MMU_PCBTrans]
+        GetTempUncache r0, r3, lr, r1 ; r0 = temp-uncache L2PT bits for these flags
+        LDR     r1, =TempUncache_L2PTMask ; r1 = mask of L2PT bits replaced by r0
+
+        LDR     lr,=L2PT
+        ADD     lr,lr,r4,LSR #(Log2PageSize-2)    ;lr -> L2PT 1st entry
+
+; Pass 1: rewrite each entry as uncacheable, 4 at a time where possible
+; (presumably so no new cache lines land in the range before the flush below - confirm)
+        CMP     r8,#4
+        BLT     %FT20
+10
+        LDMIA   lr,{r2-r5}
+        BIC     r2,r2,r1
+        BIC     r3,r3,r1
+        BIC     r4,r4,r1
+        BIC     r5,r5,r1
+        ORR     r2,r2,r0
+        ORR     r3,r3,r0
+        ORR     r4,r4,r0
+        ORR     r5,r5,r0
+        STMIA   lr!,{r2-r5}
+        SUB     r8,r8,#4
+        CMP     r8,#4
+        BGE     %BT10
+; remaining 0-3 entries singly
+20
+        CMP     r8,#0
+        BEQ     %FT35
+30
+        LDR     r2,[lr]
+        BIC     r2,r2,r1
+        ORR     r2,r2,r0
+        STR     r2,[lr],#4
+        SUBS    r8,r8,#1
+        BNE     %BT30
+35
+        FRAMLDR r0,,r4                           ;address of 1st page
+        FRAMLDR r1,,r8                           ;number of pages
+        LDR     r3,=ZeroPage
+        ARMop   MMU_ChangingEntries,,,r3         ;cache/TLB maintenance for the range
+        FRAMLDR r4                               ;reload entry r4, r8 for pass 2
+        FRAMLDR r8
+        B       %FT55 ; -> moveuncacheablepagesout_L2PT (avoid pop+push of large stack frame)
+
+; ----------------------------------------------------------------------------------
+;
+;AMB_moveuncacheablepagesout_L2PT
+;
+;updates L2PT for old logical page positions, does not update CAM
+;
+; entry:
+;       r4  =  old logical address of 1st page
+;       r8  =  number of pages
+;
+AMB_moveuncacheablepagesout_L2PT
+        ALTENTRY ; shares the "r0-r8" stack frame, so FRAMLDR/EXIT work from either entry
+55      ; Enter here (frame already pushed) from AMB_movecacheablepagesout_L2PT
+        LDR     lr,=L2PT
+        ADD     lr,lr,r4,LSR #(Log2PageSize-2)    ;lr -> L2PT 1st entry
+
+        MOV     r0,#0                             ;0 means translation fault
+        MOV     r1,#0
+        MOV     r2,#0
+        MOV     r3,#0
+        MOV     r4,#0
+        MOV     r5,#0
+        MOV     r6,#0
+        MOV     r7,#0
+
+; zero the entries 8 at a time where possible
+        CMP     r8,#8
+        BLT     %FT70
+60
+        STMIA   lr!,{r0-r7}                       ;blam! (8 entries)
+        SUB     r8,r8,#8
+        CMP     r8,#8
+        BGE     %BT60
+; remaining 0-7 entries singly
+70
+        CMP     r8,#0
+        BEQ     %FT85
+80
+        STR     r0,[lr],#4
+        SUBS    r8,r8,#1
+        BNE     %BT80
+85
+        FRAMLDR r0,,r4                           ;address of 1st page
+        FRAMLDR r1,,r8                           ;number of pages
+        LDR     r3,=ZeroPage
+        ARMop   MMU_ChangingUncachedEntries,,,r3 ;no cache worries, hoorah
+        EXIT
+
+        LTORG
+
+        END