; Copyright 1996 Acorn Computers Ltd
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; > ARM600
; Convert given page flags to the equivalent temp uncacheable L2PT flags
; n.b. temp not used here but included for VMSAv6 compatibility
MACRO
GetTempUncache_ShortDesc $out, $pageflags, $pcbtrans, $temp
ASSERT $out <> $pageflags ; For consistency with VMSAv6 version
ASSERT $out <> $pcbtrans
[ "$temp" <> ""
ASSERT $out <> $temp ; For consistency with VMSAv6 version
ASSERT $temp <> $pcbtrans ; For consistency with VMSAv6 version
]
ASSERT DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
ASSERT DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
ASSERT DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
AND $out, $pageflags, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
ORR $out, $out, #DynAreaFlags_NotCacheable ; treat as temp uncache
LDRB $out, [$pcbtrans, $out, LSR #4] ; convert to X, C and B bits for this CPU
MEND
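; Usage sketch (illustrative; mirrors the call made in BangL2PT below):
;       LDR     lr, [r4, #MMU_PCBTrans]         ; lr -> PCBTrans table for this CPU
;       GetTempUncache_ShortDesc r0, r11, lr    ; r0 = X/C/B bits with NC forced on
; i.e. the NC+NB page flag bits select a byte from PCBTrans, with NC forced so
; that the result is always an uncacheable encoding for this CPU.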
TempUncache_L2PTMask * L2_X+L2_C+L2_B
; MMU interface file - ARM600 version
KEEP
; **************** CAM manipulation utility routines ***********************************
; **************************************************************************************
;
; BangCamUpdate - Update CAM, MMU for page move, coping with page currently mapped in
;
; mjs Oct 2000
; reworked to use generic ARM ops (vectored to appropriate routines during boot)
;
; First look in the CamEntries table to find the logical address L that this physical page is
; currently allocated to. Then check the Level 2 page tables to see whether logical page L is still
; mapped onto physical page R2. If it is, map page L out (make it inaccessible); otherwise leave
; page L alone. Then map logical page R3 onto physical page R2.
;
; in: r2 = physical page number
; r3 = logical address (2nd copy if doubly mapped area)
; r9 = offset from 1st to 2nd copy of doubly mapped area (either source or dest, but not both)
; r11 = PPL + CB bits
;
; out: r0, r1, r4, r6 corrupted
; r2, r3, r5, r7-r12 preserved
;
BangCamUpdate_ShortDesc ROUT
TST r11, #DynAreaFlags_DoublyMapped ; if moving page to doubly mapped area
SUBNE r3, r3, r9 ; then CAM soft copy holds ptr to 1st copy
LDR r1, =ZeroPage
LDR r1, [r1, #CamEntriesPointer]
ADD r1, r1, r2, LSL #CAM_EntrySizeLog2 ; point at cam entry (logaddr, PPL)
ASSERT CAM_LogAddr=0
ASSERT CAM_PageFlags=4
LDMIA r1, {r0, r6} ; r0 = current logaddress, r6 = current PPL
Push "r0, r6" ; save old logical address, PPL
BIC r4, r11, #PageFlags_Unsafe
BIC r4, r4, #StickyPageFlags
AND r6, r6, #StickyPageFlags
ORR r4, r4, r6
STMIA r1, {r3, r4} ; store new address, PPL
LDR r1, =ZeroPage+PhysRamTable ; go through phys RAM table
MOV r6, r2 ; make copy of r2 (since that must be preserved)
10
LDMIA r1!, {r0, r4} ; load next address, size
SUBS r6, r6, r4, LSR #12 ; subtract off that many pages
BCS %BT10 ; if more than that, go onto next bank
ADD r6, r6, r4, LSR #12 ; put back the ones which were too many
ADD r0, r0, r6 ; move on address by the number of pages left
LDR r6, [sp] ; reload old logical address
MOV r0, r0, LSL #12 ; convert from page units to bytes
; now we have r6 = old logical address, r2 = physical page number, r0 = physical address
TEQ r6, r3 ; TMD 19-Jan-94: if old logaddr = new logaddr, then
BEQ %FT20 ; don't remove page from where it is, to avoid window
; where page is nowhere.
LDR r1, =L2PT
ADD r6, r1, r6, LSR #10 ; r6 -> L2PT entry for old log.addr
MOV r4, r6, LSR #12 ; r4 = word offset into L2 for address r6
LDR r4, [r1, r4, LSL #2] ; r4 = L2PT entry mapping the page of L2PT that holds the entry for old log.addr
TST r4, #3 ; if page not there
BEQ %FT20 ; then no point in trying to remove it
LDR r4, [r6] ; r4 = L2PT entry for old log.addr
MOV r4, r4, LSR #12 ; r4 = physical address for old log.addr
TEQ r4, r0, LSR #12 ; if equal to physical address of page being moved
BNE %FT20 ; if not there, then just put in new page
AND r4, r11, #PageFlags_Unsafe
Push "r0, r3, r11, r14" ; save phys.addr, new log.addr, new PPL, lr
ADD r3, sp, #4*4
LDMIA r3, {r3, r11} ; reload old logical address, old PPL
LDR r0, =DuffEntry ; Nothing to do if wasn't mapped in
ORR r11, r11, r4
TEQ r3, r0
MOV r0, #0 ; cause translation fault
BLNE BangL2PT ; map page out
Pull "r0, r3, r11, r14"
20
ADD sp, sp, #8 ; junk old logical address, PPL
LDR r4, =DuffEntry ; check for requests to map a page to nowhere
TEQ r4, r3 ; don't actually map anything to nowhere
MOVEQ pc, lr
GetPTE r0, 4K, r0, r11, ShortDesc
LDR r1, =L2PT ; point to level 2 page tables
;fall through to BangL2PT
;internal entry point for updating L2PT entry
;
; entry: r0 = new L2PT value, r1 -> L2PT, r3 = logical address (4k aligned), r11 = PPL
;
; exit: r0,r1,r4,r6 corrupted
;
BangL2PT ; internal entry point used only by BangCamUpdate
Push "lr"
MOV r6, r0
TST r11, #PageFlags_Unsafe
BNE BangL2PT_unsafe
;In order to safely map out a cacheable page and remove it from the
;cache, we need to perform the following process:
;* Make the page uncacheable
;* Flush TLB
;* Clean+invalidate cache
;* Write new mapping (r6)
;* Flush TLB
;For uncacheable pages we can just do the last two steps
;
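;Making the page uncacheable first matters: it stops new cache lines being
;allocated for the page while we clean, so once the clean+invalidate has
;completed the cache is guaranteed to hold nothing for this page.
;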
TEQ r6, #0 ;EQ if mapping out
TSTEQ r11, #DynAreaFlags_NotCacheable ;EQ if also cacheable (overcautious for temp uncache+illegal PCB combos)
LDR r4, =ZeroPage
BNE %FT20
LDR lr, [r4, #MMU_PCBTrans]
GetTempUncache_ShortDesc r0, r11, lr
LDR lr, [r1, r3, LSR #10] ;get current L2PT entry
BIC lr, lr, #TempUncache_L2PTMask ;remove current attributes
ORR lr, lr, r0
STR lr, [r1, r3, LSR #10]! ;Make uncacheable
TST r11, #DynAreaFlags_DoublyMapped
BEQ %FT19
STR lr, [r1, r9, LSR #10] ;Update 2nd mapping too if required
ADD r0, r3, r9
ARMop MMU_ChangingEntry,,, r4
19
MOV r0, r3
ARMop MMU_ChangingEntry,,, r4
LDR r1, =L2PT
20 STR r6, [r1, r3, LSR #10]! ;update L2PT entry
TST r11, #DynAreaFlags_DoublyMapped
BEQ %FT21
STR r6, [r1, r9, LSR #10] ;Update 2nd mapping
MOV r0, r3
ARMop MMU_ChangingUncachedEntry,,, r4 ; TLB flush for 1st mapping
ADD r3, r3, r9 ;restore r3 back to 2nd copy
21
Pull "lr"
MOV r0, r3
ARMop MMU_ChangingUncachedEntry,,tailcall,r4
BangL2PT_unsafe
STR r6, [r1, r3, LSR #10]! ; update level 2 page table (and update pointer so we can use bank-to-bank offset)
TST r11, #DynAreaFlags_DoublyMapped ; if area doubly mapped
STRNE r6, [r1, r9, LSR #10] ; then store entry for 2nd copy as well
ADDNE r3, r3, r9 ; and point logical address back at 2nd copy
Pull "pc"
[ ARM6support
PPLTransARM6
& (AP_Full * L2_APMult) + L2_SmallPage ; R any W any
& (AP_Read * L2_APMult) + L2_SmallPage ; R any W sup
& (AP_None * L2_APMult) + L2_SmallPage ; R sup W sup
& (AP_Read * L2_APMult) + L2_SmallPage ; R any W sup
PPLAccessARM6 ; EL1EL0
; RWXRWX
GenPPLAccess 2_111111
GenPPLAccess 2_111101
GenPPLAccess 2_111000
GenPPLAccess 2_111101
DCD -1
]
PPLTrans_ShortDesc
& (AP_Full * L2_APMult) + L2_SmallPage ; R any W any
& (AP_Read * L2_APMult) + L2_SmallPage ; R any W sup
& (AP_None * L2_APMult) + L2_SmallPage ; R sup W sup
& (AP_ROM * L2_APMult) + L2_SmallPage ; R any W none
PPLTransX_ShortDesc
& (AP_Full * L2X_APMult) + L2_ExtPage ; R any W any
& (AP_Read * L2X_APMult) + L2_ExtPage ; R any W sup
& (AP_None * L2X_APMult) + L2_ExtPage ; R sup W sup
& (AP_ROM * L2X_APMult) + L2_ExtPage ; R any W none
PPLAccess_ShortDesc ; EL1EL0
; RWXRWX
GenPPLAccess 2_111111
GenPPLAccess 2_111101
GenPPLAccess 2_111000
GenPPLAccess 2_101101
DCD -1
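; Each GenPPLAccess word encodes two RWX triples, EL1 then EL0, per the column
; headers above; e.g. 2_101101 = privileged R-X, user R-X, i.e. the read-only
; AP_ROM mapping. The DCD -1 terminates the table.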
= 0 ; So PageShifts-1 is word aligned
PageShifts
= 12, 13, 0, 14 ; 1 2 3 4
= 0, 0, 0, 15 ; 5 6 7 8
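; Indexed by (page size in 4K pages) - 1; each byte is log2 of the page size
; in bytes: 1 page -> 12 (4K), 2 -> 13 (8K), 4 -> 14 (16K), 8 -> 15 (32K).
; The zero entries are page counts with no corresponding page size.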
ALIGN
LTORG
; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
;
; "ARM600"-specific OS_MMUControl code
;
; in: r0 = 0 (reason code 0, for modify control register)
; r1 = EOR mask
; r2 = AND mask
;
; new control = ((old control AND r2) EOR r1)
;
; out: r1 = old value
; r2 = new value
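; e.g. (illustrative values): r1 = 0 with r2 = &FFFFFFFF reads the register
; without modifying it (the read-only path below); r1 = MMUC_C with
; r2 = &FFFFFFFF :EOR: MMUC_C forces the cache enable bit on, since
; new = ((old AND r2) EOR r1).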
MMUControl_ModifyControl ROUT
Push "r0,r3,r4,r5"
CMP r1,#0
CMPEQ r2,#&FFFFFFFF
BEQ MMUC_modcon_readonly
LDR r3,=ZeroPage
LDRB r5,[r3, #ProcessorArch]
PHPSEI r4 ; disable IRQs while we modify soft copy (and possibly switch caches off/on)
CMP r5,#ARMv4
LDRLO lr, [r3, #MMUControlSoftCopy]
ARM_read_control lr,HS ; if ARMv4 or later, we can read control reg. - trust this more than soft copy
AND r2, r2, lr
EOR r2, r2, r1
MOV r1, lr
STR r2, [r3, #MMUControlSoftCopy]
BIC lr, r2, r1 ; lr = bits going from 0->1
TST lr, #MMUC_C ; if cache turning on then flush cache before we do it
BEQ %FT05
ARMop Cache_InvalidateAll,,,r3 ; D-cache turning on, I-cache invalidate is either necessary (both turning on) or a safe side-effect
B %FT10
05
TST lr, #MMUC_I
ARMop IMB_Full,NE,,r3 ; I-cache turning on, Cache_InvalidateAll could be unsafe
10
BIC lr, r1, r2 ; lr = bits going from 1->0
TST lr, #MMUC_C ; if cache turning off then clean data cache first
BEQ %FT15
ARMop Cache_CleanAll,,,r3
15
ARM_write_control r2
BIC lr, r1, r2 ; lr = bits going from 1->0
TST lr, #MMUC_C ; if cache turning off then flush cache afterwards
BEQ %FT17
LDR r3,=ZeroPage
ARMop Cache_InvalidateAll,,,r3 ; D-cache turned off, can safely invalidate I+D
B %FT20
17
TST lr, #MMUC_I
BEQ %FT20
LDR r3,=ZeroPage
ARMop IMB_Full,,,r3 ; Only I-cache which turned off, clean D-cache & invalidate I-cache
20
PLP r4 ; restore IRQ state
Pull "r0,r3,r4,r5,pc"
MMUC_modcon_readonly
LDR r3, =ZeroPage
LDRB r5, [r3, #ProcessorArch]
CMP r5, #ARMv4
LDRLO lr, [r3, #MMUControlSoftCopy]
ARM_read_control lr,HS ; if ARMv4 or later, we can read control reg. - trust this more than soft copy
STRHS lr, [r3, #MMUControlSoftCopy]
MOV r1, lr
MOV r2, lr
Pull "r0,r3,r4,r5,pc"
; If extended pages are supported:
; PPLTrans should contain L2X_AP + L2_ExtPage
; PCBTrans should contain L2_C+L2_B+L2_TEX (for an extended page)
; If extended pages aren't supported:
; PPLTrans should contain L2_AP + L2_SmallPage
; PCBTrans should contain L2_C+L2_B
; In:
; r0 = phys addr (aligned)
; r1 = page flags:
; DynAreaFlags_APBits
; DynAreaFlags_NotBufferable
; DynAreaFlags_NotCacheable
; DynAreaFlags_CPBits
; PageFlags_TempUncacheableBits
; r2 -> PPLTrans
; r3 -> PCBTrans
; Out:
; r0 = PTE for 4K page ("small page" or "extended page" depending on PPLTrans)
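; Call sketch (illustrative; in practice the table pointers are fetched from
; the ZeroPage workspace rather than referenced directly):
;       ; r0 = page-aligned physical address, r1 = page flags
;       LDR     r2, =PPLTrans_ShortDesc         ; or PPLTransX_ShortDesc
;       LDR     r4, =ZeroPage
;       LDR     r3, [r4, #MMU_PCBTrans]
;       BL      Get4KPTE_ShortDesc              ; r0 = completed L2PT descriptor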
Get4KPTE_ShortDesc ROUT
Entry "r4"
AND lr, r1, #DynAreaFlags_APBits
LDR lr, [r2, lr, LSL #2]
; Insert AP bits, page type/size
ORR r0, r0, lr
; Insert CB+TEX bits
ASSERT DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
ASSERT DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
ASSERT DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
TST r1, #PageFlags_TempUncacheableBits
AND r4, r1, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
AND lr, r1, #DynAreaFlags_CPBits
ORRNE r4, r4, #DynAreaFlags_NotCacheable ; if temp uncache, set NC bit, ignore P
ORREQ r4, r4, lr, LSR #10-4 ; else use NC, NB and P bits
LDRB r4, [r3, r4, LSR #4] ; convert to X, C and B bits for this CPU
ORR r0, r0, r4
EXIT
; In:
; As per Get4KPTE
; Out:
; r0 = PTE for 64K page ("large page")
Get64KPTE_ShortDesc ROUT
Entry "r4"
AND lr, r1, #DynAreaFlags_APBits
LDR lr, [r2, lr, LSL #2]
; Force to large page
ORR r0, r0, #L2_LargePage
; Insert AP bits
AND lr, lr, #L2X_AP ; If extended pages are supported, we need to expand L2X_AP to L2_AP
MOV r4, #L2_APMult/L2X_APMult
MLA r0, r4, lr, r0
50
; Insert CB+TEX bits
; Shared with Get1MPTE
ASSERT DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
ASSERT DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
ASSERT DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
TST r1, #PageFlags_TempUncacheableBits
AND r4, r1, #DynAreaFlags_NotCacheable + DynAreaFlags_NotBufferable
AND lr, r1, #DynAreaFlags_CPBits
ORRNE r4, r4, #DynAreaFlags_NotCacheable ; if temp uncache, set NC bit, ignore P
ORREQ r4, r4, lr, LSR #10-4 ; else use NC, NB and P bits
LDRB r4, [r3, r4, LSR #4] ; convert to X, C and B bits for this CPU
; Move TEX field up
ORR r4, r4, r4, LSL #L2L_TEXShift-L2_TEXShift
BIC r4, r4, #L2_TEX :OR: ((L2_C+L2_B) :SHL: (L2L_TEXShift-L2_TEXShift))
ORR r0, r0, r4
EXIT
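; n.b. in the short descriptor format a 64K "large page" spans 16 consecutive
; L2PT entries, so the descriptor built here presumably has to be written to
; all 16 slots by the caller.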
; In:
; As per Get4KPTE
; Out:
; r0 = PTE for 1M page ("section")
Get1MPTE_ShortDesc
ALTENTRY
AND lr, r1, #DynAreaFlags_APBits
[ ARM6support
; Set U bit if cacheable and not ROM access
; (Because ROM access isn't supported, it'll get mapped to AP_Read.
; Writes to ROM will presumably be ignored by the bus, but if we have
; U set it will update the cache, effectively giving people the power
; to temporarily overwrite ROM)
CMP lr, #2
TSTLS r1, #DynAreaFlags_NotCacheable
ORREQ r0, r0, #L1_U
]
LDR lr, [r2, lr, LSL #2]
; Force to section map
ORR r0, r0, #L1_Section
; Insert AP bits
ASSERT L1_AP = L2X_AP :SHL: 6
AND lr, lr, #L2X_AP
ORR r0, r0, lr, LSL #6
; Insert CB+TEX bits
ASSERT L1_C = L2_C
ASSERT L1_B = L2_B
ASSERT L1_TEXShift = L2L_TEXShift
B %BT50
; In:
; r0 = page-aligned logical addr
; Out:
; r0,r1 = phys addr
; r2 = page flags
; or -1 if fault
; r3 = entry size/alignment (bytes)
LoadAndDecodeL2Entry_ShortDesc ROUT
LDR r1, =L2PT
LDR r0, [r1, r0, LSR #10]
ANDS r3, r0, #3
MOVEQ r2, #-1
MOVEQ r3, #4096
MOVEQ pc, lr
Entry "r4-r6"
; Get AP bits in low bits
ASSERT L2X_APMult = 1:SHL:4
MOV r2, r0, LSR #4
; Remap TEX+CB so that they're in the same position as an extended page entry
ASSERT L2_LargePage < L2_SmallPage
ASSERT L2_SmallPage < L2_ExtPage
CMP r3, #L2_SmallPage
AND r4, r0, #L2_C+L2_B
ANDLT lr, r0, #L2L_TEX
ORRLT r4, r4, lr, LSR #L2L_TEXShift-L2_TEXShift
ANDGT lr, r0, #L2_TEX
ORRGT r4, r4, lr
; Align phys addr to page size and set up r3
MOV r0, r0, LSR #12
BICLT r0, r0, #15
MOV r0, r0, LSL #12
MOV r1, #0
MOVLT r3, #65536
MOVGE r3, #4096
20
; Common code shared with LoadAndDecodeL1Entry
; Only four PPL possibilities, so just directly decode it
; ARM access goes 0 => all R/O, 1 => user none, 2 => user R/O, 3 => user R/W
; PPL access goes 0 => user R/W, 1 => user R/O, 2 => user none, 3 => all R/O
; i.e. just invert the bits
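; e.g. ARM AP 3 (user R/W) EOR 3 = PPL 0 (user R/W); ARM AP 0 (all R/O) EOR 3 = PPL 3 (all R/O)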
AND r2, r2, #3
LDR r6, =ZeroPage
EOR r2, r2, #3
; Search through PCBTrans for a match on TEX+CB
; Funny order is used so that NCNB is preferred over other variants (since NCNB is common fallback)
LDR r6, [r6, #MMU_PCBTrans]
MOV lr, #3
30
LDRB r5, [r6, lr]
CMP r5, r4
BEQ %FT40
TST lr, #2_11
SUBNE lr, lr, #1 ; loop goes 3,2,1,0,7,6,5,4,...,31,30,29,28
ADDEQ lr, lr, #7
TEQ lr, #35
BNE %BT30 ; keep searching; fall through (give up) once end of table reached
40
; Decode index back into page flags
; n.b. temp uncache is ignored (no way to differentiate it from genuinely uncacheable pages)
ASSERT DynAreaFlags_CPBits = 7*XCB_P :SHL: 10
ASSERT DynAreaFlags_NotCacheable = XCB_NC :SHL: 4
ASSERT DynAreaFlags_NotBufferable = XCB_NB :SHL: 4
AND r4, lr, #XCB_NC+XCB_NB
AND lr, lr, #7*XCB_P
ORR r2, r2, r4, LSL #4
ORR r2, r2, lr, LSL #10
EXIT
; In:
; r0 = MB-aligned logical addr
; Out:
; r0,r1 = phys addr of start of section or L2PT entry
; r2 = page flags if 1MB page
; or -1 if fault
; or -2 if page table ptr
; r3 = entry size/alignment (bytes)
LoadAndDecodeL1Entry_ShortDesc
ALTENTRY
LDR r1, =L1PT
LDR r0, [r1, r0, LSR #20-2]
MOV r3, #1048576
AND r2, r0, #3
ASSERT L1_Fault < L1_Page
ASSERT L1_Page < L1_Section
CMP r2, #L1_Page
BGT %FT50
MOVLT r2, #-1
MOVEQ r2, #-2
MOVEQ r0, r0, LSR #10
MOVEQ r0, r0, LSL #10
MOVEQ r1, #0
EXIT
50
; Get AP bits in low bits
ASSERT L1_APMult = 1:SHL:10
MOV r2, r0, LSR #10
; Remap TEX+CB so that they're in the same position as an extended page entry
ASSERT L1_C = L2_C
ASSERT L1_B = L2_B
AND r4, r0, #L1_C+L1_B
AND lr, r0, #L1_TEX
ORR r4, r4, lr, LSR #L1_TEXShift-L2_TEXShift
; Align phys addr to page size and set up r3
MOV r0, r0, LSR #20
MOV r0, r0, LSL #20
MOV r1, #0
; Jump to common code to do AP decode + PCBTrans search
B %BT20
; In:
; r0 = phys addr (aligned)
; r1 -> ZeroPage
; Out:
; TTBR and any other related registers updated
; If MMU is currently on, it's assumed the mapping of ROM+stack will not be
; affected by this change
SetTTBR_ShortDesc ROUT
ARM_MMU_transbase r0
MOV pc, lr
[ CacheablePageTables
; Out: R0 = desired page flags for the page tables
GetPageFlagsForCacheablePageTables ROUT
; For ARMv5 and below the MMU can't read from the L1 cache, so the
; best we can do is a write-through cache policy
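; (write-through keeps main memory up to date on every store, so the table
; walk hardware, which fetches directly from memory, never sees stale entries)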
LDR r0, =AreaFlags_PageTablesAccess :OR: (CP_CB_Writethrough :SHL: DynAreaFlags_CPShift)
MOV pc, lr
]
GET s.ShortDesc
END