/*	$NetBSD: pmap.h,v 1.117 2014/04/21 19:12:11 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_

#if defined(_KERNEL_OPT)
#include "opt_user_ldt.h"
#include "opt_xen.h"
#endif

#include <sys/atomic.h>

#include <i386/pte.h>
#include <machine/segments.h>
#if defined(_KERNEL)
#include <machine/cpufunc.h>
#endif

#include <uvm/uvm_object.h>
#ifdef XEN
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif /* XEN */

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space. when PAE
 * is not in use, this virtual address space can be broken up into 1024 4MB
 * regions which are described by PDEs in the PDP. the PDEs are defined as
 * follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xc0000000)
 *
 * PDE#s	VA range		usage
 * 0->766	0x0 -> 0xbfc00000	user address space
 * 767		0xbfc00000->		recursive mapping of PDP (used for
 *		0xc0000000		linear mapping of PTPs)
 * 768->1023	0xc0000000->		kernel address space (constant
 *		0xffc00000		across all pmaps/processes)
 *			<end>
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory. in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area. the PTE for page 1 is the second int. the very last int in the
 * 4MB range is the PTE that maps VA 0xfffff000 (the last page in a 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 768->1023 so that
 * the kernel is always mapped in every process. these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor. this is
 * the pmap whose PDP is pointed to by processor register %cr3. this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #767 as shown above). when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (767 * 4MB) + (VA / PAGE_SIZE) * sizeof(pt_entry_t)
 *                = 0xbfc00000 + (VA / 4096) * 4
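 *
 * for example, with VA 0xc0100000 this gives:
 * 0xbfc00000 + (0xc0100000 / 4096) * 4 = 0xbfc00000 + 0x300400 = 0xbff00400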
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active? in that
 * case we temporarily load this pmap, perform the operation, and mark
 * the currently active one as pending lazy reload.
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
 *   | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
 *   |    |
 *   +----+
 *
 * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
 *
 * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
 *   |    |
 *   |    |
 *   | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
 *   | 768| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#767's VA (0xbfeff000) is
 * defined as "PDP_BASE".... within that mapping there is one define
 * of note:
 *   "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
 *   which points back to itself.
 *
 * - PAE support -
 * ---------------
 *
 * PAE adds another layer of indirection during address translation, breaking
 * up the translation process into 3 different levels:
 * - L3 page directory, containing 4 * 64-bit addresses (index determined by
 *   bits [31:30] of the virtual address). This breaks up the address space
 *   into 4 1GB regions.
 * - the PD (L2), containing 512 64-bit addresses, breaking each L3 region
 *   into 512 * 2MB regions.
 * - the PT (L1), also containing 512 64-bit addresses (at L1, the size of
 *   the pages is still 4K).
 *
 * The kernel virtual space is mapped by the last entry in the L3 page,
 * the first 3 entries mapping the user VA space.
 *
 * Because the L3 has only 4 entries of 1GB each, we can't use recursive
 * mappings at this level for PDP_PDE (this would eat up 2GB of the 4GB
 * virtual space). There are also restrictions imposed by Xen on the
 * last entry of the L3 PD (the reference count of this page cannot be
 * bigger than 1), which makes it hard to use one L3 page per pmap to
 * switch between pmaps using %cr3.
 *
 * As such, each CPU gets its own L3 page that is always loaded into its %cr3
 * (ci_pae_l3_pd in the associated cpu_info struct). We claim that the VM has
 * only a 2-level PTP (similar to the non-PAE case). The L2 PD is now 4
 * contiguous pages long (corresponding to the 4 entries of the L3), and the
 * different indexes/slots (like PDP_PDE) are adapted accordingly.
 *
 * Kernel space remains in L3[3]; L3[0-2] map the user VA space. Switching
 * between pmaps consists of modifying the first 3 entries of the CPU's L3
 * page.
 *
 * PTE_BASE will need 4 entries in the L2 PD pages to map the L2 pages
 * recursively.
 *
 * In addition, for Xen, we can't recursively map L3[3] (Xen wants the ref
 * count on this page to be exactly one), so we use a shadow PD page for
 * the last L2 PD. The shadow page could be static too, but to make pm_pdir[]
 * contiguous we'll allocate/copy one page per pmap.
 */
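
/*
 * Illustrative sketch, not part of the original header: how a 32-bit VA
 * is decomposed under PAE, following the bit ranges described above.
 * The helper name is hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static __inline void
pae_va_split(vaddr_t va, unsigned *l3, unsigned *l2, unsigned *l1)
{
	*l3 = (va >> 30) & 0x3;		/* 4 L3 slots, 1GB each */
	*l2 = (va >> 21) & 0x1ff;	/* 512 L2 slots, 2MB each */
	*l1 = (va >> 12) & 0x1ff;	/* 512 L1 slots, 4KB pages */
}
#endif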

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
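
/*
 * (with VA_SIGN_MASK == 0 both macros are identities on i386;
 *  sign-extension handling only matters on 64-bit ports, where the
 *  equivalent mask is nonzero.)
 */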

/*
 * the following defines identify the slots used as described above.
 */
#ifdef PAE
#define L2_SLOT_PTE	(KERNBASE/NBPD_L2-4)	/* 1532: for recursive PDP map */
#define L2_SLOT_KERN	(KERNBASE/NBPD_L2)	/* 1536: start of kernel space */
#else /* PAE */
#define L2_SLOT_PTE	(KERNBASE/NBPD_L2-1)	/*  767: for recursive PDP map */
#define L2_SLOT_KERN	(KERNBASE/NBPD_L2)	/*  768: start of kernel space */
#endif /* PAE */
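
/*
 * (the slot numbers follow from KERNBASE = 0xc0000000: with PAE,
 *  NBPD_L2 is 2MB, so 0xc0000000 / 0x200000 = 1536; without PAE,
 *  NBPD_L2 is 4MB, so 0xc0000000 / 0x400000 = 768.)
 */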

#define L2_SLOT_KERNBASE L2_SLOT_KERN

#define PDIR_SLOT_KERN	L2_SLOT_KERN
#define PDIR_SLOT_PTE	L2_SLOT_PTE

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE: the VA of the PDE that points back to the PDP
 */

#define PTE_BASE	((pt_entry_t *)(PDIR_SLOT_PTE * NBPD_L2))

#define L1_BASE		PTE_BASE

#define L2_BASE		((pd_entry_t *)((char *)L1_BASE + L2_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L2_BASE + PDIR_SLOT_PTE)

#define PDP_BASE	L2_BASE
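
/*
 * Illustrative sketch, not part of the original header: with the
 * recursive mapping in place, the PTE for a VA is found by simple
 * indexing off PTE_BASE, exactly as in the formula in the big comment
 * above. The real accessors live in the MI x86 code; the helper name
 * below is hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static __inline pt_entry_t *
example_vtopte(vaddr_t va)
{
	/* one pt_entry_t per 4KB page, starting at PTE_BASE */
	return PTE_BASE + (va >> PAGE_SHIFT);
}
#endif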

/* largest value (-1 for APTP space) */
#define NKL2_MAX_ENTRIES	(NTOPLEVEL_PDES - (KERNBASE/NBPD_L2) - 1)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * NPDPG)

#define NKL2_KIMG_ENTRIES	0	/* XXX unused */

#define NKL2_START_ENTRIES	0	/* XXX computed at runtime */
#define NKL1_START_ENTRIES	0	/* XXX unused */

#ifndef XEN
#define NTOPLEVEL_PDES		(PAGE_SIZE * PDP_SIZE / (sizeof (pd_entry_t)))
#else /* !XEN */
#ifdef PAE
#define NTOPLEVEL_PDES		1964	/* 1964-2047 reserved by Xen */
#else /* PAE */
#define NTOPLEVEL_PDES		1008	/* 1008-1023 reserved by Xen */
#endif /* PAE */
#endif /* !XEN */
#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2 }
#define PDES_INITIALIZER	{ L2_BASE }

#define PTP_LEVELS	2
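
/*
 * (PTP_LEVELS stays 2 even with PAE: as described above, the per-CPU
 *  L3 page is hidden from the VM layer, which sees a two-level tree.)
 */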

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
#define PG_X		PG_AVAIL3	/* executable mapping */

/*
 * Number of PTEs per cache line: with a 32-byte cache line, 8 for
 * 4-byte non-PAE PTEs and 4 for 8-byte PAE PTEs.
 * Used to avoid false sharing of cache lines.
 */
#ifdef PAE
#define NPTECL	4
#else
#define NPTECL	8
#endif

#include <x86/pmap.h>

#ifndef XEN
#define pmap_pa2pte(a)		(a)
#define pmap_pte2pa(a)		((a) & PG_FRAME)
#define pmap_pte_set(p, n)	do { *(p) = (n); } while (0)
#define pmap_pte_flush()	/* nothing */
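
/*
 * (on native hardware a PTE write takes effect immediately, so there is
 *  nothing to flush; under XEN, below, updates are queued for the
 *  hypervisor and pmap_pte_flush() pushes that queue out.)
 */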

#ifdef PAE
#define pmap_pte_cas(p, o, n)	atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n) \
	atomic_swap_64((volatile uint64_t *)p, n)
#define pmap_pte_setbits(p, b) \
	atomic_or_64((volatile uint64_t *)p, b)
#define pmap_pte_clearbits(p, b) \
	atomic_and_64((volatile uint64_t *)p, ~(b))
#else /* PAE */
#define pmap_pte_cas(p, o, n)	atomic_cas_32((p), (o), (n))
#define pmap_pte_testset(p, n) \
	atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b) \
	atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b) \
	atomic_and_ulong((volatile unsigned long *)p, ~(b))
#endif /* PAE */

#else /* XEN */
extern kmutex_t pte_lock;
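
/*
 * (pte_lock serializes the read-modify-write sequences below: under
 *  XEN a PTE update goes through the hypervisor's update queue rather
 *  than a single atomic instruction, so the cas/testset/setbits
 *  operations must be locked instead.)
 */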

static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
	return (pt_entry_t)xpmap_ptom_masked(pa);
}

static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
	return xpmap_mtop_masked(pte & PG_FRAME);
}

static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();
	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
	splx(s);
}

static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
	pt_entry_t opte;

	mutex_enter(&pte_lock);
	opte = *ptep;
	if (opte == o) {
		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
		xpq_flush_queue();
	}
	mutex_exit(&pte_lock);
	return opte;
}

static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
	pt_entry_t opte;

	mutex_enter(&pte_lock);
	opte = *pte;
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
	    npte);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
	return opte;
}

static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
	mutex_enter(&pte_lock);
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
}

static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
	mutex_enter(&pte_lock);
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
	    (*pte) & ~bits);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
}

static __inline void
pmap_pte_flush(void)
{
	int s = splvm();
	xpq_flush_queue();
	splx(s);
}

#endif /* XEN */
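
/*
 * Illustrative sketch, not part of the original header: the macros and
 * inlines above give MI pmap code one PTE-update interface for both
 * native and XEN kernels. A typical (hypothetical) sequence:
 */
#if 0	/* example only */
static __inline void
example_enter_mapping(pt_entry_t *pte, paddr_t pa, pt_entry_t flags)
{
	pmap_pte_set(pte, pmap_pa2pte(pa) | flags);
	pmap_pte_flush();	/* no-op natively; flushes the XEN queue */
}
#endif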

struct vm_map;
struct trapframe;
struct pcb;

int	pmap_exec_fixup(struct vm_map *, struct trapframe *, struct pcb *);
void	pmap_ldt_cleanup(struct lwp *);

#include <x86/pmap_pv.h>

#define __HAVE_VM_PAGE_MD
#define VM_MDPAGE_INIT(pg) \
	memset(&(pg)->mdpage, 0, sizeof((pg)->mdpage)); \
	PMAP_PAGE_INIT(&(pg)->mdpage.mp_pp)

struct vm_page_md {
	struct pmap_page mp_pp;
};

#endif /* _I386_PMAP_H_ */