root / lab4 / .minix-src / include / uvm / uvm_page.h @ 14
History | View | Annotate | Download (13.4 KB)
1 |
/* $NetBSD: uvm_page.h,v 1.80 2015/03/23 07:59:12 riastradh Exp $ */
|
---|---|
2 |
|
3 |
/*
|
4 |
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
5 |
* Copyright (c) 1991, 1993, The Regents of the University of California.
|
6 |
*
|
7 |
* All rights reserved.
|
8 |
*
|
9 |
* This code is derived from software contributed to Berkeley by
|
10 |
* The Mach Operating System project at Carnegie-Mellon University.
|
11 |
*
|
12 |
* Redistribution and use in source and binary forms, with or without
|
13 |
* modification, are permitted provided that the following conditions
|
14 |
* are met:
|
15 |
* 1. Redistributions of source code must retain the above copyright
|
16 |
* notice, this list of conditions and the following disclaimer.
|
17 |
* 2. Redistributions in binary form must reproduce the above copyright
|
18 |
* notice, this list of conditions and the following disclaimer in the
|
19 |
* documentation and/or other materials provided with the distribution.
|
20 |
* 3. Neither the name of the University nor the names of its contributors
|
21 |
* may be used to endorse or promote products derived from this software
|
22 |
* without specific prior written permission.
|
23 |
*
|
24 |
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
25 |
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
26 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
27 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
28 |
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
29 |
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
30 |
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
31 |
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
32 |
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
33 |
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
34 |
* SUCH DAMAGE.
|
35 |
*
|
36 |
* @(#)vm_page.h 7.3 (Berkeley) 4/21/91
|
37 |
* from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
|
38 |
*
|
39 |
*
|
40 |
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
41 |
* All rights reserved.
|
42 |
*
|
43 |
* Permission to use, copy, modify and distribute this software and
|
44 |
* its documentation is hereby granted, provided that both the copyright
|
45 |
* notice and this permission notice appear in all copies of the
|
46 |
* software, derivative works or modified versions, and any portions
|
47 |
* thereof, and that both notices appear in supporting documentation.
|
48 |
*
|
49 |
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
|
50 |
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
|
51 |
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
|
52 |
*
|
53 |
* Carnegie Mellon requests users of this software to return to
|
54 |
*
|
55 |
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
|
56 |
* School of Computer Science
|
57 |
* Carnegie Mellon University
|
58 |
* Pittsburgh PA 15213-3890
|
59 |
*
|
60 |
* any improvements or extensions that they make and grant Carnegie the
|
61 |
* rights to redistribute these changes.
|
62 |
*/
|
63 |
|
64 |
#ifndef _UVM_UVM_PAGE_H_
|
65 |
#define _UVM_UVM_PAGE_H_
|
66 |
|
67 |
#include <uvm/uvm_extern.h> |
68 |
#include <uvm/uvm_pglist.h> |
69 |
|
70 |
#include <sys/rbtree.h> |
71 |
|
72 |
/*
|
73 |
* Management of resident (logical) pages.
|
74 |
*
|
75 |
* Each resident page has a vm_page structure, indexed by page number.
|
76 |
* There are several lists in the structure:
|
77 |
*
|
78 |
* - A red-black tree rooted with the containing object is used to
|
79 |
* quickly perform object+offset lookups.
|
80 |
* - A list of all pages for a given object, for a quick deactivation
|
81 |
* at a time of deallocation.
|
82 |
* - An ordered list of pages due for pageout.
|
83 |
*
|
84 |
* In addition, the structure contains the object and offset to which
|
85 |
* this page belongs (for pageout) and sundry status bits.
|
86 |
*
|
87 |
* Note that the page structure has no lock of its own. The page is
|
88 |
* generally protected by its owner's lock (UVM object or amap/anon).
|
89 |
* It should be noted that UVM has to serialize pmap(9) operations on
|
90 |
* the managed pages, e.g. for pmap_enter() calls. Hence, the lock
|
91 |
* order is as follows:
|
92 |
*
|
93 |
* [vmpage-owner-lock] ->
|
94 |
* any pmap locks (e.g. PV hash lock)
|
95 |
*
|
96 |
* Since the kernel is always self-consistent, no serialization is
|
97 |
* required for unmanaged mappings, e.g. for pmap_kenter_pa() calls.
|
98 |
*
|
99 |
* Field markings and the corresponding locks:
|
100 |
*
|
101 |
* f: free page queue lock, uvm_fpageqlock
|
102 |
* o: page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
|
103 |
* p: page queue lock, uvm_pageqlock
|
104 |
* o,p: o|p for read, o&p for write
|
105 |
* w: wired page queue or uvm_pglistalloc:
|
106 |
* => wired page queue: o&p to change, stable from wire to unwire
|
107 |
* XXX What about concurrent or nested wire?
|
108 |
* => uvm_pglistalloc: owned by caller
|
109 |
* ?: locked by pmap or assumed page owner's lock
|
110 |
*
|
111 |
* UVM and pmap(9) may use uvm_page_locked_p() to assert whether the
|
112 |
* page owner's lock is acquired.
|
113 |
*
|
114 |
* A page can have one of four identities:
|
115 |
*
|
116 |
* o free
|
117 |
* => pageq.list is entry on global free page queue
|
118 |
* => listq.list is entry on per-CPU free page queue
|
119 |
* => uanon is unused (or (void *)0xdeadbeef for DEBUG)
|
120 |
* => uobject is unused (or (void *)0xdeadbeef for DEBUG)
|
121 |
* => PQ_FREE is set in pqflags
|
122 |
* o owned by a uvm_object
|
123 |
* => pageq.queue is entry on wired page queue, if any
|
124 |
* => listq.queue is entry on list of pages in object
|
125 |
* => uanon is NULL or the vm_anon to which it has been O->A loaned
|
126 |
* => uobject is owner
|
127 |
* o owned by a vm_anon
|
128 |
* => pageq is unused (XXX correct?)
|
129 |
* => listq is unused (XXX correct?)
|
130 |
* => uanon is owner
|
131 |
* => uobject is NULL
|
132 |
* => PQ_ANON is set in pqflags
|
133 |
* o allocated by uvm_pglistalloc
|
134 |
* => pageq.queue is entry on resulting pglist, owned by caller
|
135 |
* => listq is unused (XXX correct?)
|
136 |
* => uanon is unused
|
137 |
* => uobject is unused
|
138 |
*
|
139 |
* The following transitions are allowed:
|
140 |
*
|
141 |
* - uvm_pagealloc: free -> owned by a uvm_object/vm_anon
|
142 |
* - uvm_pagefree: owned by a uvm_object/vm_anon -> free
|
143 |
* - uvm_pglistalloc: free -> allocated by uvm_pglistalloc
|
144 |
* - uvm_pglistfree: allocated by uvm_pglistalloc -> free
|
145 |
*/
|
146 |
|
147 |
/*
 * vm_page: per-page bookkeeping for each managed physical page.
 *
 * The single-letter lock tags below are defined in the "Field markings
 * and the corresponding locks" comment earlier in this file.  Which
 * member of each union is live depends on the page's current identity
 * (free / uvm_object / vm_anon / uvm_pglistalloc), also described above.
 */
struct vm_page {
	struct rb_node		rb_node;	/* o: tree of pages in obj */

	union {
		TAILQ_ENTRY(vm_page) queue;	/* w: wired page queue
						 * or uvm_pglistalloc output */
		LIST_ENTRY(vm_page) list;	/* f: global free page queue */
	} pageq;

	union {
		TAILQ_ENTRY(vm_page) queue;	/* o: pages in same object */
		LIST_ENTRY(vm_page) list;	/* f: CPU free page queue */
	} listq;

	struct vm_anon		*uanon;		/* o,p: anon owner, or the anon
						 * to which an object page has
						 * been O->A loaned */
	struct uvm_object	*uobject;	/* o,p: owning object, or NULL */
	voff_t			offset;		/* o,p: offset into object */
	uint16_t		flags;		/* o: object flags (PG_*) */
	uint16_t		loan_count;	/* o,p: num. active loans */
	uint16_t		wire_count;	/* p: wired down map refs */
	uint16_t		pqflags;	/* p: page queue flags (PQ_*) */
	paddr_t			phys_addr;	/* physical address of page */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* ?: pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};
181 |
|
182 |
/*
|
183 |
* Overview of UVM page flags.
|
184 |
*
|
185 |
* Locking notes:
|
186 |
*
|
187 |
* PG_, struct vm_page::flags => locked by owner
|
188 |
* PQ_, struct vm_page::pqflags => locked by uvm_pageqlock
|
189 |
* PQ_FREE => additionally locked by uvm_fpageqlock
|
190 |
*
|
191 |
* Flag descriptions:
|
192 |
*
|
193 |
* PG_BUSY:
|
194 |
* Page is long-term locked, usually because of I/O (transfer from the
|
195 |
* page memory to the backing store) is in progress. LWP attempting
|
196 |
* to access the page shall set PG_WANTED and wait.
|
197 |
*
|
198 |
* PG_WANTED:
|
199 |
* Indicates that the page, which is currently PG_BUSY, is wanted by
|
200 |
* some other LWP. The page owner (i.e. LWP which set PG_BUSY) is
|
201 |
* responsible to clear both flags and wake up any waiters once it has
|
202 |
* released the long-term lock (PG_BUSY).
|
203 |
*
|
204 |
* PG_RELEASED:
|
205 |
* Indicates that the page, which is currently PG_BUSY, should be freed
|
206 |
* after the release of long-term lock. It is responsibility of the
|
207 |
* owning LWP (i.e. which set PG_BUSY) to do it.
|
208 |
*
|
209 |
* PG_CLEAN:
|
210 |
* Page has not been modified since it was loaded from the backing
|
211 |
* store. If this flag is not set, page is considered "dirty".
|
212 |
* XXX: Currently it means that the page *might* be clean; will be
|
213 |
* fixed with yamt-pagecache merge.
|
214 |
*
|
215 |
* PG_FAKE:
|
216 |
* Page has been allocated, but not yet initialised. The flag is used
|
217 |
* to avoid overwriting of valid data, e.g. to prevent read from the
|
218 |
* backing store when in-core data is newer.
|
219 |
*
|
220 |
* PG_TABLED:
|
221 |
* Indicates that the page is currently in the object's offset queue,
|
222 |
* and that it should be removed from it once the page is freed. Used
|
223 |
* for diagnostic purposes.
|
224 |
*
|
225 |
* PG_PAGEOUT:
|
226 |
* Indicates that the page is being paged-out in preparation for
|
227 |
* being freed.
|
228 |
*
|
229 |
* PG_RDONLY:
|
230 |
* Indicates that the page must be mapped read-only.
|
231 |
*
|
232 |
* PG_ZERO:
|
233 |
* Indicates that the page has been pre-zeroed. This flag is only
|
234 |
* set when the page is not in the queues and is cleared when the
|
235 |
* page is placed on the free list.
|
236 |
*
|
237 |
* PG_MARKER:
|
238 |
* Dummy marker page.
|
239 |
*/
|
240 |
|
241 |
/*
 * PG_* flags live in struct vm_page::flags and are locked by the page
 * owner; full descriptions are in the "Overview of UVM page flags"
 * comment above.
 */
#define	PG_BUSY		0x0001	/* page is long-term locked (e.g. for I/O) */
#define	PG_WANTED	0x0002	/* some LWP is waiting on the PG_BUSY owner */
#define	PG_TABLED	0x0004	/* page is in its object's offset queue */
#define	PG_CLEAN	0x0008	/* page *might* be clean (see note above) */
#define	PG_PAGEOUT	0x0010	/* page is being paged out prior to free */
#define	PG_RELEASED	0x0020	/* free page once PG_BUSY is released */
#define	PG_FAKE		0x0040	/* allocated but not yet initialised */
#define	PG_RDONLY	0x0080	/* page must be mapped read-only */
#define	PG_ZERO		0x0100	/* page has been pre-zeroed */
#define	PG_MARKER	0x0200	/* dummy marker page */

#define	PG_PAGER1	0x1000	/* pager-specific flag */

/* snprintb(3)/"%b"-style format string describing the PG_* bits */
#define	UVM_PGFLAGBITS \
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\12MARKER\15PAGER1"
|
257 |
|
258 |
/*
 * PQ_* flags live in struct vm_page::pqflags and are locked by
 * uvm_pageqlock (PQ_FREE additionally by uvm_fpageqlock, see above).
 */
#define	PQ_FREE		0x0001	/* page is on free list */
#define	PQ_ANON		0x0002	/* page is part of an anon, rather
				   than an uvm_object */
#define	PQ_AOBJ		0x0004	/* page is part of an anonymous
				   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)	/* anon or aobj: swap-backed */
#define	PQ_READAHEAD	0x0008	/* read-ahead but has not been "hit" yet */

/*
 * NOTE(review): the PQ_PRIVATE* bits are not interpreted by this header;
 * presumably reserved for subsystem-private use -- confirm at the callers.
 */
#define	PQ_PRIVATE1	0x0100
#define	PQ_PRIVATE2	0x0200
#define	PQ_PRIVATE3	0x0400
#define	PQ_PRIVATE4	0x0800
#define	PQ_PRIVATE5	0x1000
#define	PQ_PRIVATE6	0x2000
#define	PQ_PRIVATE7	0x4000
#define	PQ_PRIVATE8	0x8000

/* snprintb(3)/"%b"-style format string describing the PQ_* bits */
#define	UVM_PQFLAGBITS \
	"\20\1FREE\2ANON\3AOBJ\4READAHEAD" \
	"\11PRIVATE1\12PRIVATE2\13PRIVATE3\14PRIVATE4" \
	"\15PRIVATE5\16PRIVATE6\17PRIVATE7\20PRIVATE8"
|
279 |
|
280 |
/*
|
281 |
* physical memory layout structure
|
282 |
*
|
283 |
* MD vmparam.h must #define:
|
284 |
* VM_PHYSSEG_MAX = max number of physical memory segments we support
|
285 |
* (if this is "1" then we revert to a "contig" case)
|
286 |
* VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
|
287 |
* - VM_PSTRAT_RANDOM: linear search (random order)
|
288 |
* - VM_PSTRAT_BSEARCH: binary search (sorted by address)
|
289 |
* - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
|
290 |
* - others?
|
291 |
* XXXCDC: eventually we should purge all left-over global variables...
|
292 |
*/
|
293 |
/* physical-segment sort/search strategies (MD vmparam.h picks one) */
#define	VM_PSTRAT_RANDOM	1	/* linear search, random order */
#define	VM_PSTRAT_BSEARCH	2	/* binary search, sorted by address */
#define	VM_PSTRAT_BIGFIRST	3	/* linear search, largest segment first */

/*
 * vm_physseg: describes one segment of physical memory
 *
 * "PF#" in the comments below is a page frame number.
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) +1 */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *lastpg;	/* vm_page structure for end */
	int	free_list;		/* which free list they belong on */
	u_int	start_hint;		/* start looking for free pages here */
					/* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};
314 |
|
315 |
#ifdef _KERNEL

/*
 * globals
 */

/*
 * NOTE(review): presumably gates pre-zeroing of free pages (see
 * uvm_pageidlezero() below) -- confirm against uvm_page.c.
 */
extern bool vm_page_zero_enable;

/*
 * physical memory config is stored in vm_physmem.
 */

#define VM_PHYSMEM_PTR(i) (&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
/*
 * NOTE(review): despite the name, this copies entry (j) over entry (i)
 * rather than exchanging the two -- apparently used only to shift the
 * vm_physmem array; confirm at the callers before "fixing".
 */
#define VM_PHYSMEM_PTR_SWAP(i, j) \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;		/* presumably the count of valid entries
				 * in vm_physmem[] -- confirm in uvm_page.c */
337 |
|
338 |
/*
 * prototypes: the following prototypes define the interface to pages
 */

/* initialisation / configuration */
void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_pageidlezero(void);

/* per-page lifecycle and queue operations */
void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);
bool uvm_page_locked_p(struct vm_page *);

int uvm_page_lookup_freelist(struct vm_page *);

/* physical address <-> vm_page / segment translation */
int vm_physseg_find(paddr_t, int *);
struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);
372 |
|
373 |
/*
 * macros
 */

#define UVM_PAGE_TREE_PENALTY	4	/* XXX: a guess */

#define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)

#ifdef __HAVE_VM_PAGE_MD
/* accessor for the pmap-specific (MD) per-page data */
#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
#endif

/*
 * Compute the page color bucket for a given page.
 */
#define VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)

#define PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)

#define VM_PAGE_IS_FREE(entry)	((entry)->pqflags & PQ_FREE)
/*
 * NOTE(review): reinterprets pg->offset as a struct uvm_cpu pointer.
 * The free-page identity above leaves offset's owner fields unused, so
 * offset evidently doubles as per-CPU free-list linkage for free pages
 * -- confirm the store side in uvm_page.c.
 */
#define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */
401 |
|
402 |
#endif /* _UVM_UVM_PAGE_H_ */ |