/*	$NetBSD: uvm_extern.h,v 1.194 2015/03/20 15:41:43 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

/*
 * uvm_extern.h: this file defines the external interface to the VM system.
 *
 * this should be the only file included by non-VM parts of the kernel
 * which need access to VM services.  if you want to know the interface
 * to the MI VM layer without knowing the details, this is the file to
 * learn.
 *
 * NOTE: vm system calls are prototyped in syscallargs.h
 */

/*
 * defines
 */

/*
 * the following defines are for uvm_map and functions which call it.
 */

/* protection bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE	0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03	/* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_DONATE	0x30	/* "donate" << not used */

/* 0x40, 0x80: not used */

/* bits 0x700: max protection, 0x800: not used */

/* bits 0x7000: advice, 0x8000: not used */
/* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */
#define UVM_ADV_NORMAL		0x0	/* 'normal' */
#define UVM_ADV_RANDOM		0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL	0x2	/* 'sequential' */
#define UVM_ADV_WILLNEED	0x3	/* pages will be needed */
#define UVM_ADV_DONTNEED	0x4	/* pages won't be needed */
#define UVM_ADV_NOREUSE		0x5	/* pages will be used only once */
#define UVM_ADV_MASK		0x7	/* mask */

/* bits 0xffff0000: mapping flags */
#define UVM_FLAG_FIXED		0x010000 /* find space */
#define UVM_FLAG_OVERLAY	0x020000 /* establish overlay */
#define UVM_FLAG_NOMERGE	0x040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW	0x080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD	0x100000 /* for bss: pad amap to reduce allocations */
#define UVM_FLAG_TRYLOCK	0x200000 /* fail if we can not lock map */
#define UVM_FLAG_NOWAIT		0x400000 /* not allowed to sleep */
#define UVM_FLAG_WAITVA		0x800000 /* wait for va */
#define UVM_FLAG_VAONLY		0x2000000 /* unmap: no pages are mapped */
#define UVM_FLAG_COLORMATCH	0x4000000 /* match color given in off */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	(((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))

/* magic offset value: offset not known(obj) or don't care(!obj) */
#define UVM_UNKNOWN_OFFSET	((voff_t) -1)
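
/*
 * Example (editor's illustration, not part of the original header):
 * UVM_MAPFLAG() packs the protection, maximum protection, inheritance,
 * advice and flag bits defined above into the single uvm_flag_t argument
 * that uvm_map() (prototyped below) takes.  For a hypothetical read/write,
 * copy-on-write mapping:
 *
 *	uvm_flag_t f = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX,
 *	    UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_COPYONW);
 *
 * The extraction macros then recover each field:
 *
 *	UVM_PROTECTION(f) == UVM_PROT_RW
 *	UVM_MAXPROTECTION(f) == UVM_PROT_RWX
 *	UVM_INHERIT(f) == (UVM_INH_COPY >> 4)
 *	UVM_ADVICE(f) == UVM_ADV_NORMAL
 */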

/*
 * the following defines are for uvm_km_alloc/free's flags
 */
#define UVM_KMF_WIRED	0x1			/* allocation type: wired */
#define UVM_KMF_PAGEABLE 0x2			/* allocation type: pageable */
#define UVM_KMF_VAONLY	0x4			/* allocation type: VA only */
#define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED)
#define UVM_KMF_CANFAIL	0x8			/* caller handles failure */
#define UVM_KMF_ZERO	0x10			/* want zero filled memory */
#define UVM_KMF_EXEC	0x20			/* need executable mapping */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */
#define UVM_KMF_NOWAIT	UVM_FLAG_NOWAIT		/* not allowed to sleep */
#define UVM_KMF_WAITVA	UVM_FLAG_WAITVA		/* sleep for va */
#define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH	/* start at color in align */
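
/*
 * Example (editor's illustration, not part of the original header): a
 * minimal sketch of a wired, zero-filled kernel allocation and its
 * release, assuming the usual NetBSD convention that uvm_km_alloc()
 * returns 0 on failure when UVM_KMF_CANFAIL is given:
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...use the memory...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 *
 * (uvm_km_alloc(), uvm_km_free() and kernel_map are declared later in
 * this file.)
 */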

/*
 * the following defines the strategies for uvm_pagealloc_strat()
 */
#define UVM_PGA_STRAT_NORMAL	0	/* priority (low id to high) walk */
#define UVM_PGA_STRAT_ONLY	1	/* only specified free list */
#define UVM_PGA_STRAT_FALLBACK	2	/* ONLY falls back on NORMAL */

/*
 * flags for uvm_pagealloc_strat()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define UVM_PGA_ZERO		0x0002	/* returned page must be zero'd */

/*
 * flags for ubc_alloc()
 */
#define UBC_READ	0x001	/* reading from object */
#define UBC_WRITE	0x002	/* writing to object */
#define UBC_FAULTBUSY	0x004	/* nobody else is using these pages, so busy
				 * them at alloc and unbusy at release (e.g.,
				 * for writes extending a file) */

/*
 * flags for ubc_release()
 */
#define UBC_UNMAP	0x010	/* unmap pages now -- don't leave the
				 * mappings cached indefinitely */

/*
 * flags for ubc_uiomove()
 */
#define UBC_PARTIALOK	0x100	/* return early on error; otherwise, zero all
				 * remaining bytes after error */

/*
 * flags for uvn_findpages().
 */
#define UFP_ALL		0x00
#define UFP_NOWAIT	0x01
#define UFP_NOALLOC	0x02
#define UFP_NOCACHE	0x04
#define UFP_NORDONLY	0x08
#define UFP_DIRTYONLY	0x10
#define UFP_BACKWARD	0x20

/*
 * lockflags that control the locking behavior of various functions.
 */
#define UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * Default number of pages to allocate on the stack
 */
#define UBC_MAX_PAGES	8

/*
 * Value representing inactive emap.
 */
#define UVM_EMAP_INACTIVE	(0)

/*
 * structures
 */

struct buf;
struct core;
struct loadavg;
struct mount;
struct pglist;
struct proc;
struct uio;
struct uvm_object;
struct vm_anon;
struct vmspace;
struct pmap;
struct vnode;
struct vm_map_entry;
struct vm_map;
struct vm_page;
struct vmtotal;

/*
 * uvm_pctparam: parameter to be shown as percentage to user.
 */

#define UVM_PCTPARAM_SHIFT	8
#define UVM_PCTPARAM_SCALE	(1 << UVM_PCTPARAM_SHIFT)
#define UVM_PCTPARAM_APPLY(pct, x) \
	(((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT)
struct uvm_pctparam {
	int pct_pct;	/* percent [0, 100] */ /* should be the first member */
	int pct_scaled;
	int (*pct_check)(struct uvm_pctparam *, int);
};
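
/*
 * Example (editor's illustration, not part of the original header):
 * UVM_PCTPARAM_APPLY() takes "pct percent of x" with a shift rather than
 * a division, assuming pct_scaled is kept equal to
 * pct_pct * UVM_PCTPARAM_SCALE / 100 (presumably by uvm_pctparam_set(),
 * prototyped below).  With pct_pct = 25: pct_scaled = 25 * 256 / 100 = 64,
 * so UVM_PCTPARAM_APPLY(&p, 1000) = (1000 * 64) >> 8 = 250.
 */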

/*
 * uvmexp: global data structures that are exported to parts of the kernel
 * other than the vm system.
 */

struct uvmexp {
	/* vm_page constants */
	int pagesize;	/* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;	/* page mask */
	int pageshift;	/* page shift */

	/* vm_page counters */
	int npages;	/* number of pages we manage */
	int free;	/* number of free pages */
	int paging;	/* number of pages in the process of being paged out */
	int wired;	/* number of wired pages */

	/*
	 * Adding anything before this line will break binary compatibility
	 * with top(1) on NetBSD 1.5.
	 */

	int ncolors;	/* number of page color buckets: must be p-o-2 */
	int colormask;	/* color bucket mask */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	unsigned anonpages;	/* number of pages used by anon mappings */
	unsigned filepages;	/* number of pages used by cached file data */
	unsigned execpages;	/* number of pages used by cached exec data */

	/* pageout params */
	int freemin;	/* min number of free pages */
	int freetarg;	/* target number of free pages */
	int wiredmax;	/* max number of wired pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpgavail;	/* number of swap pages currently available */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */

	/* stat counters.  XXX: should be 64-bit counters */
	int _unused_faults;	/* page fault count */
	int _unused_traps;	/* trap count */
	int _unused_intrs;	/* interrupt count */
	int _unused_swtch;	/* context switch count */
	int _unused_softs;	/* software interrupt count */
	int _unused_syscalls;	/* system calls */
	int pageins;		/* pagein operation count */
				/* pageouts are in pdpageouts below */
	int _unused1;
	int _unused2;
	int pgswapin;		/* pages swapped in */
	int pgswapout;		/* pages swapped out */
	int forks;		/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was
				   aborted */
	int colorhit;		/* pagealloc where we got optimal color */
	int colormiss;		/* pagealloc where we didn't */
	int cpuhit;		/* pagealloc where we allocated locally */
	int cpumiss;		/* pagealloc where we didn't */

	/* fault subcounters.  XXX: should be 64-bit counters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retries an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters.  XXX: should be 64-bit counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int _unused3;
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scanned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pageout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to thresholds */
	int pdrefile;	/* file pages reactivated due to thresholds */
	int pdreexec;	/* executable pages reactivated due to thresholds */
};

/*
 * The following structure is 64-bit alignment safe.  New elements
 * should only be added to the end of this structure so binary
 * compatibility can be preserved.
 */
struct uvmexp_sysctl {
	int64_t	pagesize;
	int64_t	pagemask;
	int64_t	pageshift;
	int64_t	npages;
	int64_t	free;
	int64_t	active;
	int64_t	inactive;
	int64_t	paging;
	int64_t	wired;
	int64_t	zeropages;
	int64_t	reserve_pagedaemon;
	int64_t	reserve_kernel;
	int64_t	freemin;
	int64_t	freetarg;
	int64_t	inactarg;		/* unused */
	int64_t	wiredmax;
	int64_t	nswapdev;
	int64_t	swpages;
	int64_t	swpginuse;
	int64_t	swpgonly;
	int64_t	nswget;
	int64_t	unused1;		/* unused; was nanon */
	int64_t	cpuhit;
	int64_t	cpumiss;
	int64_t	faults;
	int64_t	traps;
	int64_t	intrs;
	int64_t	swtch;
	int64_t	softs;
	int64_t	syscalls;
	int64_t	pageins;
	int64_t	swapins;		/* unused */
	int64_t	swapouts;		/* unused */
	int64_t	pgswapin;
	int64_t	pgswapout;
	int64_t	forks;
	int64_t	forks_ppwait;
	int64_t	forks_sharevm;
	int64_t	pga_zerohit;
	int64_t	pga_zeromiss;
	int64_t	zeroaborts;
	int64_t	fltnoram;
	int64_t	fltnoanon;
	int64_t	fltpgwait;
	int64_t	fltpgrele;
	int64_t	fltrelck;
	int64_t	fltrelckok;
	int64_t	fltanget;
	int64_t	fltanretry;
	int64_t	fltamcopy;
	int64_t	fltnamap;
	int64_t	fltnomap;
	int64_t	fltlget;
	int64_t	fltget;
	int64_t	flt_anon;
	int64_t	flt_acow;
	int64_t	flt_obj;
	int64_t	flt_prcopy;
	int64_t	flt_przero;
	int64_t	pdwoke;
	int64_t	pdrevs;
	int64_t	unused4;
	int64_t	pdfreed;
	int64_t	pdscans;
	int64_t	pdanscan;
	int64_t	pdobscan;
	int64_t	pdreact;
	int64_t	pdbusy;
	int64_t	pdpageouts;
	int64_t	pdpending;
	int64_t	pddeact;
	int64_t	anonpages;
	int64_t	filepages;
	int64_t	execpages;
	int64_t	colorhit;
	int64_t	colormiss;
	int64_t	ncolors;
};
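
/*
 * Example (editor's illustration, not part of the original header): on
 * NetBSD, userland snapshots these counters through sysctl(3); the node
 * is "vm.uvmexp2" (VM_UVMEXP2, from uvm_param.h):
 *
 *	struct uvmexp_sysctl u;
 *	size_t len = sizeof(u);
 *	int mib[2] = { CTL_VM, VM_UVMEXP2 };
 *	if (sysctl(mib, 2, &u, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */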

#ifdef _KERNEL
/* we need this before including uvm_page.h on some platforms */
extern struct uvmexp uvmexp;
/* MD code needs this without including <uvm/uvm.h> */
extern bool vm_page_zero_enable;
#endif

/*
 * Finally, bring in standard UVM headers.
 */
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <sys/lock.h>
#ifdef _KERNEL
#include <sys/vmem.h>
#endif
#include <uvm/uvm_param.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_pager.h>

/*
 * helpers for calling ubc_release()
 */
#ifdef PMAP_CACHE_VIVT
#define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0)
#else
#define UBC_WANT_UNMAP(vp) false
#endif
#define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0)
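
/*
 * Example (editor's illustration, not part of the original header): a
 * hypothetical file-read path pushing data out of a vnode's pages with
 * ubc_uiomove() (prototyped below), where UBC_UNMAP_FLAG() requests the
 * mapping be torn down only on virtually-indexed caches:
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytes, UVM_ADV_SEQUENTIAL,
 *	    UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
 */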

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct	vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* number of references *
				 * note: protected by vm_map.ref_lock */
	void *	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	segsz_t vm_issize;	/* initial unmapped stack size (pages) */
	void *	vm_taddr;	/* user virtual address of text XXX */
	void *	vm_daddr;	/* user virtual address of data XXX */
	void	*vm_maxsaddr;	/* user VA at max stack growth */
	void	*vm_minsaddr;	/* user VA at top of stack */
	size_t	vm_aslr_delta_mmap;	/* mmap() random delta for ASLR */
};
#define VMSPACE_IS_KERNEL_P(vm)	VM_MAP_IS_KERNEL(&(vm)->vm_map)

#ifdef _KERNEL

/*
 * used to keep state while iterating over the map for a core dump.
 */
struct uvm_coredump_state {
	void *cookie;		/* opaque for the caller */
	vaddr_t start;		/* start of region */
	vaddr_t realend;	/* real end of region */
	vaddr_t end;		/* virtual end of region */
	vm_prot_t prot;		/* protection of region */
	int flags;		/* flags; see below */
};

#define UVM_COREDUMP_STACK	0x01	/* region is user stack */

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *kernel_map;
extern struct vm_map *phys_map;

/*
 * macros
 */

#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))

/* vm_machdep.c */
int		vmapbuf(struct buf *, vsize_t);
void		vunmapbuf(struct buf *, vsize_t);

/* uvm_aobj.c */
struct uvm_object	*uao_create(vsize_t, int);
void			uao_set_pgfl(struct uvm_object *, int);
void			uao_detach(struct uvm_object *);
void			uao_reference(struct uvm_object *);

/* uvm_bio.c */
void			ubc_init(void);
void			ubchist_init(void);
void *			ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int,
			    int);
void			ubc_release(void *, int);
int			ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
			    int, int);
void			ubc_zerorange(struct uvm_object *, off_t, size_t, int);
void			ubc_purge(struct uvm_object *);

/* uvm_emap.c */
void			uvm_emap_sysinit(void);
#ifdef __HAVE_PMAP_EMAP
void			uvm_emap_switch(lwp_t *);
#else
#define			uvm_emap_switch(l)
#endif

u_int			uvm_emap_gen_return(void);
void			uvm_emap_update(u_int);

vaddr_t			uvm_emap_alloc(vsize_t, bool);
void			uvm_emap_free(vaddr_t, size_t);

void			uvm_emap_enter(vaddr_t, struct vm_page **, u_int);
void			uvm_emap_remove(vaddr_t, vsize_t);

#ifdef __HAVE_PMAP_EMAP
void			uvm_emap_consume(u_int);
u_int			uvm_emap_produce(void);
#else
#define			uvm_emap_consume(x)
#define			uvm_emap_produce()	UVM_EMAP_INACTIVE
#endif

/* uvm_fault.c */
#define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0)
int		uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int);
			/* handle a page fault */
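
/*
 * Example (editor's illustration, not part of the original header): MD
 * trap handlers conventionally resolve a page fault by passing the
 * faulting map, page-aligned address and access type to uvm_fault(),
 * turning a non-zero error into a signal:
 *
 *	error = uvm_fault(&p->p_vmspace->vm_map, trunc_page(va),
 *	    write ? VM_PROT_WRITE : VM_PROT_READ);
 *
 * (VM_PROT_* comes from uvm_prot.h, included earlier in this file.)
 */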

/* uvm_glue.c */
#if defined(KGDB)
void			uvm_chgkprot(void *, size_t, int);
#endif
void			uvm_proc_fork(struct proc *, struct proc *, bool);
void			uvm_lwp_fork(struct lwp *, struct lwp *,
			    void *, size_t, void (*)(void *), void *);
int			uvm_coredump_walkmap(struct proc *,
			    int (*)(struct uvm_coredump_state *), void *);
int			uvm_coredump_count_segs(struct proc *);
void			uvm_proc_exit(struct proc *);
void			uvm_lwp_exit(struct lwp *);
void			uvm_init_limits(struct proc *);
bool			uvm_kernacc(void *, size_t, vm_prot_t);
__dead void		uvm_scheduler(void);
vaddr_t			uvm_uarea_alloc(void);
void			uvm_uarea_free(vaddr_t);
vaddr_t			uvm_uarea_system_alloc(struct cpu_info *);
void			uvm_uarea_system_free(vaddr_t);
vaddr_t			uvm_lwp_getuarea(lwp_t *);
void			uvm_lwp_setuarea(lwp_t *, vaddr_t);
int			uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t);
void			uvm_vsunlock(struct vmspace *, void *, size_t);
void			uvm_cpu_attach(struct cpu_info *);


/* uvm_init.c */
void			uvm_init(void);

/* uvm_io.c */
int			uvm_io(struct vm_map *, struct uio *);

/* uvm_km.c */
vaddr_t			uvm_km_alloc(struct vm_map *, vsize_t, vsize_t,
			    uvm_flag_t);
void			uvm_km_free(struct vm_map *, vaddr_t, vsize_t,
			    uvm_flag_t);

struct vm_map		*uvm_km_suballoc(struct vm_map *, vaddr_t *,
			    vaddr_t *, vsize_t, int, bool,
			    struct vm_map *);
#ifdef _KERNEL
int			uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t,
			    vmem_addr_t *);
void			uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t);
bool			uvm_km_va_starved_p(void);
#endif

/* uvm_map.c */
int			uvm_map(struct vm_map *, vaddr_t *, vsize_t,
			    struct uvm_object *, voff_t, vsize_t,
			    uvm_flag_t);
int			uvm_map_pageable(struct vm_map *, vaddr_t,
			    vaddr_t, bool, int);
int			uvm_map_pageable_all(struct vm_map *, int, vsize_t);
bool			uvm_map_checkprot(struct vm_map *, vaddr_t,
			    vaddr_t, vm_prot_t);
int			uvm_map_protect(struct vm_map *, vaddr_t,
			    vaddr_t, vm_prot_t, bool);
struct vmspace		*uvmspace_alloc(vaddr_t, vaddr_t, bool);
void			uvmspace_init(struct vmspace *, struct pmap *,
			    vaddr_t, vaddr_t, bool);
void			uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool);
void			uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool);
struct vmspace		*uvmspace_fork(struct vmspace *);
void			uvmspace_addref(struct vmspace *);
void			uvmspace_free(struct vmspace *);
void			uvmspace_share(struct proc *, struct proc *);
void			uvmspace_unshare(struct lwp *);

void			uvm_whatis(uintptr_t, void (*)(const char *, ...));

/* uvm_meter.c */
int			uvm_sysctl(int *, u_int, void *, size_t *,
			    void *, size_t, struct proc *);
int			uvm_pctparam_check(struct uvm_pctparam *, int);
void			uvm_pctparam_set(struct uvm_pctparam *, int);
int			uvm_pctparam_get(struct uvm_pctparam *);
void			uvm_pctparam_init(struct uvm_pctparam *, int,
			    int (*)(struct uvm_pctparam *, int));
int			uvm_pctparam_createsysctlnode(struct uvm_pctparam *,
			    const char *, const char *);

/* uvm_mmap.c */
int			uvm_mmap_dev(struct proc *, void **, size_t, dev_t,
			    off_t);
int			uvm_mmap_anon(struct proc *, void **, size_t);
vaddr_t			uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t);

/* uvm_mremap.c */
int			uvm_mremap(struct vm_map *, vaddr_t, vsize_t,
			    struct vm_map *, vaddr_t *, vsize_t,
			    struct proc *, int);

/* uvm_object.c */
void			uvm_obj_init(struct uvm_object *,
			    const struct uvm_pagerops *, bool, u_int);
void			uvm_obj_setlock(struct uvm_object *, kmutex_t *);
void			uvm_obj_destroy(struct uvm_object *, bool);
int			uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
			    struct pglist *);
void			uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);

/* uvm_page.c */
struct vm_page		*uvm_pagealloc_strat(struct uvm_object *,
			    voff_t, struct vm_anon *, int, int, int);
#define	uvm_pagealloc(obj, off, anon, flags) \
	    uvm_pagealloc_strat((obj), (off), (anon), (flags), \
				UVM_PGA_STRAT_NORMAL, 0)
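
/*
 * Example (editor's illustration, not part of the original header):
 * allocating one zero-filled page owned by no object and no anon, and
 * releasing it with uvm_pagefree() (prototyped in uvm_page.h):
 *
 *	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		...sleep and retry, or fail...
 *	...
 *	uvm_pagefree(pg);
 */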
void			uvm_pagereplace(struct vm_page *,
			    struct vm_page *);
void			uvm_pagerealloc(struct vm_page *,
			    struct uvm_object *, voff_t);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void			uvm_page_physload(paddr_t, paddr_t, paddr_t,
			    paddr_t, int);
void			uvm_setpagesize(void);

/* uvm_pager.c */
void			uvm_aio_biodone(struct buf *);
void			uvm_aio_aiodone(struct buf *);
void			uvm_aio_aiodone_pages(struct vm_page **, int, bool,
			    int);

/* uvm_pdaemon.c */
void			uvm_pageout(void *);
struct work;
void			uvm_aiodone_worker(struct work *, void *);
void			uvm_pageout_start(int);
void			uvm_pageout_done(int);
void			uvm_estimatepageable(int *, int *);

/* uvm_pglist.c */
int			uvm_pglistalloc(psize_t, paddr_t, paddr_t,
			    paddr_t, paddr_t, struct pglist *, int, int);
void			uvm_pglistfree(struct pglist *);

/* uvm_swap.c */
void			uvm_swap_init(void);

/* uvm_unix.c */
int			uvm_grow(struct proc *, vaddr_t);

/* uvm_user.c */
void			uvm_deallocate(struct vm_map *, vaddr_t, vsize_t);

/* uvm_vnode.c */
void			uvm_vnp_setsize(struct vnode *, voff_t);
void			uvm_vnp_setwritesize(struct vnode *, voff_t);
int			uvn_findpages(struct uvm_object *, voff_t,
			    int *, struct vm_page **, int);
bool			uvn_text_p(struct uvm_object *);
bool			uvn_clean_p(struct uvm_object *);
bool			uvn_needs_writefault_p(struct uvm_object *);

/* kern_malloc.c */
void kmeminit_nkmempages(void);
extern int nkmempages;

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */