root / lab4 / .minix-src / include / uvm / uvm_amap.h @ 14
History | View | Annotate | Download (9.79 KB)
1 |
/* $NetBSD: uvm_amap.h,v 1.37 2011/06/12 03:36:02 rmind Exp $ */
|
---|---|
2 |
|
3 |
/*
|
4 |
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
5 |
* All rights reserved.
|
6 |
*
|
7 |
* Redistribution and use in source and binary forms, with or without
|
8 |
* modification, are permitted provided that the following conditions
|
9 |
* are met:
|
10 |
* 1. Redistributions of source code must retain the above copyright
|
11 |
* notice, this list of conditions and the following disclaimer.
|
12 |
* 2. Redistributions in binary form must reproduce the above copyright
|
13 |
* notice, this list of conditions and the following disclaimer in the
|
14 |
* documentation and/or other materials provided with the distribution.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
17 |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
18 |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
19 |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
20 |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
21 |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
22 |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
23 |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
25 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
*/
|
27 |
|
28 |
#ifndef _UVM_UVM_AMAP_H_
|
29 |
#define _UVM_UVM_AMAP_H_
|
30 |
|
31 |
/*
|
32 |
* uvm_amap.h: general amap interface and amap implementation-specific info
|
33 |
*/
|
34 |
|
35 |
/*
|
36 |
* an amap structure contains pointers to a set of anons that are
|
37 |
* mapped together in virtual memory (an anon is a single page of
|
38 |
* anonymous virtual memory -- see uvm_anon.h). in uvm we hide the
|
39 |
* details of the implementation of amaps behind a general amap
|
40 |
* interface. this allows us to change the amap implementation
|
41 |
* without having to touch the rest of the code. this file is divided
|
42 |
* into two parts: the definition of the uvm amap interface and the
|
43 |
* amap implementation-specific definitions.
|
44 |
*/
|
45 |
|
46 |
#ifdef _KERNEL
|
47 |
|
48 |
/*
|
49 |
* part 1: amap interface
|
50 |
*/
|
51 |
|
52 |
void uvm_amap_init(void);	/* one-time amap module initialization */

/*
 * forward definition of vm_amap structure.  only amap
 * implementation-specific code should directly access the fields of
 * this structure.
 */

struct vm_amap;

/*
 * prototypes for the amap interface
 */

void amap_add			/* add an anon to an amap */
	(struct vm_aref *, vaddr_t,
	    struct vm_anon *, bool);
struct vm_amap *amap_alloc	/* allocate a new amap */
	(vaddr_t, vaddr_t, int);
void amap_copy			/* clear amap needs-copy flag */
	(struct vm_map *, struct vm_map_entry *, int,
	    vaddr_t, vaddr_t);
void amap_cow_now		/* resolve all COW faults now */
	(struct vm_map *, struct vm_map_entry *);
int amap_extend			/* make amap larger */
	(struct vm_map_entry *, vsize_t, int);
int amap_flags			/* get amap's flags */
	(struct vm_amap *);
void amap_free			/* free amap */
	(struct vm_amap *);
void amap_lock			/* lock amap */
	(struct vm_amap *);
struct vm_anon *amap_lookup	/* lookup an anon @ offset in amap */
	(struct vm_aref *, vaddr_t);
void amap_lookups		/* lookup multiple anons */
	(struct vm_aref *, vaddr_t,
	    struct vm_anon **, int);
void amap_ref			/* add a reference to an amap */
	(struct vm_amap *, vaddr_t, vsize_t, int);
int amap_refs			/* get number of references of amap */
	(struct vm_amap *);
void amap_share_protect		/* protect pages in a shared amap */
	(struct vm_map_entry *, vm_prot_t);
void amap_splitref		/* split reference to amap into two */
	(struct vm_aref *, struct vm_aref *, vaddr_t);
void amap_unadd			/* remove an anon from an amap */
	(struct vm_aref *, vaddr_t);
void amap_unlock		/* unlock amap */
	(struct vm_amap *);
void amap_unref			/* drop reference to an amap */
	(struct vm_amap *, vaddr_t, vsize_t, bool);
void amap_wipeout		/* remove all anons from amap */
	(struct vm_amap *);
bool amap_swap_off		/* presumably (startslot, endslot) for a
				 * swap-off pass (see AMAP_SWAPOFF) --
				 * NOTE(review): confirm against uvm_amap.c */
	(int, int);
108 |
|
109 |
/*
 * amap flag values (stored in am_flags)
 */

#define AMAP_SHARED	0x1	/* amap is shared */
#define AMAP_REFALL	0x2	/* amap_ref: reference entire amap */
#define AMAP_SWAPOFF	0x4	/* amap_swap_off() is in progress */

/*
 * amap_copy flags
 */

#define AMAP_COPY_NOWAIT	0x02	/* not allowed to sleep */
#define AMAP_COPY_NOCHUNK	0x04	/* not allowed to chunk */
#define AMAP_COPY_NOMERGE	0x08	/* not allowed to merge */

/*
 * amap_extend flags
 */
#define AMAP_EXTEND_BACKWARDS	0x00	/* add "size" to start of map */
#define AMAP_EXTEND_FORWARDS	0x01	/* add "size" to end of map */
#define AMAP_EXTEND_NOWAIT	0x02	/* not allowed to sleep */
131 |
|
132 |
#endif /* _KERNEL */ |
133 |
|
134 |
/**********************************************************************/
|
135 |
|
136 |
/*
|
137 |
* part 2: amap implementation-specific info
|
138 |
*/
|
139 |
|
140 |
/*
|
141 |
* we currently provide an array-based amap implementation. in this
|
142 |
* implementation we provide the option of tracking split references
|
143 |
* so that we don't lose track of references during partial unmaps
|
144 |
* ... this is enabled with the "UVM_AMAP_PPREF" define.
|
145 |
*/
|
146 |
|
147 |
#define UVM_AMAP_PPREF		/* track partial references */

/*
 * here is the definition of the vm_amap structure for this implementation.
 * field order is part of the in-kernel ABI; do not reorder.
 */

struct vm_amap {
	kmutex_t *am_lock;	/* lock [locks all vm_amap fields] */
	int am_ref;		/* reference count */
	int am_flags;		/* flags (AMAP_* values above) */
	int am_maxslot;		/* max # of slots allocated */
	int am_nslot;		/* # of slots currently in map ( <= maxslot) */
	int am_nused;		/* # of slots currently in use */
	int *am_slots;		/* contig array of active slots */
	int *am_bckptr;		/* back pointer array to am_slots */
	struct vm_anon **am_anon; /* array of anonymous pages */
#ifdef UVM_AMAP_PPREF
	int *am_ppref;		/* per page reference count (if !NULL) */
#endif
	LIST_ENTRY(vm_amap) am_list; /* linkage on an amap list --
				      * NOTE(review): list head lives in
				      * uvm_amap.c, not visible here */
};
168 |
|
169 |
/*
|
170 |
* note that am_slots, am_bckptr, and am_anon are arrays. this allows
|
171 |
 * fast lookup of pages based on their virtual address at the expense of
|
172 |
* some extra memory. in the future we should be smarter about memory
|
173 |
* usage and fall back to a non-array based implementation on systems
|
174 |
* that are short of memory (XXXCDC).
|
175 |
*
|
176 |
* the entries in the array are called slots... for example an amap that
|
177 |
* covers four pages of virtual memory is said to have four slots. here
|
178 |
* is an example of the array usage for a four slot amap. note that only
|
179 |
* slots one and three have anons assigned to them. "D/C" means that we
|
180 |
* "don't care" about the value.
|
181 |
*
|
182 |
* 0 1 2 3
|
183 |
* am_anon: NULL, anon0, NULL, anon1 (actual pointers to anons)
|
184 |
* am_bckptr: D/C, 1, D/C, 0 (points to am_slots entry)
|
185 |
*
|
186 |
* am_slots: 3, 1, D/C, D/C (says slots 3 and 1 are in use)
|
187 |
*
|
188 |
* note that am_bckptr is D/C if the slot in am_anon is set to NULL.
|
189 |
* to find the entry in am_slots for an anon, look at am_bckptr[slot],
|
190 |
* thus the entry for slot 3 in am_slots[] is at am_slots[am_bckptr[3]].
|
191 |
* in general, if am_anon[X] is non-NULL, then the following must be
|
192 |
* true: am_slots[am_bckptr[X]] == X
|
193 |
*
|
194 |
* note that am_slots is always contig-packed.
|
195 |
*/
|
196 |
|
197 |
/*
|
198 |
* defines for handling of large, sparse amaps:
|
199 |
*
|
200 |
* one of the problems of array-based amaps is that if you allocate a
|
201 |
* large, sparsely-used area of virtual memory you end up allocating
|
202 |
* large arrays that, for the most part, don't get used. this is a
|
203 |
* problem for BSD in that the kernel likes to make these types of
|
204 |
* allocations to "reserve" memory for possible future use.
|
205 |
*
|
206 |
* for example, the kernel allocates (reserves) a large chunk of user
|
207 |
* VM for possible stack growth. most of the time only a page or two
|
208 |
* of this VM is actually used. since the stack is anonymous memory
|
209 |
* it makes sense for it to live in an amap, but if we allocated an
|
210 |
* amap for the entire stack range we could end up wasting a large
|
211 |
* amount of allocated KVM.
|
212 |
*
|
213 |
* for example, on the i386 at boot time we allocate two amaps for the stack
|
214 |
* of /sbin/init:
|
215 |
* 1. a 7680 slot amap at protection 0 (reserve space for stack)
|
216 |
* 2. a 512 slot amap at protection 7 (top of stack)
|
217 |
*
|
218 |
* most of the array allocated for the amaps for this is never used.
|
219 |
* the amap interface provides a way for us to avoid this problem by
|
220 |
* allowing amap_copy() to break larger amaps up into smaller sized
|
221 |
* chunks (controlled by the "canchunk" option). we use this feature
|
222 |
* to reduce our memory usage with the BSD stack management. if we
|
223 |
* are asked to create an amap with more than UVM_AMAP_LARGE slots in it,
|
224 |
* we attempt to break it up into a UVM_AMAP_CHUNK sized amap if the
|
225 |
* "canchunk" flag is set.
|
226 |
*
|
227 |
* so, in the i386 example, the 7680 slot area is never referenced so
|
228 |
* nothing gets allocated (amap_copy is never called because the protection
|
229 |
* is zero). the 512 slot area for the top of the stack is referenced.
|
230 |
* the chunking code breaks it up into 16 slot chunks (hopefully a single
|
231 |
* 16 slot chunk is enough to handle the whole stack).
|
232 |
*/
|
233 |
|
234 |
#define UVM_AMAP_LARGE	256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */
236 |
|
237 |
#ifdef _KERNEL
|
238 |
|
239 |
/*
|
240 |
* macros
|
241 |
*/
|
242 |
|
243 |
/*
 * AMAP_B2SLOT: convert byte offset B to a slot number, stored into S.
 * B must be page-aligned (KASSERTed).
 *
 * wrapped in do { } while (0) so the multi-statement macro acts as a
 * single statement: the original bare-brace form breaks when invoked
 * with a trailing semicolon in an unbraced if/else body.
 */
#define AMAP_B2SLOT(S,B) do {						\
	KASSERT(((B) & (PAGE_SIZE - 1)) == 0);				\
	(S) = (B) >> PAGE_SHIFT;					\
} while (0)
248 |
|
249 |
/*
 * lock/unlock/refs/flags macros
 *
 * trivial accessors implemented as macros; each evaluates its
 * argument exactly once, so side-effecting arguments are safe.
 */

#define amap_flags(AMAP)	((AMAP)->am_flags)
#define amap_lock(AMAP)		mutex_enter((AMAP)->am_lock)
#define amap_lock_try(AMAP)	mutex_tryenter((AMAP)->am_lock)
#define amap_refs(AMAP)		((AMAP)->am_ref)
#define amap_unlock(AMAP)	mutex_exit((AMAP)->am_lock)
|
258 |
|
259 |
/*
 * if we enable PPREF, then we have a couple of extra functions that
 * we need to prototype here...
 */

#ifdef UVM_AMAP_PPREF

#define PPREF_NONE ((int *) -1)	/* sentinel for am_ppref: not using ppref */

void amap_pp_adjref		/* adjust references */
	(struct vm_amap *, int, vsize_t, int,
	    struct vm_anon **);
void amap_pp_establish		/* establish ppref */
	(struct vm_amap *, vaddr_t);
void amap_wiperange		/* wipe part of an amap */
	(struct vm_amap *, int, int, struct vm_anon **);
#endif	/* UVM_AMAP_PPREF */
276 |
|
277 |
#endif /* _KERNEL */ |
278 |
|
279 |
#endif /* _UVM_UVM_AMAP_H_ */ |