/*	$NetBSD: uvm_map.h,v 1.72 2012/10/29 16:00:05 para Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_start(MAP,ENTRY,VA); \
	} \
}

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_end(MAP,ENTRY,VA); \
	} \
}

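/*
 * A hypothetical caller sketch (not part of this header): a typical
 * range operation clips the boundary entries so that [start, end)
 * falls exactly on entry boundaries before anything is modified:
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		while (entry != &map->header && entry->start < end) {
 *			UVM_MAP_CLIP_END(map, entry, end);
 *			... operate on the clipped entry ...
 *			entry = entry->next;
 *		}
 *	}
 *	vm_map_unlock(map);
 */
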
/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */

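/*
 * These flags combine bitwise.  For example, a hypothetical caller
 * (sketch only) moving a range out of srcmap and into dstmap as one
 * contiguous chunk might pass:
 *
 *	error = uvm_map_extract(srcmap, start, size, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */
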
#endif /* _KERNEL */

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
	uint32_t		map_attrib;	/* uvm-external map attributes */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_STATIC		0x04		/* special static entries */
#define	UVM_MAP_NOMERGE		0x08		/* this entry is not mergable */

};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

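/*
 * Hypothetical walk (sketch; the map must be locked by the caller)
 * over all entries, which are threaded on a doubly-linked list
 * through the map's header sentinel:
 *
 *	struct vm_map_entry *entry;
 *
 *	for (entry = map->header.next; entry != &map->header;
 *	    entry = entry->next) {
 *		if (VM_MAPENT_ISWIRED(entry))
 *			continue;	(wired entries cannot be paged)
 *	}
 */
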
/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked; may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};

#if defined(_KERNEL)

#include <sys/callback.h>

#endif /* defined(_KERNEL) */

#define VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */

#ifdef _KERNEL
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
void		uvm_map_init_caches(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, int);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);

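/*
 * uvm_map_prepare() and uvm_map_enter() split a map request into an
 * argument-preparation phase and an insertion phase.  A hypothetical
 * two-phase caller (sketch; new_entry is an assumed preallocated
 * entry) might look like:
 *
 *	struct uvm_map_args args;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    align, flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */
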
int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);

void		uvm_map_lock_entry(struct vm_map_entry *);
void		uvm_map_unlock_entry(struct vm_map_entry *);

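/*
 * A sketch (assumed usage, following the locking protocol notes near
 * the top of this file) of marking a map busy so that would-be
 * writers wait while the current thread works without the lock held:
 *
 *	vm_map_lock(map);		(write-lock)
 *	vm_map_busy(map);		(only legal while write-locked)
 *	vm_map_unlock(map);
 *	... sleep, do I/O, etc.; write-lockers wait for busy to clear ...
 *	vm_map_lock(map);		(re-acquire exclusively)
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */
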
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))
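
/*
 * Note that the macros above reuse the `header' sentinel entry of
 * struct vm_map for storage: the sentinel's end field holds the
 * map's minimum address and its start field holds the maximum.
 */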

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */