lab4/.minix-src/include/i386/profile.h @ 14
/*	$NetBSD: profile.h,v 1.33 2007/12/20 23:46:13 ad Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#ifdef _KERNEL
#include <machine/cpufunc.h>
#include <machine/lock.h>
#endif

#define	_MCOUNT_DECL static __inline void _mcount

#ifdef __ELF__
#define	MCOUNT_ENTRY	"__mcount"
#define	MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define	MCOUNT_ENTRY	"mcount"
#define	MCOUNT_COMPAT	/* nothing */
#endif

#define	MCOUNT \
MCOUNT_COMPAT \
extern void mcount(void) __asm(MCOUNT_ENTRY) \
	__attribute__((__no_instrument_function__)); \
void \
mcount(void) \
{ \
	int selfpc, frompcindex; \
	int eax, ecx, edx; \
	\
	__asm volatile("movl %%eax,%0" : "=g" (eax)); \
	__asm volatile("movl %%ecx,%0" : "=g" (ecx)); \
	__asm volatile("movl %%edx,%0" : "=g" (edx)); \
	/* \
	 * find the return address for mcount, \
	 * and the return address for mcount's caller. \
	 * \
	 * selfpc = pc pushed by mcount call \
	 */ \
	__asm volatile("movl 4(%%ebp),%0" : "=r" (selfpc)); \
	/* \
	 * frompcindex = pc pushed by call into self. \
	 */ \
	__asm volatile("movl (%%ebp),%0;movl 4(%0),%0" \
	    : "=r" (frompcindex)); \
	_mcount((u_long)frompcindex, (u_long)selfpc); \
	\
	__asm volatile("movl %0,%%edx" : : "g" (edx)); \
	__asm volatile("movl %0,%%ecx" : : "g" (ecx)); \
	__asm volatile("movl %0,%%eax" : : "g" (eax)); \
}

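#if 0
/*
 * Illustrative sketch only (not part of this header): _MCOUNT_DECL and
 * MCOUNT above are meant to be stitched together by the machine-independent
 * gmon glue, roughly as shown below.  The glue declares the portable
 * arc-recording routine with _MCOUNT_DECL and then expands MCOUNT once to
 * emit the real entry point that profiled functions call.  The body here is
 * a hedged placeholder, not the actual machine-independent implementation.
 */
_MCOUNT_DECL(u_long frompc, u_long selfpc)	/* static __inline void _mcount(...) */
{
	/* record the frompc -> selfpc call arc in the profiling buffers */
}
MCOUNT		/* emits mcount()/__mcount(): saves %eax/%ecx/%edx, digs
		 * frompc/selfpc out of the stack frame, calls _mcount() */
#endif
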
#ifdef _KERNEL
#ifdef MULTIPROCESSOR
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();
}

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define	MCOUNT_ENTER_MP()
#define	MCOUNT_EXIT_MP()
#endif

static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

#define	MCOUNT_ENTER	\
	s = (int)mcount_read_psl(); \
	mcount_disable_intr(); \
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT	\
	MCOUNT_EXIT_MP(); \
	mcount_write_psl(s);

#endif /* _KERNEL */
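
#if 0
/*
 * Illustrative sketch only (not part of this header): inside the kernel,
 * the _mcount() body brackets its table updates with the MCOUNT_ENTER and
 * MCOUNT_EXIT macros defined above.  Both macros refer to a local variable
 * named 's' by convention, so the enclosing function must declare it; the
 * fragment below is a hedged placeholder for that body.  Saving and later
 * restoring eflags (instead of unconditionally re-enabling interrupts)
 * keeps this safe when mcount is reached with interrupts already disabled.
 */
	int s;

	MCOUNT_ENTER;	/* s = saved eflags; cli; take __mcount_lock on MP */
	/* ... update the gprof call-arc counters ... */
	MCOUNT_EXIT;	/* release __mcount_lock; restore eflags from s */
#endif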