/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.2 (Berkeley) 1/12/94
 * $FreeBSD$
 */

#ifndef _VM_EXTERN_H_
#define	_VM_EXTERN_H_

struct pmap;
struct proc;
struct vmspace;
struct vnode;
struct vmem;

#ifdef _KERNEL
struct cdev;
struct cdevsw;
struct domainset;

/* These operate on kernel virtual addresses only. */
vm_offset_t kva_alloc(vm_size_t);
void kva_free(vm_offset_t, vm_size_t);

/* These operate on pageable virtual addresses. */
vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);

/* These operate on virtual addresses backed by memory. */
vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
    int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
    int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_offset_t kmem_malloc(vm_size_t size, int flags);
vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
    int flags);
void kmem_free(vm_offset_t addr, vm_size_t size);
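
/*
 * Usage sketch (illustrative, not a prescription): a wired, physically
 * contiguous, page-aligned allocation below 4GB, e.g. for a DMA-style
 * consumer, released again with kmem_free().  The size and the physical
 * address bounds are placeholder values.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(4 * PAGE_SIZE, M_WAITOK | M_ZERO,
 *	    0, 0xffffffffUL, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *	...
 *	kmem_free(va, 4 * PAGE_SIZE);
 */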

/* This provides memory for previously allocated address space. */
int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
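
/*
 * Sketch of the two-step path (illustrative only): reserve bare KVA with
 * kva_alloc(), attach pages from the kernel object with kmem_back(), and
 * tear everything down in reverse order.  kernel_object is declared in
 * <vm/vm_object.h>; "size" is a placeholder.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(size);
 *	if (va != 0 &&
 *	    kmem_back(kernel_object, va, size, M_NOWAIT | M_ZERO) !=
 *	    KERN_SUCCESS) {
 *		kva_free(va, size);
 *		va = 0;
 *	}
 *	...
 *	kmem_unback(kernel_object, va, size);
 *	kva_free(va, size);
 */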

/* Bootstrapping. */
void kmem_bootstrap_free(vm_offset_t, vm_size_t);
void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
    bool);
void kmem_init(vm_offset_t, vm_offset_t);
void kmem_init_zero_region(void);
void kmeminit(void);

int kernacc(void *, int, int);
int useracc(void *, int, int);
int vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
    vm_ooffset_t *);
int vm_fault_disable_pagefaults(void);
void vm_fault_enable_pagefaults(int save);
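
/*
 * Nesting-safe sketch of the pagefault-disable pattern (illustrative
 * only; uaddr, kaddr and len are placeholders): while the saved state is
 * held, faults on user addresses are refused rather than serviced, so
 * copyin() and friends fail with EFAULT instead of sleeping in the fault
 * handler.
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(uaddr, kaddr, len);
 *	vm_fault_enable_pagefaults(save);
 */
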
int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count);
int vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode);
int vm_forkproc(struct thread *, struct proc *, struct thread *,
    struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int,
    objtype_t, void *, vm_ooffset_t);
int vm_mmap_object(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t,
    vm_prot_t, int, vm_object_t, vm_ooffset_t, boolean_t, struct thread *);
int vm_mmap_to_errno(int rv);
int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, struct cdevsw *, vm_ooffset_t *, vm_object_t *);
int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *,
    struct vnode *, vm_ooffset_t *, vm_object_t *, boolean_t *);
void vm_set_page_size(void);
void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
typedef int (*pmap_pinit_t)(struct pmap *pmap);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t, pmap_pinit_t);
struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
struct vmspace *vmspace_acquire_ref(struct proc *);
void vmspace_free(struct vmspace *);
void vmspace_exitfree(struct proc *);
void vmspace_switch_aio(struct vmspace *);
void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
void vnode_pager_purge_range(struct vnode *, vm_ooffset_t, vm_ooffset_t);
int vslock(void *, size_t);
void vsunlock(void *, size_t);
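
/*
 * Sketch (illustrative only; udata and ulen are placeholders): wire a
 * user buffer so that later accesses cannot fault and sleep for I/O,
 * then unwire it when done.
 *
 *	error = vslock(udata, ulen);
 *	if (error == 0) {
 *		...
 *		vsunlock(udata, ulen);
 *	}
 */
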
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
void vm_thread_stack_back(struct domainset *ds, vm_offset_t kaddr,
    vm_page_t ma[], int npages, int req_class);
u_int vm_active_count(void);
u_int vm_inactive_count(void);
u_int vm_laundry_count(void);
u_int vm_wait_count(void);

/*
 * Is pa a multiple of alignment, which is a power-of-two?
 */
static inline bool
vm_addr_align_ok(vm_paddr_t pa, u_long alignment)
{
#ifdef INVARIANTS
	if (!powerof2(alignment))
		panic("%s: alignment is not a power of 2: %#lx",
		    __func__, alignment);
#endif
	return ((pa & (alignment - 1)) == 0);
}

/*
 * Do the first and last addresses of a range match in all bits except the ones
 * in -boundary (a power-of-two)?  For boundary == 0, all addresses match.
 */
static inline bool
vm_addr_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
{
#ifdef INVARIANTS
	if (!powerof2(boundary))
		panic("%s: boundary is not a power of 2: %#jx",
		    __func__, (uintmax_t)boundary);
#endif
	return (((pa ^ (pa + size - 1)) & -boundary) == 0);
}

static inline bool
vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
    vm_paddr_t boundary)
{
	return (vm_addr_align_ok(pa, alignment) &&
	    vm_addr_bound_ok(pa, size, boundary));
}
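
/*
 * Illustrative values: vm_addr_align_ok(0x10000, 0x1000) holds, and
 * vm_addr_bound_ok(0x10000, 0x2000, 0x10000) holds, but
 * vm_addr_bound_ok(0x1f000, 0x2000, 0x10000) does not, because the range
 * [0x1f000, 0x20fff] crosses the 64KB boundary at 0x20000.
 */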
#endif				/* _KERNEL */
#endif				/* !_VM_EXTERN_H_ */