@@ -1,116 +1,108 @@
-/*
- * Copyright (C) 2010 Piotr Jaroszyński <p.jaroszynski@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
 #ifndef _IPXE_LINUX_UACCESS_H
 #define _IPXE_LINUX_UACCESS_H
 
-FILE_LICENCE(GPL2_OR_LATER);
-
 /** @file
  *
- * iPXE user access API for linux
+ * iPXE user access API for Linux
+ *
+ * We run with no distinction between internal and external addresses,
+ * so can use trivial_virt_to_user() et al.
  *
- * In linux userspace virtual == user == phys addresses.
- * Physical addresses also being the same is wrong, but there is no general way
- * of converting userspace addresses to physical as what appears to be
- * contiguous in userspace is physically fragmented.
- * Currently only the DMA memory is special-cased, but its conversion to bus
- * addresses is done in phys_to_bus.
- * This is known to break virtio as it is passing phys addresses to the virtual
- * device.
+ * We have no concept of the underlying physical addresses, since
+ * these are not exposed to userspace. We provide a stub
+ * implementation of user_to_phys() since this is required by
+ * alloc_memblock(). We provide no implementation of phys_to_user();
+ * any code attempting to access physical addresses will therefore
+ * (correctly) fail to link.
  */
 
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
 #ifdef UACCESS_LINUX
 #define UACCESS_PREFIX_linux
 #else
 #define UACCESS_PREFIX_linux __linux_
 #endif
 
-static inline __always_inline userptr_t
-UACCESS_INLINE(linux, phys_to_user)(unsigned long phys_addr)
-{
-	return phys_addr;
-}
-
+/**
+ * Convert user buffer to physical address
+ *
+ * @v userptr		User pointer
+ * @v offset		Offset from user pointer
+ * @ret phys_addr	Physical address
+ */
 static inline __always_inline unsigned long
-UACCESS_INLINE(linux, user_to_phys)(userptr_t userptr, off_t offset)
-{
-	return userptr + offset;
+UACCESS_INLINE ( linux, user_to_phys ) ( userptr_t userptr, off_t offset ) {
+
+	/* We do not know the real underlying physical address. We
+	 * provide this stub implementation only because it is
+	 * required by alloc_memblock() (which allocates memory with
+	 * specified physical address alignment). We assume that the
+	 * low-order bits of virtual addresses match the low-order
+	 * bits of physical addresses, and so simply returning the
+	 * virtual address will suffice for the purpose of determining
+	 * alignment.
+	 */
+	return ( userptr + offset );
 }
 
 static inline __always_inline userptr_t
-UACCESS_INLINE(linux, virt_to_user)(volatile const void *addr)
-{
-	return trivial_virt_to_user(addr);
+UACCESS_INLINE ( linux, virt_to_user ) ( volatile const void *addr ) {
+	return trivial_virt_to_user ( addr );
 }
 
 static inline __always_inline void *
-UACCESS_INLINE(linux, user_to_virt)(userptr_t userptr, off_t offset)
-{
-	return trivial_user_to_virt(userptr, offset);
+UACCESS_INLINE ( linux, user_to_virt ) ( userptr_t userptr, off_t offset ) {
+	return trivial_user_to_virt ( userptr, offset );
 }
 
 static inline __always_inline userptr_t
-UACCESS_INLINE(linux, userptr_add)(userptr_t userptr, off_t offset)
-{
-	return trivial_userptr_add(userptr, offset);
+UACCESS_INLINE ( linux, userptr_add ) ( userptr_t userptr, off_t offset ) {
+	return trivial_userptr_add ( userptr, offset );
}
 
 static inline __always_inline off_t
-UACCESS_INLINE(linux, userptr_sub)(userptr_t userptr, userptr_t subtrahend)
-{
+UACCESS_INLINE ( linux, userptr_sub ) ( userptr_t userptr,
+					userptr_t subtrahend ) {
 	return trivial_userptr_sub ( userptr, subtrahend );
 }
 
 static inline __always_inline void
-UACCESS_INLINE(linux, memcpy_user)(userptr_t dest, off_t dest_off, userptr_t src, off_t src_off, size_t len)
-{
-	trivial_memcpy_user(dest, dest_off, src, src_off, len);
+UACCESS_INLINE ( linux, memcpy_user ) ( userptr_t dest, off_t dest_off,
+					userptr_t src, off_t src_off,
+					size_t len ) {
+	trivial_memcpy_user ( dest, dest_off, src, src_off, len );
 }
 
 static inline __always_inline void
-UACCESS_INLINE(linux, memmove_user)(userptr_t dest, off_t dest_off, userptr_t src, off_t src_off, size_t len)
-{
-	trivial_memmove_user(dest, dest_off, src, src_off, len);
+UACCESS_INLINE ( linux, memmove_user ) ( userptr_t dest, off_t dest_off,
+					 userptr_t src, off_t src_off,
+					 size_t len ) {
+	trivial_memmove_user ( dest, dest_off, src, src_off, len );
 }
 
 static inline __always_inline int
-UACCESS_INLINE(linux, memcmp_user)(userptr_t first, off_t first_off, userptr_t second, off_t second_off, size_t len)
-{
-	return trivial_memcmp_user(first, first_off, second, second_off, len);
+UACCESS_INLINE ( linux, memcmp_user ) ( userptr_t first, off_t first_off,
+					userptr_t second, off_t second_off,
+					size_t len ) {
+	return trivial_memcmp_user ( first, first_off, second, second_off, len);
 }
 
 static inline __always_inline void
-UACCESS_INLINE(linux, memset_user)(userptr_t buffer, off_t offset, int c, size_t len)
-{
-	trivial_memset_user(buffer, offset, c, len);
+UACCESS_INLINE ( linux, memset_user ) ( userptr_t buffer, off_t offset,
+					int c, size_t len ) {
+	trivial_memset_user ( buffer, offset, c, len );
 }
 
 static inline __always_inline size_t
-UACCESS_INLINE(linux, strlen_user)(userptr_t buffer, off_t offset)
-{
-	return trivial_strlen_user(buffer, offset);
+UACCESS_INLINE ( linux, strlen_user ) ( userptr_t buffer, off_t offset ) {
+	return trivial_strlen_user ( buffer, offset );
 }
 
 static inline __always_inline off_t
-UACCESS_INLINE(linux, memchr_user)(userptr_t buffer, off_t offset, int c, size_t len)
-{
-	return trivial_memchr_user(buffer, offset, c, len);
+UACCESS_INLINE ( linux, memchr_user ) ( userptr_t buffer, off_t offset,
+					int c, size_t len ) {
+	return trivial_memchr_user ( buffer, offset, c, len );
 }
 
 #endif /* _IPXE_LINUX_UACCESS_H */
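
As background for the unchanged "#ifdef UACCESS_LINUX" / UACCESS_PREFIX_linux block in the diff: below is a simplified, standalone sketch of how such a prefix-based API selection scheme can work. It is not iPXE's actual ipxe/api.h machinery; the macro names API_GLUE(), API_GLUE_RAW() and MY_UACCESS_INLINE() are invented for this illustration. When the Linux implementation is the selected user-access API the prefix expands to nothing, so the inline functions take the generic names; otherwise they are hidden behind a __linux_ prefix and a different implementation can provide the generic names.

/* Simplified sketch of a prefix-based API selection scheme.
 * NOT iPXE's real api.h; API_GLUE(), API_GLUE_RAW() and
 * MY_UACCESS_INLINE() are hypothetical names for this example.
 */
#include <stdio.h>

#define UACCESS_LINUX 1

#ifdef UACCESS_LINUX
#define UACCESS_PREFIX_linux
#else
#define UACCESS_PREFIX_linux __linux_
#endif

/* Two levels of pasting so the prefix macro is expanded first */
#define API_GLUE_RAW( prefix, name ) prefix ## name
#define API_GLUE( prefix, name ) API_GLUE_RAW ( prefix, name )
#define MY_UACCESS_INLINE( subsys, name ) \
	API_GLUE ( UACCESS_PREFIX_ ## subsys, name )

/* With UACCESS_LINUX defined this becomes plain user_to_phys();
 * without it, it would become __linux_user_to_phys().
 */
static inline unsigned long
MY_UACCESS_INLINE ( linux, user_to_phys ) ( unsigned long userptr,
					    long offset ) {
	return ( userptr + offset );
}

int main ( void ) {
	printf ( "%lx\n", user_to_phys ( 0x1000, 0x10 ) );
	return 0;
}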
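
The comment added inside user_to_phys() argues that returning the virtual address is sufficient for alignment checks. A minimal standalone sketch of that reasoning, assuming a 4096-byte page size and using the hypothetical helpers stub_user_to_phys() and is_aligned() (neither is part of the iPXE tree), could look like this:

/* Standalone illustration (not iPXE code): page-granular mappings
 * preserve the low-order address bits, so "addr & ( align - 1 )"
 * gives the same answer whether addr is the virtual address or the
 * real physical address, for any alignment up to the page size.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef uintptr_t userptr_t;

/* Stand-in for the stubbed user_to_phys(): just the virtual address */
static unsigned long stub_user_to_phys ( userptr_t userptr, long offset ) {
	return ( userptr + offset );
}

/* The kind of alignment test an allocator needs to perform */
static int is_aligned ( userptr_t ptr, size_t align ) {
	return ( ( stub_user_to_phys ( ptr, 0 ) & ( align - 1 ) ) == 0 );
}

int main ( void ) {
	/* aligned_alloc() guarantees the *virtual* alignment; with a
	 * 4096-byte page size the physical address then shares the
	 * same low-order bits, so the check holds either way.
	 */
	void *buf = aligned_alloc ( 4096, 8192 );

	if ( ! buf )
		return 1;
	assert ( is_aligned ( ( userptr_t ) buf, 4096 ) );
	free ( buf );
	return 0;
}

Because userspace never sees the real physical addresses, this alignment property is all the stub can (and needs to) promise; anything that genuinely needs phys_to_user() will fail to link, as the new file comment states.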