
[arm] Add support for 64-bit ARM (Aarch64)

Signed-off-by: Michael Brown <mcb30@ipxe.org>
tags/v1.20.1
Michael Brown, 8 years ago
commit 17c6f322ee

src/arch/arm/core/arm_io.c  +5 -0

@@ -84,5 +84,10 @@ PROVIDE_IOAPI_INLINE ( arm, writew );
 PROVIDE_IOAPI_INLINE ( arm, writel );
 PROVIDE_IOAPI_INLINE ( arm, iodelay );
 PROVIDE_IOAPI_INLINE ( arm, mb );
+#ifdef __aarch64__
+PROVIDE_IOAPI_INLINE ( arm, readq );
+PROVIDE_IOAPI_INLINE ( arm, writeq );
+#else
 PROVIDE_IOAPI ( arm, readq, arm32_readq );
 PROVIDE_IOAPI ( arm, writeq, arm32_writeq );
+#endif
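On AArch64 a 64-bit MMIO access is a single ldr/str of an x register, so readq/writeq can now be provided inline; 32-bit ARM keeps the out-of-line arm32_readq/arm32_writeq helpers, which have to compose the access from two 32-bit operations. A hedged sketch of that two-word approach, for illustration only (the real arm32_readq is not part of this hunk; little-endian, low-word-first ordering is assumed):

    /* Illustrative sketch, not code from this commit: build a 64-bit
     * read out of two 32-bit MMIO reads on a 32-bit ARM platform.
     */
    static uint64_t example_arm32_readq ( volatile uint64_t *io_addr ) {
            volatile uint32_t *addr32 = ( volatile uint32_t * ) io_addr;
            uint32_t low = addr32[0];
            uint32_t high = addr32[1];
            return ( ( ( uint64_t ) high << 32 ) | low );
    }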

src/arch/arm/include/bits/xen.h  +9 -0

@@ -10,12 +10,21 @@
 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
 /* Hypercall registers */
+#ifdef __aarch64__
+#define XEN_HC "x16"
+#define XEN_REG1 "x0"
+#define XEN_REG2 "x1"
+#define XEN_REG3 "x2"
+#define XEN_REG4 "x3"
+#define XEN_REG5 "x4"
+#else
 #define XEN_HC "r12"
 #define XEN_REG1 "r0"
 #define XEN_REG2 "r1"
 #define XEN_REG3 "r2"
 #define XEN_REG4 "r3"
 #define XEN_REG5 "r4"
+#endif
 
 /**
  * Issue hypercall with one argument
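These macros name the registers fixed by the Xen ARM hypercall convention: the hypercall number travels in r12 (x16 on AArch64) and arguments in r0-r4 (x0-x4). A minimal sketch of how a one-argument wrapper can be built on top of them, assuming Xen's "hvc #0xEA1" hypercall tag (the header's actual wrappers are further down the file and not shown in this hunk):

    /* Sketch only, not the header's own wrapper: issue hypercall number
     * "hypercall" with one argument, returning the value left in XEN_REG1.
     * Register variables pin the operands to the ABI registers named by
     * the macros above.
     */
    static inline unsigned long example_xen_hypercall_1 ( unsigned int hypercall,
                                                          unsigned long arg1 ) {
            register unsigned long hc __asm__ ( XEN_HC ) = hypercall;
            register unsigned long reg1 __asm__ ( XEN_REG1 ) = arg1;

            __asm__ __volatile__ ( "hvc #0xEA1"
                                   : "+r" ( reg1 )
                                   : "r" ( hc )
                                   : "memory" );
            return reg1;
    }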

src/arch/arm/include/ipxe/arm_io.h  +30 -14

@@ -43,34 +43,46 @@ IOAPI_INLINE ( arm, bus_to_phys ) ( unsigned long bus_addr ) {
  *
  */
 
-#define ARM_READX( _api_func, _type, _insn_suffix )			      \
+#define ARM_READX( _api_func, _type, _insn_suffix, _reg_prefix )	      \
 static inline __always_inline _type					      \
 IOAPI_INLINE ( arm, _api_func ) ( volatile _type *io_addr ) {		      \
 	_type data;							      \
-	__asm__ __volatile__ ( "ldr" _insn_suffix " %0, %1"		      \
+	__asm__ __volatile__ ( "ldr" _insn_suffix " %" _reg_prefix "0, %1"    \
 			       : "=r" ( data ) : "Qo" ( *io_addr ) );	      \
 	return data;							      \
 }
-ARM_READX ( readb, uint8_t, "b" );
-ARM_READX ( readw, uint16_t, "h" );
-ARM_READX ( readl, uint32_t, "" );
+#ifdef __aarch64__
+ARM_READX ( readb, uint8_t, "b", "w" );
+ARM_READX ( readw, uint16_t, "h", "w" );
+ARM_READX ( readl, uint32_t, "", "w" );
+ARM_READX ( readq, uint64_t, "", "" );
+#else
+ARM_READX ( readb, uint8_t, "b", "" );
+ARM_READX ( readw, uint16_t, "h", "" );
+ARM_READX ( readl, uint32_t, "", "" );
+#endif
 
-#define ARM_WRITEX( _api_func, _type, _insn_suffix )			      \
+#define ARM_WRITEX( _api_func, _type, _insn_suffix, _reg_prefix )			\
 static inline __always_inline void					      \
-IOAPI_INLINE ( arm, _api_func ) ( _type data,				      \
-				  volatile _type *io_addr ) {		      \
-	__asm__ __volatile__ ( "str" _insn_suffix " %0, %1"		      \
+IOAPI_INLINE ( arm, _api_func ) ( _type data, volatile _type *io_addr ) {     \
+	__asm__ __volatile__ ( "str" _insn_suffix " %" _reg_prefix "0, %1"    \
 			       : : "r" ( data ), "Qo" ( *io_addr ) );	      \
 }
-ARM_WRITEX ( writeb, uint8_t, "b" );
-ARM_WRITEX ( writew, uint16_t, "h" );
-ARM_WRITEX ( writel, uint32_t, "" );
+#ifdef __aarch64__
+ARM_WRITEX ( writeb, uint8_t, "b", "w" );
+ARM_WRITEX ( writew, uint16_t, "h", "w" );
+ARM_WRITEX ( writel, uint32_t, "", "w" );
+ARM_WRITEX ( writeq, uint64_t, "", "" );
+#else
+ARM_WRITEX ( writeb, uint8_t, "b", "" );
+ARM_WRITEX ( writew, uint16_t, "h", "" );
+ARM_WRITEX ( writel, uint32_t, "", "" );
+#endif
 
 /*
  * Slow down I/O
  *
  */
-
 static inline __always_inline void
 IOAPI_INLINE ( arm, iodelay ) ( void ) {
 	/* Nothing to do */
@@ -80,10 +92,14 @@ IOAPI_INLINE ( arm, iodelay ) ( void ) {
  * Memory barrier
  *
  */
-
 static inline __always_inline void
 IOAPI_INLINE ( arm, mb ) ( void ) {
+
+#ifdef __aarch64__
	__asm__ __volatile__ ( "dmb sy" );
+#else
 	__asm__ __volatile__ ( "dmb" );
+#endif
 }
 
 #endif /* _IPXE_ARM_IO_H */
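The new _reg_prefix parameter exists because AArch64 assembler syntax encodes operand width in the register name: byte, halfword and word accesses must name a w register (%w0), readq/writeq use the full x register (no prefix), and 32-bit ARM needs no prefix at all. For illustration, ARM_READX ( readl, uint32_t, "", "w" ) expands on AArch64 to roughly the following (sketch; the real function name is produced by IOAPI_INLINE and is simplified here):

    static inline __always_inline uint32_t
    arm_readl_expanded ( volatile uint32_t *io_addr ) {
            uint32_t data;
            /* "ldr" "" " %" "w" "0, %1" pastes together as "ldr %w0, %1" */
            __asm__ __volatile__ ( "ldr %w0, %1"
                                   : "=r" ( data ) : "Qo" ( *io_addr ) );
            return data;
    }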

src/arch/arm64/Makefile  +22 -0

@@ -0,0 +1,22 @@
+# ARM64-specific directories containing source files
+#
+SRCDIRS		+= arch/arm64/core
+
+# ARM64-specific flags
+#
+CFLAGS		+= -mabi=lp64 -mlittle-endian -mcmodel=small
+CFLAGS		+= -fomit-frame-pointer
+ASFLAGS		+= -mabi=lp64 -EL
+
+# EFI requires -fshort-wchar, and nothing else currently uses wchar_t
+#
+CFLAGS		+= -fshort-wchar
+
+# Include common ARM Makefile
+MAKEDEPS	+= arch/arm/Makefile
+include arch/arm/Makefile
+
+# Include platform-specific Makefile
+#
+MAKEDEPS	+= arch/arm64/Makefile.$(PLATFORM)
+include arch/arm64/Makefile.$(PLATFORM)

src/arch/arm64/Makefile.efi  +14 -0

@@ -0,0 +1,14 @@
+# -*- makefile -*- : Force emacs to use Makefile mode
+
+# Specify EFI image builder
+#
+ELF2EFI		= $(ELF2EFI64)
+
+# Specify EFI boot file
+#
+EFI_BOOT_FILE	= bootaa64.efi
+
+# Include generic EFI Makefile
+#
+MAKEDEPS	+= arch/arm/Makefile.efi
+include arch/arm/Makefile.efi

src/arch/arm64/core/arm64_bigint.c  +103 -0

@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 Michael Brown <mbrown@fensystems.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+#include <string.h>
+#include <ipxe/bigint.h>
+
+/** @file
+ *
+ * Big integer support
+ */
+
+/**
+ * Multiply big integers
+ *
+ * @v multiplicand0	Element 0 of big integer to be multiplied
+ * @v multiplier0	Element 0 of big integer to be multiplied
+ * @v result0		Element 0 of big integer to hold result
+ * @v size		Number of elements
+ */
+void bigint_multiply_raw ( const uint64_t *multiplicand0,
+			   const uint64_t *multiplier0,
+			   uint64_t *result0, unsigned int size ) {
+	const bigint_t ( size ) __attribute__ (( may_alias )) *multiplicand =
+		( ( const void * ) multiplicand0 );
+	const bigint_t ( size ) __attribute__ (( may_alias )) *multiplier =
+		( ( const void * ) multiplier0 );
+	bigint_t ( size * 2 ) __attribute__ (( may_alias )) *result =
+		( ( void * ) result0 );
+	unsigned int i;
+	unsigned int j;
+	uint64_t multiplicand_element;
+	uint64_t multiplier_element;
+	uint64_t *result_elements;
+	uint64_t discard_low;
+	uint64_t discard_high;
+	uint64_t discard_temp_low;
+	uint64_t discard_temp_high;
+
+	/* Zero result */
+	memset ( result, 0, sizeof ( *result ) );
+
+	/* Multiply integers one element at a time */
+	for ( i = 0 ; i < size ; i++ ) {
+		multiplicand_element = multiplicand->element[i];
+		for ( j = 0 ; j < size ; j++ ) {
+			multiplier_element = multiplier->element[j];
+			result_elements = &result->element[ i + j ];
+			/* Perform a single multiply, and add the
+			 * resulting double-element into the result,
+			 * carrying as necessary.  The carry can
+			 * never overflow beyond the end of the
+			 * result, since:
+			 *
+			 *     a < 2^{n}, b < 2^{n} => ab < 2^{2n}
+			 */
+			__asm__ __volatile__ ( "mul %1, %6, %7\n\t"
+					       "umulh %2, %6, %7\n\t"
+					       "ldp %3, %4, [%0]\n\t"
+					       "adds %3, %3, %1\n\t"
+					       "adcs %4, %4, %2\n\t"
+					       "stp %3, %4, [%0], #16\n\t"
+					       "bcc 2f\n\t"
+					       "\n1:\n\t"
+					       "ldr %3, [%0]\n\t"
+					       "adcs %3, %3, xzr\n\t"
+					       "str %3, [%0], #8\n\t"
+					       "bcs 1b\n\t"
+					       "\n2:\n\t"
+					       : "+r" ( result_elements ),
+						 "=&r" ( discard_low ),
+						 "=&r" ( discard_high ),
+						 "=r" ( discard_temp_low ),
+						 "=r" ( discard_temp_high ),
+						 "+m" ( *result )
+					       : "r" ( multiplicand_element ),
+						 "r" ( multiplier_element )
+					       : "cc" );
+		}
+	}
+}
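The mul/umulh pair produces the low and high 64 bits of the 128-bit product of two elements, and the adcs/ldr/str loop ripples the resulting carry up through the remaining result elements. A portable reference for what one inner-loop step computes, using GCC's unsigned __int128 (comparison sketch only, not iPXE code):

    /* Reference sketch of a single multiply-accumulate step: "mul" is the
     * low 64 bits of the product, "umulh" the high 64 bits; the sum is
     * added into result[index..] with carry propagation.
     */
    static void example_mul_step ( uint64_t a, uint64_t b,
                                   uint64_t *result, unsigned int index,
                                   unsigned int result_len ) {
            unsigned __int128 carry = ( ( unsigned __int128 ) a * b );
            unsigned int k;

            for ( k = index ; carry && ( k < result_len ) ; k++ ) {
                    carry += result[k];
                    result[k] = ( uint64_t ) carry;
                    carry >>= 64;
            }
    }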

src/arch/arm64/core/setjmp.S  +56 -0

@@ -0,0 +1,56 @@
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
+
+	.text
+
+	/* Must match jmp_buf structure layout */
+	.struct	0
+env_x19_x20:	.quad	0, 0
+env_x21_x22:	.quad	0, 0
+env_x23_x24:	.quad	0, 0
+env_x25_x26:	.quad	0, 0
+env_x27_x28:	.quad	0, 0
+env_x29_x30:	.quad	0, 0
+env_sp:		.quad	0
+	.previous
+
+/*
+ * Save stack context for non-local goto
+ */
+	.globl	setjmp
+	.type	setjmp, %function
+setjmp:
+	/* Store registers */
+	stp	x19, x20, [x0, #env_x19_x20]
+	stp	x21, x22, [x0, #env_x21_x22]
+	stp	x23, x24, [x0, #env_x23_x24]
+	stp	x25, x26, [x0, #env_x25_x26]
+	stp	x27, x28, [x0, #env_x27_x28]
+	stp	x29, x30, [x0, #env_x29_x30]
+	mov	x16, sp
+	str	x16, [x0, #env_sp]
+	/* Return 0 when returning as setjmp() */
+	mov	x0, #0
+	ret
+	.size	setjmp, . - setjmp
+
+/*
+ * Non-local jump to a saved stack context
+ */
+	.globl	longjmp
+	.type	longjmp, %function
+longjmp:
+	/* Restore registers */
+	ldp	x19, x20, [x0, #env_x19_x20]
+	ldp	x21, x22, [x0, #env_x21_x22]
+	ldp	x23, x24, [x0, #env_x23_x24]
+	ldp	x25, x26, [x0, #env_x25_x26]
+	ldp	x27, x28, [x0, #env_x27_x28]
+	ldp	x29, x30, [x0, #env_x29_x30]
+	ldr	x16, [x0, #env_sp]
+	mov	sp, x16
+	/* Force result to non-zero */
+	cmp	w1, #0
+	csinc	w0, w1, w1, ne
+	/* Return to setjmp() caller */
+	br	x30
+	.size	longjmp, . - longjmp
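These routines implement the usual setjmp/longjmp contract against the jmp_buf layout declared in the new include/setjmp.h: the AAPCS64 callee-saved registers x19-x28, the frame pointer and link register (x29/x30) and the stack pointer are saved, and the csinc forces longjmp to deliver a non-zero value so the setjmp call site can tell the two returns apart. A standard usage sketch (hypothetical caller, not part of the commit):

    #include <setjmp.h>

    static jmp_buf env;

    static void fail ( void ) {
            /* Jump back to the setjmp() call site, making it return 1 */
            longjmp ( env, 1 );
    }

    static int example ( void ) {
            if ( setjmp ( env ) ) {
                    /* Reached only via longjmp() */
                    return -1;
            }
            fail ();
            return 0;   /* never reached */
    }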

src/arch/arm64/include/bits/bigint.h  +317 -0

@@ -0,0 +1,317 @@
+#ifndef _BITS_BIGINT_H
+#define _BITS_BIGINT_H
+
+/** @file
+ *
+ * Big integer support
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+#include <string.h>
+#include <strings.h>
+
+/** Element of a big integer */
+typedef uint64_t bigint_element_t;
+
+/**
+ * Initialise big integer
+ *
+ * @v value0		Element 0 of big integer to initialise
+ * @v size		Number of elements
+ * @v data		Raw data
+ * @v len		Length of raw data
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_init_raw ( uint64_t *value0, unsigned int size,
+		  const void *data, size_t len ) {
+	size_t pad_len = ( sizeof ( bigint_t ( size ) ) - len );
+	uint8_t *value_byte = ( ( void * ) value0 );
+	const uint8_t *data_byte = ( data + len );
+
+	/* Copy raw data in reverse order, padding with zeros */
+	while ( len-- )
+		*(value_byte++) = *(--data_byte);
+	while ( pad_len-- )
+		*(value_byte++) = 0;
+}
+
+/**
+ * Add big integers
+ *
+ * @v addend0		Element 0 of big integer to add
+ * @v value0		Element 0 of big integer to be added to
+ * @v size		Number of elements
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_add_raw ( const uint64_t *addend0, uint64_t *value0,
+		 unsigned int size ) {
+	bigint_t ( size ) __attribute__ (( may_alias )) *value =
+		( ( void * ) value0 );
+	uint64_t *discard_addend;
+	uint64_t *discard_value;
+	uint64_t discard_addend_i;
+	uint64_t discard_value_i;
+	unsigned int discard_size;
+
+	__asm__ __volatile__ ( "cmn xzr, xzr\n\t" /* clear CF */
+			       "\n1:\n\t"
+			       "ldr %3, [%0], #8\n\t"
+			       "ldr %4, [%1]\n\t"
+			       "adcs %4, %4, %3\n\t"
+			       "str %4, [%1], #8\n\t"
+			       "sub %w2, %w2, #1\n\t"
+			       "cbnz %w2, 1b\n\t"
+			       : "=r" ( discard_addend ),
+				 "=r" ( discard_value ),
+				 "=r" ( discard_size ),
+				 "=r" ( discard_addend_i ),
+				 "=r" ( discard_value_i ),
+				 "+m" ( *value )
+			       : "0" ( addend0 ), "1" ( value0 ), "2" ( size )
+			       : "cc" );
+}
+
+/**
+ * Subtract big integers
+ *
+ * @v subtrahend0	Element 0 of big integer to subtract
+ * @v value0		Element 0 of big integer to be subtracted from
+ * @v size		Number of elements
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_subtract_raw ( const uint64_t *subtrahend0, uint64_t *value0,
+		      unsigned int size ) {
+	bigint_t ( size ) __attribute__ (( may_alias )) *value =
+		( ( void * ) value0 );
+	uint64_t *discard_subtrahend;
+	uint64_t *discard_value;
+	uint64_t discard_subtrahend_i;
+	uint64_t discard_value_i;
+	unsigned int discard_size;
+
+	__asm__ __volatile__ ( "cmp xzr, xzr\n\t" /* set CF */
+			       "\n1:\n\t"
+			       "ldr %3, [%0], #8\n\t"
+			       "ldr %4, [%1]\n\t"
+			       "sbcs %4, %4, %3\n\t"
+			       "str %4, [%1], #8\n\t"
+			       "sub %w2, %w2, #1\n\t"
+			       "cbnz %w2, 1b\n\t"
+			       : "=r" ( discard_subtrahend ),
+				 "=r" ( discard_value ),
+				 "=r" ( discard_size ),
+				 "=r" ( discard_subtrahend_i ),
+				 "=r" ( discard_value_i ),
+				 "+m" ( *value )
+			       : "0" ( subtrahend0 ), "1" ( value0 ),
+				 "2" ( size )
+			       : "cc" );
+}
+
+/**
+ * Rotate big integer left
+ *
+ * @v value0		Element 0 of big integer
+ * @v size		Number of elements
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_rol_raw ( uint64_t *value0, unsigned int size ) {
+	bigint_t ( size ) __attribute__ (( may_alias )) *value =
+		( ( void * ) value0 );
+	uint64_t *discard_value;
+	uint64_t discard_value_i;
+	unsigned int discard_size;
+
+	__asm__ __volatile__ ( "cmn xzr, xzr\n\t" /* clear CF */
+			       "\n1:\n\t"
+			       "ldr %2, [%0]\n\t"
+			       "adcs %2, %2, %2\n\t"
+			       "str %2, [%0], #8\n\t"
+			       "sub %w1, %w1, #1\n\t"
+			       "cbnz %w1, 1b\n\t"
+			       : "=r" ( discard_value ),
+				 "=r" ( discard_size ),
+				 "=r" ( discard_value_i ),
+				 "+m" ( *value )
+			       : "0" ( value0 ), "1" ( size )
+			       : "cc" );
+}
+
+/**
+ * Rotate big integer right
+ *
+ * @v value0		Element 0 of big integer
+ * @v size		Number of elements
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_ror_raw ( uint64_t *value0, unsigned int size ) {
+	bigint_t ( size ) __attribute__ (( may_alias )) *value =
+		( ( void * ) value0 );
+	uint64_t *discard_value;
+	uint64_t discard_value_i;
+	uint64_t discard_value_j;
+	unsigned int discard_size;
+
+	__asm__ __volatile__ ( "mov %3, #0\n\t"
+			       "\n1:\n\t"
+			       "sub %w1, %w1, #1\n\t"
+			       "ldr %2, [%0, %1, lsl #3]\n\t"
+			       "extr %3, %3, %2, #1\n\t"
+			       "str %3, [%0, %1, lsl #3]\n\t"
+			       "mov %3, %2\n\t"
+			       "cbnz %w1, 1b\n\t"
+			       : "=r" ( discard_value ),
+				 "=r" ( discard_size ),
+				 "=r" ( discard_value_i ),
+				 "=r" ( discard_value_j ),
+				 "+m" ( *value )
+			       : "0" ( value0 ), "1" ( size ) );
+}
+
+/**
+ * Test if big integer is equal to zero
+ *
+ * @v value0		Element 0 of big integer
+ * @v size		Number of elements
+ * @ret is_zero		Big integer is equal to zero
+ */
+static inline __attribute__ (( always_inline, pure )) int
+bigint_is_zero_raw ( const uint64_t *value0, unsigned int size ) {
+	const uint64_t *value = value0;
+	uint64_t value_i;
+
+	do {
+		value_i = *(value++);
+		if ( value_i )
+			break;
+	} while ( --size );
+
+	return ( value_i == 0 );
+}
+
+/**
+ * Compare big integers
+ *
+ * @v value0		Element 0 of big integer
+ * @v reference0	Element 0 of reference big integer
+ * @v size		Number of elements
+ * @ret geq		Big integer is greater than or equal to the reference
+ */
+static inline __attribute__ (( always_inline, pure )) int
+bigint_is_geq_raw ( const uint64_t *value0, const uint64_t *reference0,
+		    unsigned int size ) {
+	const uint64_t *value = ( value0 + size );
+	const uint64_t *reference = ( reference0 + size );
+	uint64_t value_i;
+	uint64_t reference_i;
+
+	do {
+		value_i = *(--value);
+		reference_i = *(--reference);
+		if ( value_i != reference_i )
+			break;
+	} while ( --size );
+
+	return ( value_i >= reference_i );
+}
+
+/**
+ * Test if bit is set in big integer
+ *
+ * @v value0		Element 0 of big integer
+ * @v size		Number of elements
+ * @v bit		Bit to test
+ * @ret is_set		Bit is set
+ */
+static inline __attribute__ (( always_inline )) int
+bigint_bit_is_set_raw ( const uint64_t *value0, unsigned int size,
+			unsigned int bit ) {
+	const bigint_t ( size ) __attribute__ (( may_alias )) *value =
+		( ( const void * ) value0 );
+	unsigned int index = ( bit / ( 8 * sizeof ( value->element[0] ) ) );
+	unsigned int subindex = ( bit % ( 8 * sizeof ( value->element[0] ) ) );
+
+	return ( !! ( value->element[index] & ( 1UL << subindex ) ) );
+}
+
+/**
+ * Find highest bit set in big integer
+ *
+ * @v value0		Element 0 of big integer
+ * @v size		Number of elements
+ * @ret max_bit		Highest bit set + 1 (or 0 if no bits set)
+ */
+static inline __attribute__ (( always_inline )) int
+bigint_max_set_bit_raw ( const uint64_t *value0, unsigned int size ) {
+	const uint64_t *value = ( value0 + size );
+	int max_bit = ( 8 * sizeof ( bigint_t ( size ) ) );
+	uint64_t value_i;
+
+	do {
+		value_i = *(--value);
+		max_bit -= ( 64 - fls ( value_i ) );
+		if ( value_i )
+			break;
+	} while ( --size );
+
+	return max_bit;
+}
+
+/**
+ * Grow big integer
+ *
+ * @v source0		Element 0 of source big integer
+ * @v source_size	Number of elements in source big integer
+ * @v dest0		Element 0 of destination big integer
+ * @v dest_size		Number of elements in destination big integer
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_grow_raw ( const uint64_t *source0, unsigned int source_size,
+		  uint64_t *dest0, unsigned int dest_size ) {
+	unsigned int pad_size = ( dest_size - source_size );
+
+	memcpy ( dest0, source0, sizeof ( bigint_t ( source_size ) ) );
+	memset ( ( dest0 + source_size ), 0, sizeof ( bigint_t ( pad_size ) ) );
+}
+
+/**
+ * Shrink big integer
+ *
+ * @v source0		Element 0 of source big integer
+ * @v source_size	Number of elements in source big integer
+ * @v dest0		Element 0 of destination big integer
+ * @v dest_size		Number of elements in destination big integer
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_shrink_raw ( const uint64_t *source0, unsigned int source_size __unused,
+		    uint64_t *dest0, unsigned int dest_size ) {
+
+	memcpy ( dest0, source0, sizeof ( bigint_t ( dest_size ) ) );
+}
+
+/**
+ * Finalise big integer
+ *
+ * @v value0		Element 0 of big integer to finalise
+ * @v size		Number of elements
+ * @v out		Output buffer
+ * @v len		Length of output buffer
+ */
+static inline __attribute__ (( always_inline )) void
+bigint_done_raw ( const uint64_t *value0, unsigned int size __unused,
+		  void *out, size_t len ) {
+	const uint8_t *value_byte = ( ( const void * ) value0 );
+	uint8_t *out_byte = ( out + len );
+
+	/* Copy raw data in reverse order */
+	while ( len-- )
+		*(--out_byte) = *(value_byte++);
+}
+
+extern void bigint_multiply_raw ( const uint64_t *multiplicand0,
+				  const uint64_t *multiplier0,
+				  uint64_t *value0, unsigned int size );
+
+#endif /* _BITS_BIGINT_H */
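Each raw primitive works on a little-endian array of 64-bit elements; bigint_add_raw, for instance, is a carry-propagating multiword add (the "cmn xzr, xzr" clears the carry flag before the adcs loop). A portable sketch of the same semantics, for reference only (assumes nothing beyond GCC's __builtin_add_overflow):

    /* Reference sketch of bigint_add_raw() semantics in portable C:
     * value := value + addend over "size" little-endian 64-bit elements,
     * rippling the carry between elements and discarding the final carry.
     */
    static void example_add_raw ( const uint64_t *addend0, uint64_t *value0,
                                  unsigned int size ) {
            unsigned int carry = 0;
            unsigned int i;

            for ( i = 0 ; i < size ; i++ ) {
                    uint64_t sum;
                    unsigned int c1 = __builtin_add_overflow ( value0[i], addend0[i], &sum );
                    unsigned int c2 = __builtin_add_overflow ( sum, ( uint64_t ) carry, &sum );
                    value0[i] = sum;
                    carry = ( c1 | c2 );
            }
    }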

src/arch/arm64/include/bits/bitops.h  +100 -0

@@ -0,0 +1,100 @@
+#ifndef _BITS_BITOPS_H
+#define _BITS_BITOPS_H
+
+/** @file
+ *
+ * ARM bit operations
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+
+/**
+ * Test and set bit atomically
+ *
+ * @v bit		Bit to set
+ * @v bits		Bit field
+ * @ret old		Old value of bit (zero or non-zero)
+ */
+static inline __attribute__ (( always_inline )) int
+test_and_set_bit ( unsigned int bit, volatile void *bits ) {
+	unsigned int index = ( bit / 64 );
+	unsigned int offset = ( bit % 64 );
+	volatile uint64_t *qword = ( ( ( volatile uint64_t * ) bits ) + index );
+	uint64_t mask = ( 1UL << offset );
+	uint64_t old;
+	uint64_t new;
+	uint32_t flag;
+
+	__asm__ __volatile__ ( "\n1:\n\t"
+			       "ldxr %0, %3\n\t"
+			       "orr %1, %0, %4\n\t"
+			       "stxr %w2, %1, %3\n\t"
+			       "tst %w2, %w2\n\t"
+			       "bne 1b\n\t"
+			       : "=&r" ( old ), "=&r" ( new ), "=&r" ( flag ),
+				 "+Q" ( *qword )
+			       : "r" ( mask )
+			       : "cc" );
+
+	return ( !! ( old & mask ) );
+}
+
+/**
+ * Test and clear bit atomically
+ *
+ * @v bit		Bit to set
+ * @v bits		Bit field
+ * @ret old		Old value of bit (zero or non-zero)
+ */
+static inline __attribute__ (( always_inline )) int
+test_and_clear_bit ( unsigned int bit, volatile void *bits ) {
+	unsigned int index = ( bit / 64 );
+	unsigned int offset = ( bit % 64 );
+	volatile uint64_t *qword = ( ( ( volatile uint64_t * ) bits ) + index );
+	uint64_t mask = ( 1UL << offset );
+	uint64_t old;
+	uint64_t new;
+	uint32_t flag;
+
+	__asm__ __volatile__ ( "\n1:\n\t"
+			       "ldxr %0, %3\n\t"
+			       "bic %1, %0, %4\n\t"
+			       "stxr %w2, %1, %3\n\t"
+			       "tst %w2, %w2\n\t"
+			       "bne 1b\n\t"
+			       : "=&r" ( old ), "=&r" ( new ), "=&r" ( flag ),
+				 "+Q" ( *qword )
+			       : "r" ( mask )
+			       : "cc" );
+
+	return ( !! ( old & mask ) );
+}
+
+/**
+ * Set bit atomically
+ *
+ * @v bit		Bit to set
+ * @v bits		Bit field
+ */
+static inline __attribute__ (( always_inline )) void
+set_bit ( unsigned int bit, volatile void *bits ) {
+
+	test_and_set_bit ( bit, bits );
+}
+
+/**
+ * Clear bit atomically
+ *
+ * @v bit		Bit to set
+ * @v bits		Bit field
+ */
+static inline __attribute__ (( always_inline )) void
+clear_bit ( unsigned int bit, volatile void *bits ) {
+
+	test_and_clear_bit ( bit, bits );
+}
+
+#endif /* _BITS_BITOPS_H */
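Each operation is an ldxr/stxr (load-exclusive / store-exclusive) retry loop on the 64-bit word containing the bit, which is how AArch64 expresses an atomic read-modify-write without the ARMv8.1 LSE atomics. In terms of C11 atomics the same effect can be sketched as follows (reference only; iPXE itself uses the inline assembly above):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Reference sketch of test_and_set_bit() using C11 atomics */
    static int example_test_and_set_bit ( unsigned int bit,
                                          _Atomic uint64_t *bits ) {
            _Atomic uint64_t *qword = ( bits + ( bit / 64 ) );
            uint64_t mask = ( 1ULL << ( bit % 64 ) );
            uint64_t old = atomic_fetch_or ( qword, mask );

            return ( !! ( old & mask ) );
    }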

src/arch/arm64/include/bits/byteswap.h  +47 -0

@@ -0,0 +1,47 @@
+#ifndef _BITS_BYTESWAP_H
+#define _BITS_BYTESWAP_H
+
+/** @file
+ *
+ * Byte-order swapping functions
+ *
+ */
+
+#include <stdint.h>
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+static inline __attribute__ (( always_inline, const )) uint16_t
+__bswap_variable_16 ( uint16_t x ) {
+	__asm__ ( "rev16 %0, %1" : "=r" ( x ) : "r" ( x ) );
+	return x;
+}
+
+static inline __attribute__ (( always_inline )) void
+__bswap_16s ( uint16_t *x ) {
+	*x = __bswap_variable_16 ( *x );
+}
+
+static inline __attribute__ (( always_inline, const )) uint32_t
+__bswap_variable_32 ( uint32_t x ) {
+	__asm__ ( "rev32 %0, %1" : "=r" ( x ) : "r" ( x ) );
+	return x;
+}
+
+static inline __attribute__ (( always_inline )) void
+__bswap_32s ( uint32_t *x ) {
+	*x = __bswap_variable_32 ( *x );
+}
+
+static inline __attribute__ (( always_inline, const )) uint64_t
+__bswap_variable_64 ( uint64_t x ) {
+	__asm__ ( "rev %0, %1" : "=r" ( x ) : "r" ( x ) );
+	return x;
+}
+
+static inline __attribute__ (( always_inline )) void
+__bswap_64s ( uint64_t *x ) {
+	*x = __bswap_variable_64 ( *x );
+}
+
+#endif
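rev16, rev32 and rev reverse the bytes within a 16-bit, 32-bit and 64-bit quantity respectively; functionally these match what the compiler exposes as __builtin_bswap16/32/64, so a quick self-check could look like this (sketch, assuming GCC/clang builtins):

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: the inline assembly above matches the compiler builtins */
    static void example_bswap_check ( void ) {
            assert ( __bswap_variable_16 ( 0x1234 ) == __builtin_bswap16 ( 0x1234 ) );
            assert ( __bswap_variable_32 ( 0x12345678UL ) ==
                     __builtin_bswap32 ( 0x12345678UL ) );
            assert ( __bswap_variable_64 ( 0x123456789abcdef0ULL ) ==
                     __builtin_bswap64 ( 0x123456789abcdef0ULL ) );
    }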

src/arch/arm64/include/bits/compiler.h  +16 -0

@@ -0,0 +1,16 @@
+#ifndef _BITS_COMPILER_H
+#define _BITS_COMPILER_H
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/** Dummy relocation type */
+#define RELOC_TYPE_NONE R_AARCH64_NULL
+
+#ifndef ASSEMBLY
+
+#define __asmcall
+#define __libgcc
+
+#endif /* ASSEMBLY */
+
+#endif /*_BITS_COMPILER_H */

src/arch/arm64/include/bits/profile.h  +30 -0

@@ -0,0 +1,30 @@
+#ifndef _BITS_PROFILE_H
+#define _BITS_PROFILE_H
+
+/** @file
+ *
+ * Profiling
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+
+/**
+ * Get profiling timestamp
+ *
+ * @ret timestamp	Timestamp
+ */
+static inline __attribute__ (( always_inline )) uint64_t
+profile_timestamp ( void ) {
+	uint64_t cycles;
+
+	/* Read cycle counter */
+	__asm__ __volatile__ ( "msr PMCR_EL0, %1\n\t"
+			       "mrs %0, PMCCNTR_EL0\n\t"
+			       : "=r" ( cycles ) : "r" ( 1 ) );
+	return cycles;
+}
+
+#endif /* _BITS_PROFILE_H */
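The msr writes 1 to PMCR_EL0 (setting the E bit that enables the performance counter block) and the mrs then reads the cycle counter PMCCNTR_EL0; access to these registers from the current exception level is assumed. A typical measurement on top of it (sketch, hypothetical caller):

    /* Sketch: measure elapsed CPU cycles around a function call */
    static uint64_t example_measure_cycles ( void ( * fn ) ( void ) ) {
            uint64_t start = profile_timestamp ();

            fn ();
            return ( profile_timestamp () - start );
    }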

src/arch/arm64/include/bits/stdint.h  +21 -0

@@ -0,0 +1,21 @@
+#ifndef _BITS_STDINT_H
+#define _BITS_STDINT_H
+
+typedef __SIZE_TYPE__		size_t;
+typedef signed long		ssize_t;
+typedef signed long		off_t;
+
+typedef unsigned char		uint8_t;
+typedef unsigned short		uint16_t;
+typedef unsigned int		uint32_t;
+typedef unsigned long long	uint64_t;
+
+typedef signed char		int8_t;
+typedef signed short		int16_t;
+typedef signed int		int32_t;
+typedef signed long long	int64_t;
+
+typedef unsigned long		physaddr_t;
+typedef unsigned long		intptr_t;
+
+#endif /* _BITS_STDINT_H */

src/arch/arm64/include/bits/strings.h  +69 -0

@@ -0,0 +1,69 @@
+#ifndef _BITS_STRINGS_H
+#define _BITS_STRINGS_H
+
+/** @file
+ *
+ * String functions
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/**
+ * Find first (i.e. least significant) set bit
+ *
+ * @v value		Value
+ * @ret lsb		Least significant bit set in value (LSB=1), or zero
+ */
+static inline __attribute__ (( always_inline )) int __ffsll ( long long value ){
+	unsigned long long bits = value;
+	unsigned long long lsb;
+	unsigned int lz;
+
+	/* Extract least significant set bit */
+	lsb = ( bits & -bits );
+
+	/* Count number of leading zeroes before LSB */
+	__asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( lsb ) );
+
+	return ( 64 - lz );
+}
+
+/**
+ * Find first (i.e. least significant) set bit
+ *
+ * @v value		Value
+ * @ret lsb		Least significant bit set in value (LSB=1), or zero
+ */
+static inline __attribute__ (( always_inline )) int __ffsl ( long value ) {
+
+	return __ffsll ( value );
+}
+
+/**
+ * Find last (i.e. most significant) set bit
+ *
+ * @v value		Value
+ * @ret msb		Most significant bit set in value (LSB=1), or zero
+ */
+static inline __attribute__ (( always_inline )) int __flsll ( long long value ){
+	unsigned int lz;
+
+	/* Count number of leading zeroes */
+	__asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( value ) );
+
+	return ( 64 - lz );
+}
+
+/**
+ * Find last (i.e. most significant) set bit
+ *
+ * @v value		Value
+ * @ret msb		Most significant bit set in value (LSB=1), or zero
+ */
+static inline __attribute__ (( always_inline )) int __flsl ( long value ) {
+
+	return __flsll ( value );
+}
+
+#endif /* _BITS_STRINGS_H */
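Both helpers are built on the clz (count leading zeros) instruction: __ffsll isolates the least significant set bit with value & -value and converts its leading-zero count into a 1-based index, while __flsll counts leading zeros of the whole value; since clz of zero is 64 on AArch64, both return 0 for a zero input. A few expected values (sketch):

    #include <assert.h>

    static void example_bit_scan ( void ) {
            assert ( __ffsll ( 0 ) == 0 );
            assert ( __ffsll ( 1 ) == 1 );
            assert ( __ffsll ( 0x10 ) == 5 );   /* lowest set bit is bit 4 */
            assert ( __flsll ( 0 ) == 0 );
            assert ( __flsll ( 0x10 ) == 5 );   /* highest set bit is bit 4 */
            assert ( __flsll ( 0x8000000000000000ULL ) == 64 );
    }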

src/arch/arm64/include/efi/ipxe/dhcp_arch.h  +46 -0

@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Michael Brown <mbrown@fensystems.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ */
+
+#ifndef _DHCP_ARCH_H
+#define _DHCP_ARCH_H
+
+/** @file
+ *
+ * Architecture-specific DHCP options
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <ipxe/dhcp.h>
+
+#define DHCP_ARCH_VENDOR_CLASS_ID \
+	DHCP_STRING ( 'P', 'X', 'E', 'C', 'l', 'i', 'e', 'n', 't', ':',      \
+		      'A', 'r', 'c', 'h', ':', '0', '0', '0', '0', '7', ':', \
+		      'U', 'N', 'D', 'I', ':', '0', '0', '3', '0', '1', '0' )
+
+#define DHCP_ARCH_CLIENT_ARCHITECTURE \
+	DHCP_WORD ( DHCP_CLIENT_ARCHITECTURE_EFI )
+
+#define DHCP_ARCH_CLIENT_NDI DHCP_OPTION ( 1 /* UNDI */ , 3, 10 /* v3.10 */ )
+
+#endif

src/arch/arm64/include/gdbmach.h  +45 -0

@@ -0,0 +1,45 @@
+#ifndef GDBMACH_H
+#define GDBMACH_H
+
+/** @file
+ *
+ * GDB architecture specifics
+ *
+ * This file declares functions for manipulating the machine state and
+ * debugging context.
+ *
+ */
+
+#include <stdint.h>
+
+typedef unsigned long gdbreg_t;
+
+/* Register snapshot */
+enum {
+	/* Not yet implemented */
+	GDBMACH_NREGS,
+};
+
+#define GDBMACH_SIZEOF_REGS ( GDBMACH_NREGS * sizeof ( gdbreg_t ) )
+
+static inline void gdbmach_set_pc ( gdbreg_t *regs, gdbreg_t pc ) {
+	/* Not yet implemented */
+	( void ) regs;
+	( void ) pc;
+}
+
+static inline void gdbmach_set_single_step ( gdbreg_t *regs, int step ) {
+	/* Not yet implemented */
+	( void ) regs;
+	( void ) step;
+}
+
+static inline void gdbmach_breakpoint ( void ) {
+	/* Not yet implemented */
+}
+
+extern int gdbmach_set_breakpoint ( int type, unsigned long addr, size_t len,
+				    int enable );
+extern void gdbmach_init ( void );
+
+#endif /* GDBMACH_H */

src/arch/arm64/include/limits.h  +59 -0

@@ -0,0 +1,59 @@
+#ifndef LIMITS_H
+#define LIMITS_H	1
+
+/* Number of bits in a `char' */
+#define CHAR_BIT	8
+
+/* Minimum and maximum values a `signed char' can hold */
+#define SCHAR_MIN	(-128)
+#define SCHAR_MAX	127
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0.) */
+#define UCHAR_MAX	255
+
+/* Minimum and maximum values a `char' can hold */
+#define CHAR_MIN	SCHAR_MIN
+#define CHAR_MAX	SCHAR_MAX
+
+/* Minimum and maximum values a `signed short int' can hold */
+#define SHRT_MIN	(-32768)
+#define SHRT_MAX	32767
+
+/* Maximum value an `unsigned short' can hold. (Minimum is 0.) */
+#define USHRT_MAX	65535
+
+
+/* Minimum and maximum values a `signed int' can hold */
+#define INT_MIN		(-INT_MAX - 1)
+#define INT_MAX		2147483647
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0.) */
+#define UINT_MAX	4294967295U
+
+
+/* Minimum and maximum values a `signed int' can hold */
+#define INT_MAX		2147483647
+#define INT_MIN		(-INT_MAX - 1)
+
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0.) */
+#define UINT_MAX	4294967295U
+
+
+/* Minimum and maximum values a `signed long' can hold */
+#define LONG_MAX	9223372036854775807L
+#define LONG_MIN	(-LONG_MAX - 1L)
+
+/* Maximum value an `unsigned long' can hold. (Minimum is 0.) */
+#define ULONG_MAX	18446744073709551615UL
+
+/* Minimum and maximum values a `signed long long' can hold */
+#define LLONG_MAX	9223372036854775807LL
+#define LLONG_MIN	(-LONG_MAX - 1LL)
+
+
+/* Maximum value an `unsigned long long' can hold. (Minimum is 0.) */
+#define ULLONG_MAX	18446744073709551615ULL
+
+
+#endif /* LIMITS_H */

src/arch/arm64/include/setjmp.h  +44 -0

@@ -0,0 +1,44 @@
+#ifndef _SETJMP_H
+#define _SETJMP_H
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+
+/** A jump buffer */
+typedef struct {
+	/** Saved x19 */
+	uint64_t x19;
+	/** Saved x20 */
+	uint64_t x20;
+	/** Saved x21 */
+	uint64_t x21;
+	/** Saved x22 */
+	uint64_t x22;
+	/** Saved x23 */
+	uint64_t x23;
+	/** Saved x24 */
+	uint64_t x24;
+	/** Saved x25 */
+	uint64_t x25;
+	/** Saved x26 */
+	uint64_t x26;
+	/** Saved x27 */
+	uint64_t x27;
+	/** Saved x28 */
+	uint64_t x28;
+	/** Saved frame pointer (x29) */
+	uint64_t x29;
+	/** Saved link register (x30) */
+	uint64_t x30;
+	/** Saved stack pointer (x31) */
+	uint64_t sp;
+} jmp_buf[1];
+
+extern int __asmcall __attribute__ (( returns_twice ))
+setjmp ( jmp_buf env );
+
+extern void __asmcall __attribute__ (( noreturn ))
+longjmp ( jmp_buf env, int val );
+
+#endif /* _SETJMP_H */

src/config/defaults/efi.h  +1 -1

@@ -40,7 +40,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #define	CPUID_CMD		/* x86 CPU feature detection command */
 #endif
 
-#if defined ( __arm__ )
+#if defined ( __arm__ ) || defined ( __aarch64__ )
 #define IOAPI_ARM
 #define NAP_EFIARM
 #endif

src/util/efirom.c  +1 -0

@@ -85,6 +85,7 @@ static void read_pe_info ( void *pe, uint16_t *machine,
 		*subsystem = nt->nt32.OptionalHeader.Subsystem;
 		break;
 	case EFI_IMAGE_MACHINE_X64:
+	case EFI_IMAGE_MACHINE_AARCH64:
 		*subsystem = nt->nt64.OptionalHeader.Subsystem;
 		break;
 	default:

src/util/elf2efi.c  +20 -0

@@ -72,6 +72,11 @@
 
 #define ELF_MREL( mach, type ) ( (mach) | ( (type) << 16 ) )
 
+/* Seems to be missing from elf.h */
+#ifndef R_AARCH64_NULL
+#define R_AARCH64_NULL 256
+#endif
+
 #define EFI_FILE_ALIGN 0x20
 
 struct elf_file {
@@ -403,6 +408,9 @@ static void set_machine ( struct elf_file *elf, struct pe_header *pe_header ) {
 	case EM_ARM:
 		machine = EFI_IMAGE_MACHINE_ARMTHUMB_MIXED;
 		break;
+	case EM_AARCH64:
+		machine = EFI_IMAGE_MACHINE_AARCH64;
+		break;
 	default:
 		eprintf ( "Unknown ELF architecture %d\n", ehdr->e_machine );
 		exit ( 1 );
@@ -582,6 +590,8 @@ static void process_reloc ( struct elf_file *elf, const Elf_Shdr *shdr,
 		case ELF_MREL ( EM_386, R_386_NONE ) :
 		case ELF_MREL ( EM_ARM, R_ARM_NONE ) :
 		case ELF_MREL ( EM_X86_64, R_X86_64_NONE ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_NONE ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_NULL ) :
 			/* Ignore dummy relocations used by REQUIRE_SYMBOL() */
 			break;
 		case ELF_MREL ( EM_386, R_386_32 ) :
@@ -590,6 +600,7 @@ static void process_reloc ( struct elf_file *elf, const Elf_Shdr *shdr,
 			generate_pe_reloc ( pe_reltab, offset, 4 );
 			break;
 		case ELF_MREL ( EM_X86_64, R_X86_64_64 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_ABS64 ) :
 			/* Generate an 8-byte PE relocation */
 			generate_pe_reloc ( pe_reltab, offset, 8 );
 			break;
@@ -598,6 +609,15 @@ static void process_reloc ( struct elf_file *elf, const Elf_Shdr *shdr,
 		case ELF_MREL ( EM_ARM, R_ARM_THM_PC22 ) :
 		case ELF_MREL ( EM_ARM, R_ARM_THM_JUMP24 ) :
 		case ELF_MREL ( EM_X86_64, R_X86_64_PC32 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_CALL26 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_JUMP26 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_ADR_PREL_LO21 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_ADR_PREL_PG_HI21 ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_ADD_ABS_LO12_NC ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_LDST8_ABS_LO12_NC ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_LDST16_ABS_LO12_NC ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_LDST32_ABS_LO12_NC ) :
+		case ELF_MREL ( EM_AARCH64, R_AARCH64_LDST64_ABS_LO12_NC ) :
 			/* Skip PC-relative relocations; all relative
 			 * offsets remain unaltered when the object is
 			 * loaded.
