@@ -42,6 +42,30 @@ struct image_type bzimage_image_type __image_type ( PROBE_NORMAL );
  * @ret rc		Return status code
  */
 static int bzimage_exec ( struct image *image ) {
+	unsigned long rm_kernel_seg = image->priv.ul;
+
+	/* Prepare for exiting */
+	shutdown();
+
+	/* Jump to the kernel */
+	__asm__ __volatile__ ( REAL_CODE ( "movw %w0, %%ds\n\t"
+					   "movw %w0, %%es\n\t"
+					   "movw %w0, %%fs\n\t"
+					   "movw %w0, %%gs\n\t"
+					   "movw %w0, %%ss\n\t"
+					   "movw %w1, %%sp\n\t"
+					   "pushw %w2\n\t"
+					   "pushw $0\n\t"
+					   "lret\n\t" )
+			       : : "r" ( rm_kernel_seg ),
+				   "i" ( BZI_STACK_SIZE ),
+				   "r" ( rm_kernel_seg + 0x20 ) );
+
+	/* There is no way for the image to return, since we provide
+	 * no return address.
+	 */
+
+	return -ECANCELED; /* -EIMPOSSIBLE */
 }
 
 /**
@@ -52,17 +76,29 @@ static int bzimage_exec ( struct image *image ) {
  */
 int bzimage_load ( struct image *image ) {
 	struct bzimage_header bzhdr;
+	unsigned int rm_kernel_seg = 0x7c0; /* place RM kernel at 07c0:0000 */
+	userptr_t rm_kernel = real_to_user ( rm_kernel_seg, 0 );
+	userptr_t pm_kernel;
+	size_t rm_filesz;
+	size_t rm_memsz;
+	size_t pm_filesz;
+	size_t pm_memsz;
+	size_t rm_heap_end;
+	size_t rm_cmdline;
+	int rc;
 
 	/* Sanity check */
-	if ( image->len < ( BZHDR_OFFSET + sizeof ( bzhdr ) ) ) {
-		DBGC ( image, "BZIMAGE %p too short\n", image );
+	if ( image->len < ( BZI_HDR_OFFSET + sizeof ( bzhdr ) ) ) {
+		DBGC ( image, "bzImage %p too short for kernel header\n",
+		       image );
 		return -ENOEXEC;
 	}
 
 	/* Read and verify header */
-	copy_from_user ( &bzhdr, image->data, BZHDR_OFFSET, sizeof ( bzhdr ) );
-	if ( bzhdr.header != BZIMAGE_SIGNATURE ) {
-		DBGC ( image, "BZIMAGE %p not a bzImage\n", image );
+	copy_from_user ( &bzhdr, image->data, BZI_HDR_OFFSET,
+			 sizeof ( bzhdr ) );
+	if ( bzhdr.header != BZI_SIGNATURE ) {
+		DBGC ( image, "bzImage %p not a bzImage\n", image );
 		return -ENOEXEC;
 	}
 
@@ -70,6 +106,80 @@ int bzimage_load ( struct image *image ) {
 	if ( ! image->type )
 		image->type = &bzimage_image_type;
 
+	/* We don't support ancient kernels */
+	if ( bzhdr.version < 0x0200 ) {
+		DBGC ( image, "bzImage %p version %04x not supported\n",
+		       image, bzhdr.version );
+		return -ENOTSUP;
+	}
+	DBGC ( image, "bzImage %p version %04x\n", image, bzhdr.version );
+
+	/* Check size of base memory portions */
+	rm_filesz = ( ( bzhdr.setup_sects ? bzhdr.setup_sects : 4 ) << 9 );
+	if ( rm_filesz > image->len ) {
+		DBGC ( image, "bzImage %p too short for %zd byte of setup\n",
+		       image, rm_filesz );
+		return -ENOEXEC;
+	}
+	rm_memsz = rm_filesz;
+
+	/* Allow space for the stack and heap */
+	rm_memsz += BZI_STACK_SIZE;
+	rm_heap_end = rm_memsz;
+
+	/* Allow space for the command line, if one exists */
+	rm_cmdline = rm_memsz;
+	if ( image->cmdline )
+		rm_memsz += ( strlen ( image->cmdline ) + 1 );
+
+	/* Prepare, verify, and load the real-mode segment */
+	if ( ( rc = prep_segment ( rm_kernel, rm_filesz, rm_memsz ) ) != 0 ) {
+		DBGC ( image, "bzImage %p could not prepare RM segment: %s\n",
+		       image, strerror ( rc ) );
+		return rc;
+	}
+	memcpy_user ( rm_kernel, 0, image->data, 0, rm_filesz );
+
+	/* Prepare, verify and load the rest of the kernel */
+	pm_kernel = ( ( bzhdr.loadflags & BZI_LOAD_HIGH ) ?
+		      phys_to_user ( 0x100000 ) : phys_to_user ( 0x10000 ) );
+	pm_filesz = pm_memsz = ( image->len - rm_filesz );
+	if ( ( rc = prep_segment ( pm_kernel, pm_filesz, pm_memsz ) ) != 0 ) {
+		DBGC ( image, "bzImage %p could not prepare PM segment: %s\n",
+		       image, strerror ( rc ) );
+		return rc;
+	}
+	memcpy_user ( pm_kernel, 0, image->data, rm_filesz, pm_filesz );
+
+	/* Copy down the command line, if it exists */
+	if ( image->cmdline ) {
+		copy_to_user ( rm_kernel, rm_cmdline, image->cmdline,
+			       strlen ( image->cmdline ) + 1 );
+	}
+
+	/* Update the header and copy it into the loaded kernel */
+	bzhdr.type_of_loader = BZI_LOADER_TYPE_ETHERBOOT;
+	if ( bzhdr.version >= 0x0201 ) {
+		bzhdr.heap_end_ptr = ( rm_heap_end - 0x200 );
+		bzhdr.loadflags |= BZI_CAN_USE_HEAP;
+	}
+	if ( bzhdr.version >= 0x0202 ) {
+		bzhdr.cmd_line_ptr = user_to_phys ( rm_kernel, rm_cmdline );
+	} else {
+		uint16_t cmd_line_magic = BZI_CMD_LINE_MAGIC;
+		uint16_t cmd_line_offset = rm_cmdline;
+
+		put_real ( cmd_line_magic, rm_kernel_seg,
+			   BZI_CMD_LINE_MAGIC_OFFSET );
+		put_real ( cmd_line_offset, rm_kernel_seg,
+			   BZI_CMD_LINE_OFFSET_OFFSET );
+		bzhdr.setup_move_size = rm_memsz;
+	}
+	copy_to_user ( rm_kernel, BZI_HDR_OFFSET, &bzhdr, sizeof ( bzhdr ) );
+
+	/* Record segment address in image private data field */
+	image->priv.ul = rm_kernel_seg;
+
 	return 0;
 }
 