@@ -93,33 +93,45 @@ static struct pci_device_id e1000_nics[] = {
 PCI_ROM(0x8086, 0x10DA, "e1000-0x10DA", "E1000-0x10DA"),
 };

+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
 static void
-e1000_init_manageability ( struct e1000_adapter *adapter )
+e1000_get_hw_control(struct e1000_adapter *adapter)
 {
- DBG ( "e1000_init_manageability\n" );
-
- if (adapter->en_mng_pt) {
- uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
-
- /* disable hardware interception of ARP */
- manc &= ~(E1000_MANC_ARP_EN);
-
- /* enable receiving management packets to the host */
- /* this will probably generate destination unreachable messages
- * from the host OS, but the packets will be handled on SMBUS */
- if (adapter->hw.has_manc2h) {
- uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
-
- manc |= E1000_MANC_EN_MNG2HOST;
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
- E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
- }
+ uint32_t ctrl_ext;
+ uint32_t swsm;
+
+ DBG ( "e1000_get_hw_control\n" );

- E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ /* Let firmware know the driver has taken over */
+ switch (adapter->hw.mac_type) {
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
+ E1000_WRITE_REG(&adapter->hw, SWSM,
+ swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
 }
 }

+#if 0
 /**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
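The manageability setup above is replaced by e1000_get_hw_control(), which only announces driver ownership to the firmware by setting the DRV_LOAD bit: SWSM:DRV_LOAD on 82573, CTRL_EXT:DRV_LOAD on 82571/82572/80003es2lan/ich8lan, and nothing on older MACs. Each case is a plain read-modify-write of a memory-mapped register; a minimal standalone sketch of that pattern follows (the helper names are illustrative, not the driver's E1000_READ_REG/E1000_WRITE_REG macros):

#include <stdint.h>

/* Illustrative MMIO accessors; the driver uses its own register macros. */
static inline uint32_t mmio_read32 ( volatile uint32_t *reg ) {
	return *reg;
}

static inline void mmio_write32 ( volatile uint32_t *reg, uint32_t val ) {
	*reg = val;
}

/* Read-modify-write: preserve every other bit, set only the DRV_LOAD flag. */
static void set_drv_load ( volatile uint32_t *reg, uint32_t drv_load_mask ) {
	mmio_write32 ( reg, mmio_read32 ( reg ) | drv_load_mask );
}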
@@ -195,6 +207,42 @@ out:
 return;
 }

+#endif
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void
+e1000_irq_enable ( struct e1000_adapter *adapter )
+{
+ E1000_WRITE_REG ( &adapter->hw, IMS, E1000_IMS_RXT0 |
+ E1000_IMS_RXSEQ );
+ E1000_WRITE_FLUSH ( &adapter->hw );
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void
+e1000_irq_disable ( struct e1000_adapter *adapter )
+{
+ E1000_WRITE_REG ( &adapter->hw, IMC, ~0 );
+ E1000_WRITE_FLUSH ( &adapter->hw );
+}
+
+/**
+ * e1000_irq_force - trigger interrupt
+ * @adapter: board private structure
+ **/
+static void
+e1000_irq_force ( struct e1000_adapter *adapter )
+{
+ E1000_WRITE_REG ( &adapter->hw, ICS, E1000_ICS_RXT0 );
+ E1000_WRITE_FLUSH ( &adapter->hw );
+}
+
 /**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
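The three helpers added here wrap the e1000 interrupt mask registers: a write to IMS sets (enables) the named interrupt causes, a write to IMC clears them — so writing ~0 to IMC masks everything — and ICS raises a cause by hand. A toy standalone model of that set/clear behaviour, with placeholder bit positions standing in for E1000_IMS_RXT0 and E1000_IMS_RXSEQ, is sketched below:

#include <assert.h>
#include <stdint.h>

/* Toy model: "IMS-style" writes set mask bits, "IMC-style" writes clear them. */
struct intr_mask { uint32_t enabled; };

static void write_ims ( struct intr_mask *m, uint32_t bits ) { m->enabled |= bits; }
static void write_imc ( struct intr_mask *m, uint32_t bits ) { m->enabled &= ~bits; }

int main ( void ) {
	struct intr_mask m = { 0 };
	const uint32_t BIT_RX_TIMER = 1U << 7;   /* placeholder bit positions */
	const uint32_t BIT_RX_SEQ   = 1U << 3;

	write_ims ( &m, BIT_RX_TIMER | BIT_RX_SEQ );   /* enable two causes */
	assert ( m.enabled == ( BIT_RX_TIMER | BIT_RX_SEQ ) );

	write_imc ( &m, ~0U );                         /* disable: mask everything */
	assert ( m.enabled == 0 );
	return 0;
}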
@@ -208,53 +256,60 @@ e1000_sw_init ( struct e1000_adapter *adapter )
 {
 struct e1000_hw *hw = &adapter->hw;
 struct pci_device *pdev = adapter->pdev;
-
- DBG ( "e1000_sw_init\n" );

 /* PCI config space info */
+
 hw->vendor_id = pdev->vendor;
 hw->device_id = pdev->device;

- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+ pci_read_config_word ( pdev, PCI_COMMAND, &hw->pci_cmd_word );

- adapter->rx_buffer_len = 2048;
+ /* Disable Flow Control */
+ hw->fc = E1000_FC_NONE;
+
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
+ hw->max_frame_size = E1000_RXBUFFER_2048;
+ hw->min_frame_size = 64;

 /* identify the MAC */

 if ( e1000_set_mac_type ( hw ) ) {
 DBG ( "Unknown MAC Type\n" );
- return -EINVAL;
+ return -EIO;
 }

- switch (hw->mac_type) {
+ switch ( hw->mac_type ) {
 default:
 break;
 case e1000_82541:
 case e1000_82547:
 case e1000_82541_rev_2:
 case e1000_82547_rev_2:
+#if 0
 hw->phy_init_script = 1;
+#endif
 break;
 }

- e1000_set_media_type(hw);
+ e1000_set_media_type ( hw );

- hw->wait_autoneg_complete = TRUE;
+ hw->wait_autoneg_complete = FALSE;
 hw->tbi_compatibility_en = TRUE;
 hw->adaptive_ifs = TRUE;

 /* Copper options */

- if (hw->media_type == e1000_media_type_copper) {
+ if ( hw->media_type == e1000_media_type_copper ) {
 hw->mdix = AUTO_ALL_MODES;
 hw->disable_polarity_correction = FALSE;
 hw->master_slave = E1000_MASTER_SLAVE;
 }

- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable ( adapter );

- return E1000_SUCCESS;
+ return 0;
 }

 /**
@@ -267,8 +322,6 @@ e1000_sw_init ( struct e1000_adapter *adapter )
 static int
 e1000_setup_tx_resources ( struct e1000_adapter *adapter )
 {
- int i;
-
 DBG ( "e1000_setup_tx_resources\n" );

 /* Allocate transmit descriptor ring memory.
@@ -281,97 +334,31 @@ e1000_setup_tx_resources ( struct e1000_adapter *adapter )
 cross 64K bytes.
 */

- adapter->tx_desc_ring =
- malloc_dma ( sizeof ( struct e1000_tx_desc ) * NUM_TX_DESC,
- sizeof ( struct e1000_tx_desc ) * NUM_TX_DESC );
-
- if ( ! adapter->tx_desc_ring ) {
+ adapter->tx_base =
+ malloc_dma ( sizeof ( *adapter->tx_base ) * NUM_TX_DESC,
+ sizeof ( *adapter->tx_base ) * NUM_TX_DESC );
+
+ if ( ! adapter->tx_base ) {
 return -ENOMEM;
 }
-
- memset ( adapter->tx_desc_ring, 0, sizeof ( struct e1000_tx_desc ) *
- NUM_TX_DESC );
-
- for ( i = 0; i < NUM_TX_DESC; i++ ) {
- adapter->tx_desc[i] = (void *) adapter->tx_desc_ring +
- ( i * sizeof ( struct e1000_tx_desc ) );
- }
-
- return 0;
-}
-
-static void
-e1000_free_tx_resources ( struct e1000_adapter *adapter )
-{
- DBG ( "e1000_free_tx_resources\n" );
-
- free_dma ( adapter->tx_desc_ring,
- sizeof ( struct e1000_tx_desc ) * NUM_TX_DESC );
-}
-
-/**
- * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
- * @rxdr: rx descriptor ring (for a specific queue) to setup
- *
- * Returns 0 on success, negative on failure
- **/
-static int
-e1000_setup_rx_resources ( struct e1000_adapter *adapter )
-{
- int i, j;

- DBG ( "e1000_setup_rx_resources\n" );
+ memset ( adapter->tx_base, 0, sizeof ( *adapter->tx_base ) * NUM_TX_DESC );

- /* Allocate receive descriptor ring memory.
- It must not cross a 64K boundary because of hardware errata
- */
-
- adapter->rx_desc_ring =
- malloc_dma ( sizeof ( struct e1000_rx_desc ) * NUM_RX_DESC,
- sizeof ( struct e1000_rx_desc ) * NUM_RX_DESC );
-
- if ( ! adapter->rx_desc_ring ) {
- return -ENOMEM;
- }
+ DBG ( "adapter->tx_base = %#08lx\n", virt_to_bus ( adapter->tx_base ) );

- memset ( adapter->rx_desc_ring, 0, sizeof (struct e1000_rx_desc) * NUM_RX_DESC );
+ DBG ( "sizeof ( *adapter->tx_base ) == %d bytes\n",
+ sizeof ( *adapter->tx_base ) );

- for ( i = 0; i < NUM_RX_DESC; i++ ) {
-
- adapter->rx_iobuf[i] = alloc_iob ( E1000_RXBUFFER_2048 );
-
- /* If unable to allocate all iobufs, free any that
- * were successfully allocated, and return an error
- */
- if ( ! adapter->rx_iobuf[i] ) {
- for ( j = 0; j < i; j++ ) {
- free_iob ( adapter->rx_iobuf[j] );
- }
- return -ENOMEM;
- }
-
- adapter->rx_desc[i] = (void *) adapter->rx_desc_ring +
- ( i * sizeof ( struct e1000_rx_desc ) );
-
- adapter->rx_desc[i]->buffer_addr = virt_to_bus ( adapter->rx_iobuf[i]->data );
- }
 return 0;
 }

 static void
-e1000_free_rx_resources ( struct e1000_adapter *adapter )
+e1000_free_tx_resources ( struct e1000_adapter *adapter )
 {
- int i;
-
- DBG ( "e1000_free_rx_resources\n" );
-
- free_dma ( adapter->rx_desc_ring,
- sizeof ( struct e1000_rx_desc ) * NUM_RX_DESC );
+ DBG ( "e1000_free_tx_resources\n" );

- for ( i = 0; i < NUM_RX_DESC; i++ ) {
- free_iob ( adapter->rx_iobuf[i] );
- }
+ free_dma ( adapter->tx_base,
+ sizeof ( *adapter->tx_base ) * NUM_TX_DESC );
 }

 /**
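From here on the patch drops the per-descriptor pointer arrays (tx_desc[i]/rx_desc[i]) and addresses each descriptor by byte offset from the ring base, base + i * sizeof ( *base ). For a correctly typed base pointer that is exactly ordinary array indexing; the driver's (void *) form relies on GCC's byte-granular void-pointer arithmetic. A standalone illustration of the equivalence, with a made-up descriptor type and ring size:

#include <assert.h>
#include <stddef.h>

struct desc { unsigned long long buffer_addr; unsigned int lower, upper; };

#define NUM_DESC 8

int main ( void ) {
	static struct desc ring[NUM_DESC];

	for ( size_t i = 0; i < NUM_DESC; i++ ) {
		/* byte-offset form, as used throughout the patch ... */
		struct desc *by_offset =
			( struct desc * ) ( ( char * ) ring + i * sizeof ( *ring ) );
		/* ... is the same address as plain array indexing */
		assert ( by_offset == &ring[i] );
	}
	return 0;
}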
@@ -384,15 +371,21 @@ static void
 e1000_configure_tx ( struct e1000_adapter *adapter )
 {
 struct e1000_hw *hw = &adapter->hw;
- uint32_t tctl, tipg, tarc;
+ uint32_t tctl;
+
+#if 0
+ uint32 tipg, tarc;
 uint32_t ipgr1, ipgr2;
+#endif

 DBG ( "e1000_configure_tx\n" );

 E1000_WRITE_REG ( hw, TDBAH, 0 );
 E1000_WRITE_REG ( hw, TDBAL, virt_to_bus ( adapter->tx_base ) );
- E1000_WRITE_REG ( hw, TDLEN, sizeof ( struct e1000_tx_desc ) *
- NUM_TX_DESC );
+ E1000_WRITE_REG ( hw, TDLEN, sizeof ( *adapter->tx_base ) * NUM_TX_DESC );
+
+ DBG ( "TDBAL: %#08lx\n", virt_to_bus ( adapter->tx_base ) );
+ DBG ( "TDLEN: %d\n", sizeof ( *adapter->tx_base ) * NUM_TX_DESC );

 /* Setup the HW Tx Head and Tail descriptor pointers */
 E1000_WRITE_REG ( hw, TDH, 0 );
@@ -402,6 +395,7 @@ e1000_configure_tx ( struct e1000_adapter *adapter )
 adapter->tx_tail = 0;
 adapter->tx_fill_ctr = 0;

+#if 0
 /* Set the default values for the Tx Inter Packet Gap timer */
 if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
 (hw->media_type == e1000_media_type_fiber ||
@@ -457,9 +451,15 @@ e1000_configure_tx ( struct e1000_adapter *adapter )
 tarc |= 1;
 E1000_WRITE_REG(hw, TARC1, tarc);
 }
+#endif

- e1000_config_collision_dist(hw);
+ e1000_config_collision_dist ( hw );

+ tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ (E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+#if 0
 /* Setup Transmit Descriptor Settings for eop descriptor */
 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

@@ -477,41 +477,77 @@ e1000_configure_tx ( struct e1000_adapter *adapter )
 if (hw->mac_type == e1000_82544 &&
 hw->bus_type == e1000_bus_type_pcix)
 adapter->pcix_82544 = 1;
+#endif

 E1000_WRITE_REG ( hw, TCTL, tctl );
 }

 /**
- * e1000_setup_rctl - configure the receive control registers
- * @adapter: Board private structure
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
 **/
-static void
-e1000_setup_rctl ( struct e1000_adapter *adapter )
+static int
+e1000_setup_rx_resources ( struct e1000_adapter *adapter )
 {
- uint32_t rctl;
+ int i, j;
+ struct e1000_rx_desc *rx_curr_desc;

- DBG ( "e1000_setup_rctl\n" );
-
- rctl = E1000_READ_REG ( &adapter->hw, RCTL );
+ DBG ( "e1000_setup_rx_resources\n" );
+
+ /* Allocate receive descriptor ring memory.
+ It must not cross a 64K boundary because of hardware errata
+ */

- rctl &= ~( 3 << E1000_RCTL_MO_SHIFT );
+ adapter->rx_base =
+ malloc_dma ( sizeof ( *adapter->rx_base ) * NUM_RX_DESC,
+ sizeof ( *adapter->rx_base ) * NUM_RX_DESC );
+
+ if ( ! adapter->rx_base ) {
+ return -ENOMEM;
+ }
+ memset ( adapter->rx_base, 0, sizeof ( *adapter->rx_base ) * NUM_RX_DESC );

- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- ( adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT );
+ for ( i = 0; i < NUM_RX_DESC; i++ ) {
+
+ adapter->rx_iobuf[i] = alloc_iob ( E1000_RXBUFFER_2048 );
+
+ /* If unable to allocate all iobufs, free any that
+ * were successfully allocated, and return an error
+ */
+ if ( ! adapter->rx_iobuf[i] ) {
+ for ( j = 0; j < i; j++ ) {
+ free_iob ( adapter->rx_iobuf[j] );
+ }
+ return -ENOMEM;
+ }

- if ( adapter->hw.tbi_compatibility_on == 1 )
- rctl |= E1000_RCTL_SBP;
- else
- rctl &= ~E1000_RCTL_SBP;
+ rx_curr_desc = ( void * ) ( adapter->rx_base ) +
+ ( i * sizeof ( *adapter->rx_base ) );
+
+ rx_curr_desc->buffer_addr = virt_to_bus ( adapter->rx_iobuf[i]->data );
+ DBG ( "i = %d rx_curr_desc->buffer_addr = %#16llx\n",
+ i, rx_curr_desc->buffer_addr );
+
+ }
+ return 0;
+}

- rctl &= ~E1000_RCTL_LPE;
+static void
+e1000_free_rx_resources ( struct e1000_adapter *adapter )
+{
+ int i;
+
+ DBG ( "e1000_free_rx_resources\n" );

- /* Setup buffer sizes */
- rctl |= E1000_RCTL_SZ_2048;
- rctl &= ~E1000_RCTL_BSEX;
+ free_dma ( adapter->rx_base,
+ sizeof ( *adapter->rx_base ) * NUM_RX_DESC );

- E1000_WRITE_REG ( &adapter->hw, RCTL, rctl );
+ for ( i = 0; i < NUM_RX_DESC; i++ ) {
+ free_iob ( adapter->rx_iobuf[i] );
+ }
 }

 /**
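Both descriptor rings are allocated with malloc_dma ( size, align ) where the requested alignment equals the ring size. Assuming the allocator honours a power-of-two alignment, a ring of at most 64 KiB that starts on a size-aligned address can never straddle a 64 KiB boundary, which is the hardware-errata constraint the comments mention. A small standalone check of that property:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* True if the block [addr, addr + size) crosses a 64 KiB boundary. */
static int crosses_64k ( uintptr_t addr, size_t size ) {
	return ( addr >> 16 ) != ( ( addr + size - 1 ) >> 16 );
}

int main ( void ) {
	const size_t ring_size = 256;   /* e.g. 16 descriptors of 16 bytes each */

	/* Every ring_size-aligned start address stays inside one 64 KiB window. */
	for ( uintptr_t addr = 0; addr < 0x30000; addr += ring_size )
		assert ( ! crosses_64k ( addr, ring_size ) );
	return 0;
}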
@@ -524,17 +560,23 @@ static void
 e1000_configure_rx ( struct e1000_adapter *adapter )
 {
 struct e1000_hw *hw = &adapter->hw;
- uint32_t rctl, ctrl_ext;
+ uint32_t rctl;
+
+#if 0
+ uint32_t ctrl_ext;
+#endif

 DBG ( "e1000_configure_rx\n" );

 /* disable receives while setting up the descriptors */
- rctl = E1000_READ_REG(hw, RCTL);
+ rctl = E1000_READ_REG ( hw, RCTL );
 E1000_WRITE_REG ( hw, RCTL, rctl & ~E1000_RCTL_EN );

 /* set the Receive Delay Timer Register */
- E1000_WRITE_REG( hw, RDTR, adapter->rx_int_delay );
+ E1000_WRITE_REG ( hw, RDTR, adapter->rx_int_delay );
+ E1000_WRITE_REG ( hw, RADV, adapter->rx_abs_int_delay );

+#if 0
 if (hw->mac_type >= e1000_82540) {
 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
 if (adapter->itr_setting != 0)
@@ -549,6 +591,7 @@ e1000_configure_rx ( struct e1000_adapter *adapter )
 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
 E1000_WRITE_FLUSH(hw);
 }
+#endif

 /* Setup the HW Rx Head and Tail Descriptor Pointers and
 * the Base and Length of the Rx Descriptor Ring */
@@ -557,32 +600,20 @@ e1000_configure_rx ( struct e1000_adapter *adapter )

 E1000_WRITE_REG ( hw, RDBAH, 0 );
 E1000_WRITE_REG ( hw, RDBAL, virt_to_bus ( adapter->rx_base ) );
- E1000_WRITE_REG ( hw, RDLEN, sizeof ( struct e1000_tx_desc ) *
- NUM_TX_DESC );
+ E1000_WRITE_REG ( hw, RDLEN, sizeof ( *adapter->rx_base ) *
+ NUM_RX_DESC );

 E1000_WRITE_REG ( hw, RDH, 0);
 E1000_WRITE_REG ( hw, RDT, 0);
+
+ E1000_WRITE_REG ( hw, RCTL, E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_SZ_2048 | E1000_RCTL_MPE);
+ E1000_WRITE_FLUSH ( hw );

 /* Enable Receives */
-
 E1000_WRITE_REG ( hw, RCTL, rctl );
 }

-/**
- * e1000_configure - configure the hardware for RX and TX
- * @adapter = private board structure
- **/
-static void e1000_configure ( struct e1000_adapter *adapter )
-{
- DBG ( "e1000_configure\n" );
-
- e1000_configure_tx ( adapter );
-
- e1000_setup_rctl ( adapter );
-
- e1000_configure_rx ( adapter );
-}
-
 /**
 * e1000_reset - Put e1000 NIC in known initial state
 *
@@ -593,7 +624,7 @@ e1000_reset ( struct e1000_adapter *adapter )
 {
 uint32_t pba = 0;
 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-
+
 DBG ( "e1000_reset\n" );

 switch (adapter->hw.mac_type) {
@@ -631,8 +662,8 @@ e1000_reset ( struct e1000_adapter *adapter )
 break;
 }

- E1000_WRITE_REG(&adapter->hw, PBA, pba);
-
+ E1000_WRITE_REG ( &adapter->hw, PBA, pba );
+
 /* flow control settings */
 /* Set the FC high water mark to 90% of the FIFO size.
 * Required to clear last 3 LSB */
@@ -652,13 +683,14 @@ e1000_reset ( struct e1000_adapter *adapter )
 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
 adapter->hw.fc_send_xon = 1;
 adapter->hw.fc = adapter->hw.original_fc;
-
 /* Allow time for pending master requests to run */
- e1000_reset_hw(&adapter->hw);
- if (adapter->hw.mac_type >= e1000_82544)
- E1000_WRITE_REG(&adapter->hw, WUC, 0);

- if (e1000_init_hw(&adapter->hw))
+ e1000_reset_hw ( &adapter->hw );
+
+ if ( adapter->hw.mac_type >= e1000_82544 )
+ E1000_WRITE_REG ( &adapter->hw, WUC, 0 );
+
+ if ( e1000_init_hw ( &adapter->hw ) )
 DBG ( "Hardware Error\n" );

 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -674,7 +706,7 @@ e1000_reset ( struct e1000_adapter *adapter )
 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 }

- e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+ e1000_phy_get_info ( &adapter->hw, &adapter->phy_info );

 if (!adapter->smart_power_down &&
 (adapter->hw.mac_type == e1000_82571 ||
@@ -689,7 +721,6 @@ e1000_reset ( struct e1000_adapter *adapter )
 e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
 phy_data);
 }
-
 }

 /** Functions that implement the gPXE driver API **/
@@ -703,12 +734,13 @@ e1000_reset ( struct e1000_adapter *adapter )
 static void
 e1000_close ( struct net_device *netdev )
 {
- struct e1000_adapter *adapter = netdev_priv( netdev );
+ struct e1000_adapter *adapter = netdev_priv ( netdev );

 DBG ( "e1000_close\n" );
+
+ e1000_irq_disable ( adapter );

- e1000_reset ( adapter );
- e1000_power_down_phy ( adapter );
+ e1000_reset_hw ( &adapter->hw );

 e1000_free_tx_resources ( adapter );
 e1000_free_rx_resources ( adapter );
@@ -727,6 +759,8 @@ e1000_transmit ( struct net_device *netdev, struct io_buffer *iobuf )
 {
 struct e1000_adapter *adapter = netdev_priv( netdev );
 struct e1000_hw *hw = &adapter->hw;
+ uint32_t tx_curr = adapter->tx_tail;
+ struct e1000_tx_desc *tx_curr_desc;

 DBG ("e1000_transmit\n");

@@ -738,28 +772,42 @@ e1000_transmit ( struct net_device *netdev, struct io_buffer *iobuf )
 /* Save pointer to iobuf we have been given to transmit,
 netdev_tx_complete() will need it later
 */
- adapter->tx_iobuf[adapter->tx_tail] = iobuf;
+ adapter->tx_iobuf[tx_curr] = iobuf;
+
+ tx_curr_desc = ( void * ) ( adapter->tx_base ) +
+ ( tx_curr * sizeof ( *adapter->tx_base ) );
+
+ DBG ( "tx_curr_desc = %#08lx\n", virt_to_bus ( tx_curr_desc ) );
+ DBG ( "tx_curr_desc + 16 = %#08lx\n", virt_to_bus ( tx_curr_desc ) + 16 );
+ DBG ( "iobuf->data = %#08lx\n", virt_to_bus ( iobuf->data ) );

 /* Add the packet to TX ring
 */
- adapter->tx_desc[adapter->tx_tail]->buffer_addr =
+ tx_curr_desc->buffer_addr =
 virt_to_bus ( iobuf->data );
- adapter->tx_desc[adapter->tx_tail]->lower.data =
- E1000_TXD_CMD_RPS | E1000_TXD_CMD_RS | E1000_TXD_CMD_EOP |
+ tx_curr_desc->lower.data =
+ E1000_TXD_CMD_RPS | E1000_TXD_CMD_EOP |
 E1000_TXD_CMD_IFCS | iob_len ( iobuf );
- adapter->tx_desc[adapter->tx_tail]->upper.data = 0;
+ tx_curr_desc->upper.data = 0;

- DBG ( "TX fill: %ld tail: %ld addr: %#08lx len: %d\n", adapter->tx_fill_ctr,
- adapter->tx_tail, virt_to_bus ( iobuf->data ), iob_len ( iobuf ) );
-
+ DBG ( "TX fill: %ld tx_curr: %ld addr: %#08lx len: %d\n", adapter->tx_fill_ctr,
+ tx_curr, virt_to_bus ( iobuf->data ), iob_len ( iobuf ) );
+
 /* Point to next free descriptor */
 adapter->tx_tail = ( adapter->tx_tail + 1 ) % NUM_TX_DESC;
-
 adapter->tx_fill_ctr++;

 /* Write new tail to NIC, making packet available for transmit
 */
- E1000_WRITE_REG ( hw, TDT, adapter->tx_tail ) ;
+ E1000_WRITE_REG ( hw, TDT, adapter->tx_tail );
+
+#if 0
+ while ( ! ( tx_curr_desc->upper.data & E1000_TXD_STAT_DD ) ) {
+ udelay ( 10 ); /* give the nic a chance to write to the register */
+ }
+
+ DBG ( "Leaving XMIT\n" );
+#endif

 return 0;
 }
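The rewritten transmit path is a single-producer ring: the descriptor at tx_tail is filled, tx_tail advances modulo NUM_TX_DESC, the fill counter is bumped, and only then is the new tail written to TDT so the NIC never sees a half-written descriptor. A compact standalone model of that bookkeeping (struct and function names are illustrative; a full-ring check is assumed to happen before the hunk shown above):

#include <assert.h>

#define NUM_TX_DESC 8

struct tx_ring {
	unsigned int head;      /* oldest descriptor still owned by the NIC */
	unsigned int tail;      /* next descriptor the driver will fill */
	unsigned int fill_ctr;  /* descriptors currently in flight */
};

/* Producer side: returns 0 on success, -1 if the ring is full. */
static int tx_ring_produce ( struct tx_ring *r ) {
	if ( r->fill_ctr == NUM_TX_DESC )
		return -1;
	/* ... fill the descriptor at r->tail here ... */
	r->tail = ( r->tail + 1 ) % NUM_TX_DESC;
	r->fill_ctr++;
	/* ... write r->tail to the TDT register here ... */
	return 0;
}

/* Completion side (what the poll loop does when DD is set). */
static void tx_ring_complete ( struct tx_ring *r ) {
	assert ( r->fill_ctr > 0 );
	r->head = ( r->head + 1 ) % NUM_TX_DESC;
	r->fill_ctr--;
}

int main ( void ) {
	struct tx_ring r = { 0, 0, 0 };
	assert ( tx_ring_produce ( &r ) == 0 );
	tx_ring_complete ( &r );
	assert ( r.fill_ctr == 0 );
	return 0;
}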
@@ -781,7 +829,8 @@ e1000_poll ( struct net_device *netdev )
 uint32_t rx_len;
 uint32_t rx_err;
 struct io_buffer *rx_iob;
-
+ struct e1000_tx_desc *tx_curr_desc;
+ struct e1000_rx_desc *rx_curr_desc;
 uint32_t i;

 #if 0
@@ -790,25 +839,30 @@ e1000_poll ( struct net_device *netdev )

 /* Acknowledge interrupt. */
 icr = E1000_READ_REG ( hw, ICR );
-
-#if 0
+ if ( ! icr )
+ return;
+
 DBG ( "e1000_poll: intr_status = %#08lx\n", icr );
-#endif

 /* Check status of transmitted packets
 */
 while ( ( i = adapter->tx_head ) != adapter->tx_tail ) {
-
- //tx_status = adapter->tx_desc[i]->upper.fields.status;
- tx_status = adapter->tx_desc[i]->upper.data;
+
+ tx_curr_desc = ( void * ) ( adapter->tx_base ) +
+ ( i * sizeof ( *adapter->tx_base ) );
+
+ tx_status = tx_curr_desc->upper.data;

- DBG ( "e1000_poll: tx_status = %#08lx\n", tx_status );
+#if 0
+ DBG ( "tx_curr_desc = %#08lx status = %#08lx\n",
+ virt_to_bus ( tx_curr_desc ), tx_status );
+#endif

 /* if the packet at tx_head is not owned by hardware */
 if ( ! ( tx_status & E1000_TXD_STAT_DD ) )
 break;

- DBG ( "got packet. tx_head: %ld tx_tail: %ld tx_status: %#08lx\n",
+ DBG ( "Sent packet. tx_head: %ld tx_tail: %ld tx_status: %#08lx\n",
 adapter->tx_head, adapter->tx_tail, tx_status );

 if ( tx_status & ( E1000_TXD_STAT_EC | E1000_TXD_STAT_LC |
@@ -825,30 +879,40 @@ e1000_poll ( struct net_device *netdev )
 /* Decrement count of used descriptors, clear this descriptor
 */
 adapter->tx_fill_ctr--;
- memset ( &adapter->tx_desc[i], 0, sizeof ( struct e1000_tx_desc ) );
+ memset ( tx_curr_desc, 0, sizeof ( *tx_curr_desc ) );

 adapter->tx_head = ( adapter->tx_head + 1 ) % NUM_TX_DESC;
 }

 /* Process received packets
 */
- while ( ( rx_status = adapter->rx_desc[adapter->rx_tail]->status ) & E1000_RXD_STAT_DD ) {
+ while ( TRUE ) {

 i = adapter->rx_tail;

- rx_len = adapter->rx_desc[i]->length;
+ rx_curr_desc = ( void * ) ( adapter->rx_base ) +
+ ( i * sizeof ( *adapter->rx_base ) );
+ rx_status = rx_curr_desc->status;
+
+ // DBG ( "Before DD Check RX_status: %#08lx\n", rx_status );
+
+ if ( ! ( rx_status & E1000_RXD_STAT_DD ) )
+ break;
+
+ DBG ( "RCTL = %#08lx\n", E1000_READ_REG ( &adapter->hw, RCTL ) );
+
+ rx_len = rx_curr_desc->length;

 DBG ( "Received packet, rx_tail: %ld rx_status: %#08lx rx_len: %ld\n",
 i, rx_status, rx_len );

- rx_err = adapter->rx_desc[adapter->rx_tail]->errors;
+ rx_err = rx_curr_desc->errors;

 if ( rx_err & E1000_RXD_ERR_FRAME_ERR_MASK ) {

 netdev_rx_err ( netdev, NULL, -EINVAL );
 DBG ( "e1000_poll: Corrupted packet received!"
 " rx_err: %#08lx\n", rx_err );
-
 } else {

 /* If unable allocate space for this packet,
@@ -866,12 +930,18 @@ e1000_poll ( struct net_device *netdev )
 netdev_rx ( netdev, rx_iob );
 }

+ memset ( rx_curr_desc, 0, sizeof ( *rx_curr_desc ) );
+
+ rx_curr_desc->buffer_addr = virt_to_bus ( adapter->rx_iobuf[adapter->rx_tail]->data );
+
 adapter->rx_tail = ( adapter->rx_tail + 1 ) % NUM_RX_DESC;
+
+ E1000_WRITE_REG ( hw, RDT, adapter->rx_tail );
 }
 }

 /**
- * e1000_irq - Enable, Disable, or Force interrupts
+ * e1000_irq - enable or Disable interrupts
 *
 * @v adapter e1000 adapter
 * @v action requested interrupt action
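After a frame is handed to netdev_rx(), the poll loop now recycles the descriptor it just consumed: clear it (which drops the DD status bit), point buffer_addr at a receive buffer again, advance rx_tail modulo NUM_RX_DESC and write the new tail to RDT so the hardware regains ownership of the slot. A standalone sketch of that consumer-side bookkeeping (the types, names and write_rdt() stand-in are illustrative only, not the driver's):

#include <stdint.h>
#include <string.h>

#define NUM_RX_DESC 8

struct rx_desc { uint64_t buffer_addr; uint16_t length; uint8_t status; uint8_t errors; };

struct rx_ring {
	struct rx_desc desc[NUM_RX_DESC];
	unsigned int tail;               /* next descriptor the driver will inspect */
};

/* Stand-in for the RDT register write. */
static void write_rdt ( unsigned int tail ) { ( void ) tail; }

/* Give a consumed descriptor slot back to the NIC. */
static void rx_ring_recycle ( struct rx_ring *r, uint64_t buffer_bus_addr ) {
	struct rx_desc *d = &r->desc[r->tail];

	memset ( d, 0, sizeof ( *d ) );      /* clears the DD/status bits */
	d->buffer_addr = buffer_bus_addr;    /* re-attach a DMA buffer */

	r->tail = ( r->tail + 1 ) % NUM_RX_DESC;
	write_rdt ( r->tail );               /* hardware may now refill this slot */
}

int main ( void ) {
	static struct rx_ring ring;
	rx_ring_recycle ( &ring, 0x1000 );
	return ring.desc[0].status;          /* 0: slot was cleared */
}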
@@ -885,17 +955,13 @@ e1000_irq ( struct net_device *netdev, int enable )

 switch ( enable ) {
 case 0 :
- E1000_WRITE_REG ( &adapter->hw, IMC, ~0 );
- E1000_WRITE_FLUSH ( &adapter->hw );
+ e1000_irq_enable ( adapter );
 break;
 case 1 :
- E1000_WRITE_REG ( &adapter->hw, IMS,
- E1000_IMS_RXT0 | E1000_IMS_RXSEQ );
- E1000_WRITE_FLUSH ( &adapter->hw );
+ e1000_irq_disable ( adapter );
 break;
- /* FIXME: Etherboot has a "FORCE" action, does gPXE? */
 case 2 :
- E1000_WRITE_REG ( &adapter->hw, ICS, E1000_ICS_RXT0 );
+ e1000_irq_force ( adapter );
 break;
 }
 }
@@ -921,53 +987,58 @@ e1000_probe ( struct pci_device *pdev,
 unsigned long flash_start, flash_len;

 DBG ( "e1000_probe\n" );
-
+
 err = -ENOMEM;

- /* Allocate net device (also allocates memory for netdev->priv
- and makes netdev-priv point to it
- */
+ /* Allocate net device ( also allocates memory for netdev->priv
+ and makes netdev-priv point to it ) */
 netdev = alloc_etherdev ( sizeof ( struct e1000_adapter ) );
 if ( ! netdev )
 goto err_alloc_etherdev;
-
- pci_set_drvdata ( pdev, netdev );
-
+
+ /* Associate e1000-specific network operations operations with
+ * generic network device layer */
+ netdev_init ( netdev, &e1000_operations );
+
+ /* Associate this network device with given PCI device */
+ pci_set_drvdata ( pdev, netdev );
+ netdev->dev = &pdev->dev;
+
+ /* Initialize driver private storage */
 adapter = netdev_priv ( netdev );
- memset ( adapter, 0, ( sizeof ( struct e1000_adapter ) ) );
-
- /* Enable PCI device associated with this NIC device */
- adjust_pci_device ( pdev );
-
- adapter->ioaddr = pdev->ioaddr;
- adapter->irqno = pdev->irq;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- adapter->hw.back = adapter;
-
+ memset ( adapter, 0, ( sizeof ( *adapter ) ) );
+
+ adapter->hw.io_base = pdev->ioaddr;
+ adapter->ioaddr = pdev->ioaddr;
+ adapter->irqno = pdev->irq;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->eeprom_wol = 0;
+ adapter->wol = adapter->eeprom_wol;
+#if 0
+ adapter->en_mng_pt = 0;
+ adapter->rx_int_delay = 0;
+ adapter->rx_abs_int_delay = 0;
+#endif
 mmio_start = pci_bar_start ( pdev, PCI_BASE_ADDRESS_0 );
 mmio_len = pci_bar_size ( pdev, PCI_BASE_ADDRESS_0 );

+ DBG ( "mmio_start: %#08lx\n", mmio_start );
+ DBG ( "mmio_len: %#08lx\n", mmio_len );
+
+ /* Fix up PCI device */
+ adjust_pci_device ( pdev );
+
 err = -EIO;

 adapter->hw.hw_addr = ioremap ( mmio_start, mmio_len );
+
+ DBG ( "adapter->hw.hw_addr: %p\n", adapter->hw.hw_addr );
+
 if ( ! adapter->hw.hw_addr )
 goto err_ioremap;

- for ( i = BAR_1; i <= BAR_5; i++ ) {
- if ( pci_bar_size ( pdev, i ) == 0 )
- continue;
- if ( pci_find_capability ( pdev, i ) & IORESOURCE_IO ) {
- adapter->hw.io_base = pci_bar_start ( pdev, i );
- break;
- }
- }
-
- /* Associate e1000-specific network operations operations with
- * generic network device layer
- */
- netdev_init ( netdev, &e1000_operations );
-
 /* setup the private structure */
 if ( ( err = e1000_sw_init ( adapter ) ) )
 goto err_sw_init;
@@ -994,7 +1065,11 @@ e1000_probe ( struct pci_device *pdev,
 /* before reading the EEPROM, reset the controller to
 * put the device in a known good starting state
 */
- e1000_reset_hw ( &adapter->hw );
+ err = e1000_reset_hw ( &adapter->hw );
+ if ( err < 0 ) {
+ DBG ( "Hardware Initialization Failed\n" );
+ goto err_reset;
+ }

 /* make sure the EEPROM is good */
 if ( e1000_validate_eeprom_checksum( &adapter->hw ) < 0 ) {
@@ -1008,16 +1083,40 @@ e1000_probe ( struct pci_device *pdev,

 memcpy ( netdev->ll_addr, adapter->hw.mac_addr, ETH_ALEN );

+ /* print bus type/speed/width info */
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ DBG ( "(PCI%s:%s:%s) ",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+ (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+ ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+ (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+ (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+ (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+ (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+ "32-bit"));
+ }
+ for (i = 0; i < 6; i++)
+ DBG ("%02x%s", netdev->ll_addr[i], i == 5 ? "\n" : ":");
+
 /* reset the hardware with the new settings */
 e1000_reset ( adapter );
+
+ e1000_get_hw_control ( adapter );

 if ( ( err = register_netdev ( netdev ) ) != 0)
 goto err_register;
+
+ DBG ( "e1000_probe succeeded!\n" );

 /* No errors, return success */
 return 0;

 /* Error return paths */
+err_reset:
 err_register:
 err_eeprom:
 if ( ! e1000_check_phy_reset_block ( &adapter->hw ) )
@@ -1069,20 +1168,23 @@ e1000_open ( struct net_device *netdev )

 /* allocate transmit descriptors */
 err = e1000_setup_tx_resources ( adapter );
- if (err)
+ if (err) {
 goto err_setup_tx;
+ DBG ( "Error setting up TX resources!\n" );
+ }

 /* allocate receive descriptors */
 err = e1000_setup_rx_resources ( adapter );
- if (err)
+ if (err) {
+ DBG ( "Error setting up RX resources!\n" );
 goto err_setup_rx;
+ }

- e1000_power_up_phy ( adapter );
-
- /* disable firmware control */
- e1000_init_manageability ( adapter );
+ e1000_configure_tx ( adapter );

- e1000_configure ( adapter );
+ e1000_configure_rx ( adapter );
+
+ e1000_irq_enable ( adapter );

 return E1000_SUCCESS;
