@@ -106,11 +106,13 @@ struct natsemi_nic {
 	unsigned short rx_cur;
 	struct natsemi_tx tx[TX_RING_SIZE];
 	struct natsemi_rx rx[NUM_RX_DESC];
+
 	/* need to add iobuf as we cannot free iobuf->data in close without this
 	 * alternatively substracting sizeof(head) and sizeof(list_head) can also
 	 * give the same.
 	 */
 	struct io_buffer *iobuf[NUM_RX_DESC];
+
 	/* netdev_tx_complete needs pointer to the iobuf of the data so as to free
 	 * it from the memory.
 	 */
@@ -154,6 +156,7 @@ enum register_offsets {
 	PhyStatus = 0xC0,
 	MIntrCtrl = 0xC4,
 	MIntrStatus = 0xC8,
+
 	/* These are from the spec, around page 78... on a separate table.
 	 */
 	PGSEL = 0xCC,
@@ -238,7 +241,7 @@ static int nat_spi_read_bit ( struct bit_basher *basher,
 	uint8_t mask = nat_ee_bits[bit_id];
 	uint8_t eereg;
 
-	eereg = inb ( nat->ioaddr + EE_REG);
+	eereg = inb ( nat->ioaddr + EE_REG );
 	return ( eereg & mask );
 }
 
@@ -252,7 +255,7 @@ static void nat_spi_write_bit ( struct bit_basher *basher,
 	eereg = inb ( nat->ioaddr + EE_REG );
 	eereg &= ~mask;
 	eereg |= ( data & mask );
-	outb ( eereg, nat->ioaddr + EE_REG);
+	outb ( eereg, nat->ioaddr + EE_REG );
 }
 
 static struct bit_basher_operations nat_basher_ops = {
@@ -302,26 +305,27 @@ static struct nvo_fragment nat_nvo_fragments[] = {
 static void nat_reset ( struct natsemi_nic *nat ) {
 
 	int i;
+
 	/* Reset chip
 	 */
 	outl ( ChipReset, nat->ioaddr + ChipCmd );
 	mdelay ( 10 );
-	nat->tx_dirty=0;
-	nat->tx_cur=0;
-	for(i=0;i<TX_RING_SIZE;i++) {
-		nat->tx[i].link=0;
-		nat->tx[i].cmdsts=0;
-		nat->tx[i].bufptr=0;
+	nat->tx_dirty = 0;
+	nat->tx_cur = 0;
+	for ( i = 0 ; i < TX_RING_SIZE ; i++ ) {
+		nat->tx[i].link = 0;
+		nat->tx[i].cmdsts = 0;
+		nat->tx[i].bufptr = 0;
 	}
 	nat->rx_cur = 0;
-	outl(virt_to_bus(&nat->tx[0]),nat->ioaddr+TxRingPtr);
-	outl(virt_to_bus(&nat->rx[0]), nat->ioaddr + RxRingPtr);
+	outl ( virt_to_bus( &nat->tx[0] ),nat->ioaddr + TxRingPtr );
+	outl ( virt_to_bus( &nat->rx[0] ),nat->ioaddr + RxRingPtr );
 
-	outl(TxOff|RxOff, nat->ioaddr + ChipCmd);
+	outl ( TxOff|RxOff, nat->ioaddr + ChipCmd );
 
 	/* Restore PME enable bit
 	 */
-	outl(SavedClkRun, nat->ioaddr + ClkRun);
+	outl ( SavedClkRun, nat->ioaddr + ClkRun );
 }
 
 /*
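
The link/cmdsts/bufptr triple cleared in nat_reset() matches the DP83815's three-word descriptor format. For readers who only have this diff, a sketch of the shape the code assumes — the real struct natsemi_tx / struct natsemi_rx definitions live earlier in natsemi.c and are not part of this patch, and the uint32_t field types here are an assumption from the datasheet's 32-bit descriptor words:

	/* Sketch of the descriptor layout implied above (not the
	 * driver's actual definition).  Each descriptor is three
	 * little-endian 32-bit words, per the DP83815 datasheet.
	 */
	struct natsemi_desc_sketch {
		uint32_t link;    /* bus address of next descriptor in the ring */
		uint32_t cmdsts;  /* OWN/status bits high, buffer size low */
		uint32_t bufptr;  /* bus address of the packet buffer */
	};
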
@@ -342,85 +346,86 @@ static int nat_open ( struct net_device *netdev ) {
 	 * With PME set the chip will scan incoming packets but
 	 * nothing will be written to memory.
 	 */
-	SavedClkRun = inl(nat->ioaddr + ClkRun);
-	outl(SavedClkRun & ~0x100, nat->ioaddr + ClkRun);
+	SavedClkRun = inl ( nat->ioaddr + ClkRun );
+	outl ( SavedClkRun & ~0x100, nat->ioaddr + ClkRun );
 
 	/* Setting up Mac address in the NIC
 	 */
 	for ( i = 0 ; i < ETH_ALEN ; i+=2 ) {
-		outl(i,nat->ioaddr+RxFilterAddr);
-		outw ( netdev->ll_addr[i] + (netdev->ll_addr[i+1]<<8),
-		       nat->ioaddr +RxFilterData);
+		outl ( i,nat->ioaddr + RxFilterAddr );
+		outw ( netdev->ll_addr[i] + ( netdev->ll_addr[i + 1] << 8 ),
+		       nat->ioaddr + RxFilterData );
 	}
 
 	/*Set up the Tx Ring
 	 */
-	nat->tx_cur=0;
-	nat->tx_dirty=0;
-	for (i=0;i<TX_RING_SIZE;i++) {
-		nat->tx[i].link = virt_to_bus((i+1 < TX_RING_SIZE) ? &nat->tx[i+1] : &nat->tx[0]);
+	nat->tx_cur = 0;
+	nat->tx_dirty = 0;
+	for ( i = 0 ; i < TX_RING_SIZE ; i++ ) {
+		nat->tx[i].link = virt_to_bus ( ( i + 1 < TX_RING_SIZE ) ? &nat->tx[i + 1] : &nat->tx[0] );
 		nat->tx[i].cmdsts = 0;
 		nat->tx[i].bufptr = 0;
 	}
 
 	/* Set up RX ring
 	 */
-	nat->rx_cur=0;
-	for (i=0;i<NUM_RX_DESC;i++) {
+	nat->rx_cur = 0;
+	for ( i = 0 ; i < NUM_RX_DESC ; i++ ) {
 		nat->iobuf[i] = alloc_iob ( RX_BUF_SIZE );
-		if (!nat->iobuf[i])
+		if ( !nat->iobuf[i] )
 			goto memory_alloc_err;
-		nat->rx[i].link = virt_to_bus((i+1 < NUM_RX_DESC) ? &nat->rx[i+1] : &nat->rx[0]);
+		nat->rx[i].link = virt_to_bus ( ( i + 1 < NUM_RX_DESC ) ? &nat->rx[i + 1] : &nat->rx[0] );
 		nat->rx[i].cmdsts = RX_BUF_SIZE;
-		nat->rx[i].bufptr = virt_to_bus(nat->iobuf[i]->data);
+		nat->rx[i].bufptr = virt_to_bus ( nat->iobuf[i]->data );
 	}
 
 	/* load Receive Descriptor Register
 	 */
-	outl(virt_to_bus(&nat->rx[0]), nat->ioaddr + RxRingPtr);
-	DBG("Natsemi Rx descriptor loaded with: %X\n",
-	    (unsigned int)inl(nat->ioaddr+RxRingPtr));
+	outl ( virt_to_bus ( &nat->rx[0] ), nat->ioaddr + RxRingPtr );
+	DBG ( "Natsemi Rx descriptor loaded with: %X\n",
+	      (unsigned int) inl ( nat->ioaddr + RxRingPtr ) );
 
 	/* setup Tx ring
 	 */
-	outl(virt_to_bus(&nat->tx[0]),nat->ioaddr+TxRingPtr);
-	DBG("Natsemi Tx descriptor loaded with: %X\n",
-	    (unsigned int)inl(nat->ioaddr+TxRingPtr));
+	outl ( virt_to_bus ( &nat->tx[0] ),nat->ioaddr + TxRingPtr );
+	DBG ( "Natsemi Tx descriptor loaded with: %X\n",
+	      (unsigned int)inl ( nat->ioaddr + TxRingPtr ) );
 
 	/* Enables RX
 	 */
-	outl(RxFilterEnable|AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys,
-	     nat->ioaddr+RxFilterAddr);
+	outl ( RxFilterEnable|AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys,
+	       nat->ioaddr + RxFilterAddr );
 
 	/* Initialize other registers.
 	 * Configure the PCI bus bursts and FIFO thresholds.
 	 * Configure for standard, in-spec Ethernet.
 	 */
-	if (inl(nat->ioaddr + ChipConfig) & 0x20000000) { /* Full duplex */
+	if ( inl ( nat->ioaddr + ChipConfig ) & 0x20000000 ) { /* Full duplex */
 		tx_config = 0xD0801002;
 		rx_config = 0x10000020;
 	} else {
 		tx_config = 0x10801002;
 		rx_config = 0x0020;
 	}
-	outl(tx_config, nat->ioaddr + TxConfig);
-	outl(rx_config, nat->ioaddr + RxConfig);
+	outl ( tx_config, nat->ioaddr + TxConfig );
+	outl ( rx_config, nat->ioaddr + RxConfig );
 
 	/*start the receiver
 	 */
-	outl(RxOn, nat->ioaddr + ChipCmd);
+	outl ( RxOn, nat->ioaddr + ChipCmd );
 
 	/* mask the interrupts. note interrupt is not enabled here
 	 */
 	return 0;
 
 memory_alloc_err:
+
 	/* this block frees the previously allocated buffers
 	 * if memory for all the buffers is not available
 	 */
-	i=0;
-	while(nat->rx[i].cmdsts == RX_BUF_SIZE) {
-		free_iob(nat->iobuf[i]);
+	i = 0;
+	while ( nat->rx[i].cmdsts == RX_BUF_SIZE ) {
+		free_iob ( nat->iobuf[i] );
 		i++;
 	}
 	return -ENOMEM;
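
Both rings in nat_open() use the same circular-link idiom: each descriptor's link field holds the bus address of the next descriptor, and the last wraps back to the first, so the NIC can walk the chain indefinitely. Written out generically — a sketch only, where N and ring stand in for TX_RING_SIZE/NUM_RX_DESC and the corresponding array:

	/* Generic form of the ring-linking loop above (sketch).
	 * The last descriptor points back at descriptor 0.
	 */
	for ( i = 0 ; i < N ; i++ )
		ring[i].link = virt_to_bus ( ( i + 1 < N ) ?
					     &ring[i + 1] : &ring[0] );
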
@@ -434,15 +439,16 @@ memory_alloc_err:
 static void nat_close ( struct net_device *netdev ) {
 	struct natsemi_nic *nat = netdev->priv;
 	int i;
+
 	/* Reset the hardware to disable everything in one go
 	 */
 	nat_reset ( nat );
 
 	/* Free RX ring
 	 */
-	for (i=0;i<NUM_RX_DESC;i++) {
+	for ( i = 0; i < NUM_RX_DESC ; i++ ) {
 
-		free_iob( nat->iobuf[i] );
+		free_iob ( nat->iobuf[i] );
 	}
 }
 
@@ -458,14 +464,14 @@ static int nat_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
 
 	/* check for space in TX ring
 	 */
-	if (nat->tx[nat->tx_cur].cmdsts !=0) {
-		DBG( "TX overflow\n" );
+	if ( nat->tx[nat->tx_cur].cmdsts != 0 ) {
+		DBG ( "TX overflow\n" );
 		return -ENOBUFS;
 	}
 
 	/* to be used in netdev_tx_complete
 	 */
-	nat->tx_iobuf[nat->tx_cur]=iobuf;
+	nat->tx_iobuf[nat->tx_cur] = iobuf;
 
 	/* Pad and align packet has not been used because its not required here
 	 * iob_pad ( iobuf, ETH_ZLEN ); can be used to achieve it
@@ -473,18 +479,19 @@ static int nat_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
 
 	/* Add to TX ring
 	 */
-	DBG ( "TX id %d at %lx+%x\n", nat->tx_cur,
+	DBG ( "TX id %d at %lx + %x\n", nat->tx_cur,
 	      virt_to_bus ( &iobuf->data ), iob_len ( iobuf ) );
 
-	nat->tx[nat->tx_cur].bufptr = virt_to_bus(iobuf->data);
-	nat->tx[nat->tx_cur].cmdsts= iob_len(iobuf)|OWN;
+	nat->tx[nat->tx_cur].bufptr = virt_to_bus ( iobuf->data );
+	nat->tx[nat->tx_cur].cmdsts = iob_len ( iobuf ) | OWN;
+
 	/* increment the circular buffer pointer to the next buffer location
 	 */
-	nat->tx_cur=(nat->tx_cur+1) % TX_RING_SIZE;
+	nat->tx_cur = ( nat->tx_cur + 1 ) % TX_RING_SIZE;
 
 	/*start the transmitter
 	 */
-	outl(TxOn, nat->ioaddr + ChipCmd);
+	outl ( TxOn, nat->ioaddr + ChipCmd );
 
 	return 0;
 }
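
The cmdsts write is the real handover point: the buffer size occupies the low bits of cmdsts and OWN is a high status bit in the DP83815 descriptor format, so the two can simply be OR'd, and once OWN is set the NIC may begin DMA at any moment. A condensed sketch of the ordering the code above relies on (desc is a hypothetical pointer to the current descriptor):

	/* Handover sketch: fill bufptr first, write cmdsts last --
	 * setting OWN transfers the descriptor to the NIC.
	 */
	desc->bufptr = virt_to_bus ( iobuf->data );  /* where the frame lives */
	desc->cmdsts = iob_len ( iobuf ) | OWN;      /* size + give to NIC */
	outl ( TxOn, nat->ioaddr + ChipCmd );        /* kick the transmitter */
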
@@ -506,74 +513,80 @@ static void nat_poll ( struct net_device *netdev) {
 
 	/* read the interrupt register
 	 */
-	intr_status=inl(nat->ioaddr+IntrStatus);
-	if(!intr_status)
-		goto end;
+	intr_status = inl ( nat->ioaddr + IntrStatus );
+	if ( !intr_status )
+		goto end;
 
 	/* check the status of packets given to card for transmission
 	 */
-	DBG("Intr status %X\n",intr_status);
+	DBG ( "Intr status %X\n",intr_status );
 
-	i=nat->tx_dirty;
-	while(i!=nat->tx_cur) {
-		status=nat->tx[nat->tx_dirty].cmdsts;
-		DBG("value of tx_dirty = %d tx_cur=%d status=%X\n",
-		    nat->tx_dirty,nat->tx_cur,status);
+	i = nat->tx_dirty;
+	while ( i!= nat->tx_cur ) {
+		status = nat->tx[nat->tx_dirty].cmdsts;
+		DBG ( "value of tx_dirty = %d tx_cur=%d status=%X\n",
+		      nat->tx_dirty,nat->tx_cur,status );
 
 		/* check if current packet has been transmitted or not
 		 */
-		if(status & OWN)
+		if ( status & OWN )
 			break;
+
 		/* Check if any errors in transmission
 		 */
-		if (! (status & DescPktOK)) {
-			DBG("Error in sending Packet status:%X\n",
-			    (unsigned int)status);
-			netdev_tx_complete_err(netdev,nat->tx_iobuf[nat->tx_dirty],-EINVAL);
+		if (! ( status & DescPktOK ) ) {
+			DBG ( "Error in sending Packet status:%X\n",
+			      (unsigned int) status );
+			netdev_tx_complete_err ( netdev,nat->tx_iobuf[nat->tx_dirty],-EINVAL );
 		} else {
-			DBG("Success in transmitting Packet\n");
-			netdev_tx_complete(netdev,nat->tx_iobuf[nat->tx_dirty]);
+			DBG ( "Success in transmitting Packet\n" );
+			netdev_tx_complete ( netdev,nat->tx_iobuf[nat->tx_dirty] );
 		}
+
 		/* setting cmdsts zero, indicating that it can be reused
 		 */
-		nat->tx[nat->tx_dirty].cmdsts=0;
-		nat->tx_dirty=(nat->tx_dirty +1) % TX_RING_SIZE;
-		i=(i+1) % TX_RING_SIZE;
+		nat->tx[nat->tx_dirty].cmdsts = 0;
+		nat->tx_dirty = ( nat->tx_dirty + 1 ) % TX_RING_SIZE;
+		i = ( i + 1 ) % TX_RING_SIZE;
 	}
 
 	/* Handle received packets
 	 */
-	rx_status=(unsigned int)nat->rx[nat->rx_cur].cmdsts;
-	while ((rx_status & OWN)) {
-		rx_len= (rx_status & DSIZE) - CRC_SIZE;
+	rx_status = (unsigned int) nat->rx[nat->rx_cur].cmdsts;
+	while ( ( rx_status & OWN ) ) {
+		rx_len = ( rx_status & DSIZE ) - CRC_SIZE;
+
 		/*check for the corrupt packet
 		 */
-		if((rx_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
-			DBG("natsemi_poll: Corrupted packet received, "
-			    "buffer status = %X ^ %X \n",rx_status,
-			    (unsigned int) nat->rx[nat->rx_cur].cmdsts);
-			netdev_rx_err(netdev,NULL,-EINVAL);
+		if ( ( rx_status & ( DescMore|DescPktOK|RxTooLong ) ) != DescPktOK) {
+			DBG ( "natsemi_poll: Corrupted packet received, "
+			      "buffer status = %X ^ %X \n",rx_status,
+			      (unsigned int) nat->rx[nat->rx_cur].cmdsts );
+			netdev_rx_err ( netdev,NULL,-EINVAL );
 		} else {
-			rx_iob = alloc_iob(rx_len);
-			if(!rx_iob)
+			rx_iob = alloc_iob ( rx_len );
+
+			if ( !rx_iob )
 				/* leave packet for next call to poll
 				 */
 				goto end;
-			memcpy(iob_put(rx_iob,rx_len),
-			       nat->iobuf[nat->rx_cur]->data,rx_len);
-			DBG("received packet\n");
+			memcpy ( iob_put ( rx_iob,rx_len ),
+				 nat->iobuf[nat->rx_cur]->data,rx_len );
+			DBG ( "received packet\n" );
+
 			/* add to the receive queue.
 			 */
-			netdev_rx(netdev,rx_iob);
+			netdev_rx ( netdev,rx_iob );
 		}
 		nat->rx[nat->rx_cur].cmdsts = RX_BUF_SIZE;
-		nat->rx_cur=(nat->rx_cur+1) % NUM_RX_DESC;
-		rx_status=nat->rx[nat->rx_cur].cmdsts;
+		nat->rx_cur = ( nat->rx_cur + 1 ) % NUM_RX_DESC;
+		rx_status = nat->rx[nat->rx_cur].cmdsts;
 	}
 end:
+
 	/* re-enable the potentially idle receive state machine
 	 */
-	outl(RxOn, nat->ioaddr + ChipCmd);
+	outl ( RxOn, nat->ioaddr + ChipCmd );
 }
 
 /**
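
nat_poll() reaps the TX ring with two chasing indices: tx_cur marks where nat_transmit() will queue the next packet, and tx_dirty trails behind it, reaping completed descriptors until it either catches up or meets a descriptor the NIC still owns. The skeleton of that loop, stripped of the debug and completion calls (a sketch, with the driver's fields written as bare locals):

	/* Skeleton of the TX reap loop above (sketch, details elided). */
	while ( tx_dirty != tx_cur ) {
		if ( tx[tx_dirty].cmdsts & OWN )
			break;                     /* NIC still owns it */
		/* ... report completion for tx_iobuf[tx_dirty] ... */
		tx[tx_dirty].cmdsts = 0;           /* slot reusable again */
		tx_dirty = ( tx_dirty + 1 ) % TX_RING_SIZE;
	}

Zeroing cmdsts here is also what makes the `cmdsts != 0` overflow test in nat_transmit() work: a zero cmdsts is the ring's "free slot" marker.
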
@@ -583,11 +596,11 @@ end:
  * @v enable	Interrupts should be enabled
  */
 static void nat_irq ( struct net_device *netdev, int enable ) {
-	struct natsemi_nic *nat= netdev->priv;
+	struct natsemi_nic *nat = netdev->priv;
 
-	outl((enable?(RxOk|RxErr|TxOk|TxErr):0),
+	outl ( ( enable ? ( RxOk|RxErr|TxOk|TxErr ) :0 ),
 	     nat->ioaddr + IntrMask);
-	outl((enable ? 1:0),nat->ioaddr +IntrEnable);
+	outl ( ( enable ? 1:0 ),nat->ioaddr + IntrEnable );
 }
 
 
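
nat_irq() is never called directly by the driver itself; it is attached through the operations table that nat_probe() hands to netdev_init() in the next hunk. That table is not shown in this diff, but a sketch of its likely shape, assuming gPXE's standard struct net_device_operations field names:

	/* Sketch of the nat_operations table referenced by
	 * netdev_init() below (assumed field names, not part of
	 * this patch).
	 */
	static struct net_device_operations nat_operations = {
		.open     = nat_open,
		.close    = nat_close,
		.transmit = nat_transmit,
		.poll     = nat_poll,
		.irq      = nat_irq,
	};
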
@@ -617,8 +630,8 @@ static int nat_probe ( struct pci_device *pci,
 	int rc;
 	int i;
 	uint8_t ll_addr_encoded[MAX_LL_ADDR_LEN];
-	uint8_t last=0;
-	uint8_t last1=0;
+	uint8_t last = 0;
+	uint8_t last1 = 0;
 	uint8_t prev_bytes[2];
 
 	/* Allocate net device
@@ -626,7 +639,7 @@ static int nat_probe ( struct pci_device *pci,
 	netdev = alloc_etherdev ( sizeof ( *nat ) );
 	if ( ! netdev )
 		return -ENOMEM;
-	netdev_init(netdev,&nat_operations);
+	netdev_init ( netdev,&nat_operations );
 	nat = netdev->priv;
 	pci_set_drvdata ( pci, netdev );
 	netdev->dev = &pci->dev;
@@ -641,16 +654,17 @@ static int nat_probe ( struct pci_device *pci,
 	 */
 	nat_reset ( nat );
 	nat_init_eeprom ( nat );
-	nvs_read ( &nat->eeprom.nvs, EE_MAC-1, prev_bytes, 1);
+	nvs_read ( &nat->eeprom.nvs, EE_MAC-1, prev_bytes, 1 );
 	nvs_read ( &nat->eeprom.nvs, EE_MAC, ll_addr_encoded, ETH_ALEN );
+
 	/* decoding the MAC address read from NVS
 	 * and save it in netdev->ll_addr
 	 */
-	last=prev_bytes[1]>>7;
-	for ( i = 0 ; i < ETH_ALEN ; i++) {
-		last1=ll_addr_encoded[i]>>7;
-		netdev->ll_addr[i]=ll_addr_encoded[i]<<1|last;
-		last=last1;
+	last = prev_bytes[1] >> 7;
+	for ( i = 0 ; i < ETH_ALEN ; i++ ) {
+		last1 = ll_addr_encoded[i] >> 7;
+		netdev->ll_addr[i] = ll_addr_encoded[i] << 1 | last;
+		last = last1;
 	}
 
 	/* Register network device
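
The decode loop above implies that the EEPROM stores the station address shifted right by one bit across byte boundaries: each decoded byte is the stored byte shifted left one position, with the most significant bit of the *previous* stored byte rotated into bit 0 (which is why one byte before EE_MAC is read into prev_bytes first). A commented restatement with a worked example — the sample values are illustrative only:

	/* EEPROM MAC decode (sketch of the loop above).  Example:
	 * stored byte 0x40 with a previous-byte MSB of 1 decodes to
	 * ( 0x40 << 1 ) | 1 = 0x81.
	 */
	last = prev_bytes[1] >> 7;               /* MSB of byte before EE_MAC */
	for ( i = 0 ; i < ETH_ALEN ; i++ ) {
		last1 = ll_addr_encoded[i] >> 7; /* carry for the next byte */
		netdev->ll_addr[i] = ( ll_addr_encoded[i] << 1 ) | last;
		last = last1;
	}
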
@@ -661,9 +675,11 @@ static int nat_probe ( struct pci_device *pci,
 	return 0;
 
 err_register_netdev:
+
 	/* Disable NIC
 	 */
 	nat_reset ( nat );
+
 	/* Free net device
 	 */
 	netdev_put ( netdev );