Browse Source

[golan] Bug fixes and improved paging allocation method

Updates:
- Revert support for clearing interrupts via BAR

Signed-off-by: Raed Salem <raeds@mellanox.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
tags/v1.20.1
Raed Salem 7 years ago
parent
commit
1ff1eebcf7

+ 1
- 0
src/Makefile View File

@@ -89,6 +89,7 @@ SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig
89 89
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac
90 90
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds
91 91
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed
92
+SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu
92 93
 SRCDIRS		+= drivers/infiniband/mlx_nodnic/src
93 94
 SRCDIRS		+= drivers/usb
94 95
 SRCDIRS		+= interface/pxe interface/efi interface/smbios

+ 22
- 7
src/drivers/infiniband/flexboot_nodnic.c View File

@@ -44,6 +44,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
44 44
 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
45 45
 #include "mlx_utils/include/public/mlx_pci_gw.h"
46 46
 #include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
47
+#include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h"
47 48
 
48 49
 /***************************************************************************
49 50
  *
@@ -823,6 +824,7 @@ static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused
823 824
 		netdev_rx_err ( netdev, iobuf, -ENOTTY );
824 825
 		return;
825 826
 	}
827
+
826 828
 	netdev_rx ( netdev, iobuf );
827 829
 }
828 830
 
@@ -907,6 +909,7 @@ static int flexboot_nodnic_eth_open ( struct net_device *netdev ) {
907 909
 	list_del(&port->eth_qp->send.list);
908 910
 	list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
909 911
 	port->eth_qp->recv.cq = port->eth_cq;
912
+	port->cmdsn = 0;
910 913
 	list_del(&port->eth_qp->recv.list);
911 914
 	list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
912 915
 
@@ -1445,12 +1448,6 @@ static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic )
1445 1448
 	struct pci_device *pci = flexboot_nodnic->pci;
1446 1449
 	nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1447 1450
 
1448
-	if ( ! flexboot_nodnic->device_priv.utils ) {
1449
-		uar->virt = NULL;
1450
-		DBGC ( flexboot_nodnic, "%s: mlx_utils is not initialized \n", __FUNCTION__ );
1451
-		return -EINVAL;
1452
-	}
1453
-
1454 1451
 	if  ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
1455 1452
 		DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ );
1456 1453
 		return -ENOTSUP;
@@ -1467,6 +1464,18 @@ static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic )
1467 1464
 	return status;
1468 1465
 }
1469 1466
 
1467
+static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
1468
+       nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1469
+
1470
+       if ( uar->virt ) {
1471
+               iounmap( uar->virt );
1472
+               uar->virt = NULL;
1473
+       }
1474
+
1475
+       return MLX_SUCCESS;
1476
+}
1477
+
1478
+
1470 1479
 int flexboot_nodnic_probe ( struct pci_device *pci,
1471 1480
 		struct flexboot_nodnic_callbacks *callbacks,
1472 1481
 		void *drv_priv __unused ) {
@@ -1508,6 +1517,10 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
1508 1517
 	MLX_FATAL_CHECK_STATUS(status, get_cap_err,
1509 1518
 					"nodnic_device_get_cap failed");
1510 1519
 
1520
+	if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) {
1521
+                MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" );
1522
+        }
1523
+
1511 1524
 	status =  flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
1512 1525
 	MLX_FATAL_CHECK_STATUS(status, err_set_masking,
1513 1526
 						"flexboot_nodnic_set_port_masking failed");
@@ -1522,7 +1535,7 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
1522 1535
 						"flexboot_nodnic_thin_init_ports failed");
1523 1536
 
1524 1537
 	if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
1525
-		DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_pci_init failed"
1538
+		DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed"
1526 1539
 				" ( status = %d )\n",__FUNCTION__, status );
1527 1540
 	}
1528 1541
 
@@ -1550,6 +1563,7 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
1550 1563
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1551 1564
 reg_err:
1552 1565
 err_set_ports_types:
1566
+	flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1553 1567
 err_thin_init_ports:
1554 1568
 err_alloc_ibdev:
1555 1569
 err_set_masking:
@@ -1568,6 +1582,7 @@ void flexboot_nodnic_remove ( struct pci_device *pci )
1568 1582
 	struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
1569 1583
 	nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );
1570 1584
 
1585
+	flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1571 1586
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1572 1587
 	nodnic_device_teardown( device_priv );
1573 1588
 	free_mlx_utils ( & device_priv->utils );

+ 1
- 0
src/drivers/infiniband/flexboot_nodnic.h View File

@@ -42,6 +42,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
42 42
 #define FLEXBOOT_NODNIC_PAGE_SHIFT	12
43 43
 #define	FLEXBOOT_NODNIC_PAGE_SIZE		(1 << FLEXBOOT_NODNIC_PAGE_SHIFT)
44 44
 #define FLEXBOOT_NODNIC_PAGE_MASK		(FLEXBOOT_NODNIC_PAGE_SIZE - 1)
45
+#define EN_DEFAULT_ADMIN_MTU 1522
45 46
 
46 47
 /* Port protocol */
47 48
 enum flexboot_nodnic_protocol {

+ 73
- 139
src/drivers/infiniband/golan.c View File

@@ -42,80 +42,47 @@ FILE_LICENCE ( GPL2_OR_LATER );
42 42
 #include "mlx_utils/include/public/mlx_bail.h"
43 43
 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
44 44
 
45
+
45 46
 #define DEVICE_IS_CIB( device ) ( device == 0x1011 )
47
+
46 48
 /******************************************************************************/
47 49
 /************* Very simple memory management for umalloced pages **************/
48 50
 /******* Temporary solution until full memory management is implemented *******/
49 51
 /******************************************************************************/
52
+
50 53
 struct golan_page {
51 54
 	struct list_head list;
52 55
 	userptr_t addr;
53 56
 };
54 57
 
55
-static void golan_free_pages ( struct list_head *head ) {
56
-	struct golan_page *page, *tmp;
57
-	list_for_each_entry_safe ( page, tmp, head, list ) {
58
-		list_del ( &page->list );
59
-		ufree ( page->addr );
60
-		free ( page );
58
+static void golan_free_fw_areas ( struct golan *golan ) {
59
+	int i;
60
+
61
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
62
+		if ( golan->fw_areas[i].area ) {
63
+			ufree ( golan->fw_areas[i].area );
64
+			golan->fw_areas[i].area = UNULL;
65
+		}
61 66
 	}
62 67
 }
63 68
 
64
-static int golan_init_pages ( struct list_head *head ) {
65
-	int rc = 0;
69
+static int golan_init_fw_areas ( struct golan *golan ) {
70
+	int rc = 0, i =  0;
66 71
 
67
-	if ( !head ) {
72
+	if ( ! golan ) {
68 73
 		rc = -EINVAL;
69
-		goto err_golan_init_pages_bad_param;
74
+		goto err_golan_init_fw_areas_bad_param;
70 75
 	}
71 76
 
72
-	INIT_LIST_HEAD ( head );
73
-	return rc;
77
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
78
+		golan->fw_areas[i].area = UNULL;
74 79
 
75
-err_golan_init_pages_bad_param:
76 80
 	return rc;
77
-}
78
-
79
-static userptr_t golan_get_page ( struct list_head *head ) {
80
-	struct golan_page *page;
81
-	userptr_t addr;
82
-
83
-	if ( list_empty ( head ) ) {
84
-		addr = umalloc ( GOLAN_PAGE_SIZE );
85
-		if ( addr == UNULL ) {
86
-			goto err_golan_iget_page_alloc_page;
87
-		}
88
-	} else {
89
-		page = list_first_entry ( head, struct golan_page, list );
90
-		list_del ( &page->list );
91
-		addr = page->addr;
92
-		free ( page );
93
-	}
94
-err_golan_iget_page_alloc_page:
95
-	return addr;
96
-}
97
-
98
-static int golan_return_page ( struct list_head *head,
99
-		userptr_t addr ) {
100
-	struct golan_page *new_entry;
101
-	int rc = 0;
102
-
103
-	if ( ! head ) {
104
-		rc = -EINVAL;
105
-		goto err_golan_return_page_bad_param;
106
-	}
107
-	new_entry = zalloc ( sizeof ( *new_entry ) );
108
-	if ( new_entry == NULL ) {
109
-		rc = -ENOMEM;
110
-		goto err_golan_return_page_alloc_page;
111
-	}
112
-	new_entry->addr = addr;
113
-	list_add_tail( &new_entry->list, head );
114 81
 
115
-err_golan_return_page_alloc_page:
116
-err_golan_return_page_bad_param:
82
+	err_golan_init_fw_areas_bad_param:
117 83
 	return rc;
118 84
 }
85
+
119 86
 /******************************************************************************/
120 87
 
121 88
 const char *golan_qp_state_as_string[] = {
@@ -177,16 +144,6 @@ static inline u8 xor8_buf(void *buf, int len)
177 144
 	return sum;
178 145
 }
179 146
 
180
-static inline int verify_block_sig(struct golan_cmd_prot_block *block)
181
-{
182
-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
183
-		return -EINVAL;
184
-
185
-	if (xor8_buf(block, sizeof(*block)) != 0xff)
186
-		return -EINVAL;
187
-	return 0;
188
-}
189
-
190 147
 static inline const char *cmd_status_str(u8 status)
191 148
 {
192 149
 	switch (status) {
@@ -258,24 +215,6 @@ static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
258 215
 	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
259 216
 }
260 217
 
261
-/**
262
-  * Get Golan FW
263
-  */
264
-static int fw_ver_and_cmdif ( struct golan *golan ) {
265
-	DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
266
-		golan->iseg->fw_rev,
267
-		golan->iseg->cmdif_rev_fw_sub,
268
-		fw_rev_maj ( golan ), fw_rev_min ( golan ),
269
-		fw_rev_sub ( golan ), cmdif_rev ( golan));
270
-
271
-	if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
272
-		DBGC (golan ,"CMDIF %d not supported current is %d\n",
273
-			cmdif_rev ( golan ), PXE_CMDIF_REF);
274
-		return 1;
275
-	}
276
-	return 0;
277
-}
278
-
279 218
 static inline void show_out_status(uint32_t *out)
280 219
 {
281 220
 	DBG("%x\n", be32_to_cpu(out[0]));
@@ -466,10 +405,8 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
466 405
 
467 406
 	while ( pages > 0 ) {
468 407
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
469
-		unsigned i;
470 408
 		struct golan_cmd_layout	*cmd;
471 409
 		struct golan_manage_pages_inbox *in;
472
-		struct golan_manage_pages_outbox_data *out;
473 410
 
474 411
 		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
475 412
 		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
@@ -485,11 +422,7 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
485 422
 		in->num_entries = cpu_to_be32(pas_num);
486 423
 
487 424
 		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
488
-			out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
489 425
 			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
490
-			for (i = 0; i < out_num_entries; ++i) {
491
-				golan_return_page ( &golan->pages, ( BE64_BUS_2_USR( out->pas[i] ) ) );
492
-			}
493 426
 		} else {
494 427
 			if ( rc == -EBUSY ) {
495 428
 				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
@@ -506,17 +439,29 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
506 439
 		pages -= out_num_entries;
507 440
 	}
508 441
 	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
509
-	return 0;
442
+	return rc;
510 443
 }
511 444
 
512
-static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
445
+static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
446
+		, __be16 func_id,struct golan_firmware_area *fw_area) {
513 447
 	struct mbox *mailbox;
514 448
 	int size_ibox = 0;
515 449
 	int size_obox = 0;
516 450
 	int rc = 0;
451
+	userptr_t next_page_addr = UNULL;
517 452
 
518 453
 	DBGC(golan, "%s\n", __FUNCTION__);
519
-
454
+	if ( ! fw_area->area ) {
455
+		fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
456
+		if ( fw_area->area == UNULL ) {
457
+			rc = -ENOMEM;
458
+			DBGC (golan ,"Failed to allocated %d pages \n",pages);
459
+			goto err_golan_alloc_fw_area;
460
+		}
461
+		fw_area->npages = pages;
462
+	}
463
+	assert ( fw_area->npages == pages );
464
+	next_page_addr = fw_area->area;
520 465
 	while ( pages > 0 ) {
521 466
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
522 467
 		unsigned i, j;
@@ -538,12 +483,9 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
538 483
 		in->func_id 	= func_id; /* Already BE */
539 484
 		in->num_entries = cpu_to_be32(pas_num);
540 485
 
541
-		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
542
-			if ( ! ( addr = golan_get_page ( & golan->pages ) ) ) {
543
-				rc = -ENOMEM;
544
-				DBGC (golan ,"Couldnt allocated page \n");
545
-				goto malloc_dma_failed;
546
-			}
486
+		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
487
+				next_page_addr += GOLAN_PAGE_SIZE ) {
488
+			addr = next_page_addr;
547 489
 			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
548 490
 				DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
549 491
 			}
@@ -563,7 +505,6 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
563 505
 						get_cmd( golan , MEM_CMD_IDX )->status_own,
564 506
 						be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
565 507
 			}
566
-			golan_return_page ( &golan->pages ,addr );
567 508
 			goto err_send_command;
568 509
 		}
569 510
 	}
@@ -571,7 +512,7 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
571 512
 	return 0;
572 513
 
573 514
 err_send_command:
574
-malloc_dma_failed:
515
+err_golan_alloc_fw_area:
575 516
 	/* Go over In box and free pages */
576 517
 	/* Send Error to FW */
577 518
 	/* What is next - Disable HCA? */
@@ -609,7 +550,7 @@ static inline int golan_handle_pages(struct golan *golan,
609 550
 	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
610 551
 
611 552
 	if ( mode == GOLAN_PAGES_GIVE ) {
612
-		rc = golan_provide_pages(golan, total_pages, func_id);
553
+		rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
613 554
 	} else {
614 555
 		rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
615 556
 		golan->total_dma_pages = 0;
@@ -799,16 +740,14 @@ static int golan_create_eq(struct golan *golan)
799 740
 	struct golan_cmd_layout	*cmd;
800 741
 	struct golan_create_eq_mbox_out *out;
801 742
 	int rc, i;
802
-	userptr_t addr;
803 743
 
804 744
 	eq->cons_index	= 0;
805 745
 	eq->size	= GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
806
-	addr		= golan_get_page ( &golan->pages );
807
-	if (!addr) {
746
+	eq->eqes	= malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
747
+	if (!eq->eqes) {
808 748
 		rc = -ENOMEM;
809 749
 		goto err_create_eq_eqe_alloc;
810 750
 	}
811
-	eq->eqes		= (struct golan_eqe *)user_to_virt(addr, 0);
812 751
 
813 752
 	/* Set EQEs ownership bit to HW ownership */
814 753
 	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
@@ -823,7 +762,7 @@ static int golan_create_eq(struct golan *golan)
823 762
 	in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
824 763
 
825 764
 	/* Fill the physical address of the page */
826
-	in->pas[0]		= USR_2_BE64_BUS(addr);
765
+	in->pas[0]		= VIRT_2_BE64_BUS( eq->eqes );
827 766
 	in->ctx.log_sz_usr_page	= cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
828 767
 	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
829 768
 	in->events_mask		= cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
@@ -842,7 +781,7 @@ static int golan_create_eq(struct golan *golan)
842 781
 	return 0;
843 782
 
844 783
 err_create_eq_cmd:
845
-	golan_return_page ( & golan->pages, virt_to_user ( eq->eqes ) );
784
+	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
846 785
 err_create_eq_eqe_alloc:
847 786
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
848 787
 	return rc;
@@ -867,7 +806,7 @@ static void golan_destory_eq(struct golan *golan)
867 806
 	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
868 807
 	GOLAN_PRINT_RC_AND_CMD_STATUS;
869 808
 
870
-	golan_return_page ( &golan->pages, virt_to_user ( golan->eq.eqes ) );
809
+	free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
871 810
 	golan->eq.eqn = 0;
872 811
 
873 812
 	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
@@ -1016,7 +955,6 @@ static int golan_create_cq(struct ib_device *ibdev,
1016 955
 	struct golan_create_cq_mbox_out *out;
1017 956
 	int	rc;
1018 957
 	unsigned int i;
1019
-	userptr_t addr;
1020 958
 
1021 959
 	golan_cq = zalloc(sizeof(*golan_cq));
1022 960
 	if (!golan_cq) {
@@ -1031,12 +969,11 @@ static int golan_create_cq(struct ib_device *ibdev,
1031 969
 		goto err_create_cq_db_alloc;
1032 970
 	}
1033 971
 
1034
-	addr = golan_get_page ( &golan->pages );
1035
-	if (!addr) {
972
+	golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
973
+	if (!golan_cq->cqes) {
1036 974
 		rc = -ENOMEM;
1037 975
 		goto err_create_cq_cqe_alloc;
1038 976
 	}
1039
-	golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);
1040 977
 
1041 978
 	/* Set CQEs ownership bit to HW ownership */
1042 979
 	for (i = 0; i < cq->num_cqes; ++i) {
@@ -1053,7 +990,7 @@ static int golan_create_cq(struct ib_device *ibdev,
1053 990
 	in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1054 991
 
1055 992
 	/* Fill the physical address of the page */
1056
-	in->pas[0]		= USR_2_BE64_BUS(addr);
993
+	in->pas[0]		= VIRT_2_BE64_BUS( golan_cq->cqes );
1057 994
 	in->ctx.cqe_sz_flags	= GOLAN_CQE_SIZE_64 << 5;
1058 995
 	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
1059 996
 	in->ctx.c_eqn		= cpu_to_be16(golan->eq.eqn);
@@ -1071,7 +1008,7 @@ static int golan_create_cq(struct ib_device *ibdev,
1071 1008
 	return 0;
1072 1009
 
1073 1010
 err_create_cq_cmd:
1074
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
1011
+	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
1075 1012
 err_create_cq_cqe_alloc:
1076 1013
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1077 1014
 err_create_cq_db_alloc:
@@ -1108,7 +1045,7 @@ static void golan_destroy_cq(struct ib_device *ibdev,
1108 1045
 	cq->cqn = 0;
1109 1046
 
1110 1047
 	ib_cq_set_drvdata(cq, NULL);
1111
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
1048
+	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
1112 1049
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1113 1050
 	free(golan_cq);
1114 1051
 
@@ -1154,7 +1091,6 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
1154 1091
 	struct golan_cmd_layout *cmd;
1155 1092
 	struct golan_wqe_data_seg *data;
1156 1093
 	struct golan_create_qp_mbox_out *out;
1157
-	userptr_t addr;
1158 1094
 	uint32_t wqe_size_in_bytes;
1159 1095
 	uint32_t max_qp_size_in_wqes;
1160 1096
 	unsigned int i;
@@ -1202,12 +1138,11 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
1202 1138
 	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
1203 1139
 
1204 1140
 	/* allocate dma memory for WQEs (1 page is enough) - should change it */
1205
-	addr = golan_get_page ( &golan->pages );
1206
-	if (!addr) {
1141
+	golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
1142
+	if (!golan_qp->wqes) {
1207 1143
 		rc = -ENOMEM;
1208 1144
 		goto err_create_qp_wqe_alloc;
1209 1145
 	}
1210
-	golan_qp->wqes		= user_to_virt(addr, 0);
1211 1146
 	golan_qp->rq.wqes	= golan_qp->wqes;
1212 1147
 	golan_qp->sq.wqes	= golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
1213 1148
 			//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
@@ -1241,7 +1176,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
1241 1176
 	in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1242 1177
 
1243 1178
 	/* Fill the physical address of the page */
1244
-	in->pas[0]			= USR_2_BE64_BUS(addr);
1179
+	in->pas[0]			= VIRT_2_BE64_BUS(golan_qp->wqes);
1245 1180
 	in->ctx.qp_counter_set_usr_page	= cpu_to_be32(golan->uar.index);
1246 1181
 
1247 1182
 	in->ctx.flags_pd 	= cpu_to_be32(golan->pdn);
@@ -1280,7 +1215,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
1280 1215
 err_create_qp_cmd:
1281 1216
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1282 1217
 err_create_qp_db_alloc:
1283
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
1218
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
1284 1219
 err_create_qp_wqe_alloc:
1285 1220
 err_create_qp_sq_size:
1286 1221
 err_create_qp_sq_wqe_size:
@@ -1488,7 +1423,7 @@ static void golan_destroy_qp(struct ib_device *ibdev,
1488 1423
 
1489 1424
 	ib_qp_set_drvdata(qp, NULL);
1490 1425
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1491
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
1426
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
1492 1427
 	free(golan_qp);
1493 1428
 
1494 1429
 	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
@@ -1526,7 +1461,6 @@ static int golan_post_send(struct ib_device *ibdev,
1526 1461
 	unsigned long			wqe_idx;
1527 1462
 	struct golan_wqe_data_seg	*data		= NULL;
1528 1463
 	struct golan_wqe_ctrl_seg	*ctrl		= NULL;
1529
-//	static uint8_t			toggle		= 0;
1530 1464
 
1531 1465
 
1532 1466
 	wqe_idx_mask = (qp->send.num_wqes - 1);
@@ -1576,8 +1510,9 @@ static int golan_post_send(struct ib_device *ibdev,
1576 1510
 	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
1577 1511
 	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
1578 1512
 	wmb();
1579
-	writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
1580
-//			((toggle++ & 0x1) ? 0x100 : 0x0));
1513
+	writeq(*((__be64 *)ctrl), golan->uar.virt
1514
+			+ ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
1515
+					: DB_BUFFER0_ODD_OFFSET ) );
1581 1516
 	return 0;
1582 1517
 }
1583 1518
 
@@ -1702,7 +1637,6 @@ err_query_vport_gid_cmd:
1702 1637
 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1703 1638
 	struct golan *golan = ib_get_drvdata ( ibdev );
1704 1639
 	struct golan_cmd_layout	*cmd;
1705
-	//struct golan_query_hca_vport_pkey_data *pkey_table;
1706 1640
 	struct golan_query_hca_vport_pkey_inbox *in;
1707 1641
 	int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
1708 1642
 	int rc;
@@ -1719,8 +1653,6 @@ static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1719 1653
 	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1720 1654
 	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
1721 1655
 
1722
-	//pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1723
-
1724 1656
 	return 0;
1725 1657
 err_query_vport_pkey_cmd:
1726 1658
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
@@ -2100,10 +2032,15 @@ static void golan_poll_eq(struct ib_device *ibdev)
2100 2032
 				   cqn, eqe->data.cq_err.syndrome);
2101 2033
 //			mlx5_cq_event(dev, cqn, eqe->type);
2102 2034
 			break;
2035
+		/*
2036
+		 * currently the driver do not support dynamic memory request
2037
+		 * during FW run, a follow up change will allocate FW pages once and
2038
+		 * never release them till driver shutdown, this change will not support
2039
+		 * this request as currently this request is not issued anyway.
2103 2040
 		case GOLAN_EVENT_TYPE_PAGE_REQUEST:
2104 2041
 			{
2105
-				/* we should check if we get this event while we
2106
-				 * waiting for a command */
2042
+				// we should check if we get this event while we
2043
+				// waiting for a command
2107 2044
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
2108 2045
 				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
2109 2046
 
@@ -2112,6 +2049,7 @@ static void golan_poll_eq(struct ib_device *ibdev)
2112 2049
 				golan_provide_pages(golan, npages, func_id);
2113 2050
 			}
2114 2051
 			break;
2052
+		*/
2115 2053
 		default:
2116 2054
 			DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
2117 2055
 				   eqe->type, eq->eqn);
@@ -2231,7 +2169,6 @@ static int golan_register_ibdev(struct golan_port *port)
2231 2169
 
2232 2170
 static inline void golan_bring_down(struct golan *golan)
2233 2171
 {
2234
-
2235 2172
 	DBGC(golan, "%s: start\n", __FUNCTION__);
2236 2173
 
2237 2174
 	if (~golan->flags & GOLAN_OPEN) {
@@ -2413,7 +2350,8 @@ static int golan_probe_normal ( struct pci_device *pci ) {
2413 2350
 		goto err_golan_alloc;
2414 2351
 	}
2415 2352
 
2416
-	if ( golan_init_pages( &golan->pages ) ) {
2353
+	/* at POST stage some BIOSes have limited available dynamic memory */
2354
+	if ( golan_init_fw_areas ( golan ) ) {
2417 2355
 		rc = -ENOMEM;
2418 2356
 		goto err_golan_golan_init_pages;
2419 2357
 	}
@@ -2423,11 +2361,6 @@ static int golan_probe_normal ( struct pci_device *pci ) {
2423 2361
 	golan->pci = pci;
2424 2362
 	golan_pci_init( golan );
2425 2363
 	/* config command queues */
2426
-	if ( fw_ver_and_cmdif( golan ) ) {
2427
-		rc = -1;
2428
-		goto err_fw_ver_cmdif;
2429
-	}
2430
-
2431 2364
 	if ( golan_bring_up( golan ) ) {
2432 2365
 		DBGC (golan ,"golan bringup failed\n");
2433 2366
 		rc = -1;
@@ -2482,9 +2415,8 @@ err_golan_probe_alloc_ibdev:
2482 2415
 err_utils_init:
2483 2416
 	golan_bring_down ( golan );
2484 2417
 err_golan_bringup:
2485
-err_fw_ver_cmdif:
2486 2418
 	iounmap( golan->iseg );
2487
-	golan_free_pages( &golan->pages );
2419
+	golan_free_fw_areas ( golan );
2488 2420
 err_golan_golan_init_pages:
2489 2421
 	free ( golan );
2490 2422
 err_golan_alloc:
@@ -2513,7 +2445,7 @@ static void golan_remove_normal ( struct pci_device *pci ) {
2513 2445
 		free_mlx_utils ( & golan->utils );
2514 2446
 	}
2515 2447
 	iounmap( golan->iseg );
2516
-	golan_free_pages( &golan->pages );
2448
+	golan_free_fw_areas ( golan );
2517 2449
 	free(golan);
2518 2450
 }
2519 2451
 
@@ -2528,14 +2460,16 @@ static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
2528 2460
 			( struct shomron_nodnic_eth_send_wqe * )wqbb;
2529 2461
 	struct shomronprm_wqe_segment_ctrl_send *ctrl;
2530 2462
 
2531
-	if ( ! ibdev || ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2463
+	if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2532 2464
 		DBG("%s: Invalid parameters\n",__FUNCTION__);
2533 2465
 		status = MLX_FAILED;
2534 2466
 		goto err;
2535 2467
 	}
2536 2468
 	wmb();
2537 2469
 	ctrl = & eth_wqe->ctrl;
2538
-	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt + 0x800);
2470
+	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
2471
+			( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
2472
+			: DB_BUFFER0_EVEN_OFFSET ) );
2539 2473
 err:
2540 2474
 	return status;
2541 2475
 }

+ 16
- 0
src/drivers/infiniband/golan.h View File

@@ -111,6 +111,18 @@ struct golan_uar {
111 111
     unsigned long	phys;
112 112
 };
113 113
 
114
+
115
+struct golan_firmware_area {
116
+	/* length of area in pages */
117
+	uint32_t npages;
118
+	/** Firmware area in external memory
119
+	 *
120
+	 * This is allocated when first needed, and freed only on
121
+	 * final teardown, in order to avoid memory map changes at
122
+	 * runtime.
123
+	 */
124
+	userptr_t area;
125
+};
114 126
 /* Queue Pair */
115 127
 #define GOLAN_SEND_WQE_BB_SIZE			64
116 128
 #define GOLAN_SEND_UD_WQE_SIZE			sizeof(struct golan_send_wqe_ud)
@@ -204,6 +216,8 @@ struct golan_completion_queue {
204 216
 #define GOLAN_EQE_SIZE				sizeof(struct golan_eqe)
205 217
 #define GOLAN_NUM_EQES 				8
206 218
 #define GOLAN_EQ_DOORBELL_OFFSET		0x40
219
+#define DB_BUFFER0_EVEN_OFFSET	0x800
220
+#define DB_BUFFER0_ODD_OFFSET	0x900
207 221
 
208 222
 #define GOLAN_EQ_MAP_ALL_EVENTS					\
209 223
 	((1 << GOLAN_EVENT_TYPE_PATH_MIG         	)|	\
@@ -323,6 +337,8 @@ struct golan {
323 337
 	mlx_utils		*utils;
324 338
 
325 339
 	struct golan_port		ports[GOLAN_MAX_PORTS];
340
+#define GOLAN_FW_AREAS_NUM 2
341
+	struct golan_firmware_area fw_areas[GOLAN_FW_AREAS_NUM];
326 342
 };
327 343
 
328 344
 #endif /* _GOLAN_H_*/

+ 1
- 7
src/drivers/infiniband/mlx_nodnic/src/mlx_device.c View File

@@ -169,13 +169,7 @@ nodnic_device_clear_int (
169 169
 	mlx_status 			status = MLX_SUCCESS;
170 170
 	mlx_uint32			disable = 1;
171 171
 #ifndef DEVICE_CX3
172
-#define NODNIC_CLEAR_INT_BAR_OFFSET 0x100C
173
-	if ( device_priv->device_cap.support_bar_cq_ctrl ) {
174
-		status = mlx_pci_mem_write ( device_priv->utils, MlxPciWidthUint32, 0,
175
-			( mlx_uint64 ) ( NODNIC_CLEAR_INT_BAR_OFFSET ), 1, &disable );
176
-	} else {
177
-		status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
178
-	}
172
+	status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
179 173
 	MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
180 174
 #else
181 175
 	mlx_utils *utils = device_priv->utils;

+ 36
- 2
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c View File

@@ -20,8 +20,8 @@
20 20
 FILE_LICENCE ( GPL2_OR_LATER );
21 21
 
22 22
 #include "mlx_mtu.h"
23
-#include "mlx_memory.h"
24
-#include "mlx_bail.h"
23
+#include "../../include/public/mlx_memory.h"
24
+#include "../../include/public/mlx_bail.h"
25 25
 
26 26
 mlx_status
27 27
 mlx_get_max_mtu(
@@ -58,3 +58,37 @@ reg_err:
58 58
 bad_param:
59 59
 	return status;
60 60
 }
61
+
62
+mlx_status
63
+mlx_set_admin_mtu(
64
+		IN mlx_utils 	*utils,
65
+		IN mlx_uint8 	port_num,
66
+		IN mlx_uint32 	admin_mtu
67
+		)
68
+{
69
+	mlx_status status = MLX_SUCCESS;
70
+	struct mlx_mtu mtu;
71
+	mlx_uint32 reg_status;
72
+
73
+	if (utils == NULL) {
74
+		status = MLX_INVALID_PARAMETER;
75
+		goto bad_param;
76
+	}
77
+
78
+	mlx_memory_set(utils, &mtu, 0, sizeof(mtu));
79
+
80
+	mtu.local_port = port_num;
81
+	mtu.admin_mtu = admin_mtu;
82
+
83
+	status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_WRITE, &mtu,
84
+			sizeof(mtu), &reg_status);
85
+	MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
86
+	if (reg_status != 0) {
87
+		MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
88
+		status = MLX_FAILED;
89
+		goto reg_err;
90
+	}
91
+reg_err:
92
+bad_param:
93
+	return status;
94
+}

+ 8
- 2
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h View File

@@ -22,8 +22,8 @@
22 22
 
23 23
 FILE_LICENCE ( GPL2_OR_LATER );
24 24
 
25
-#include "mlx_reg_access.h"
26
-#include "mlx_utils.h"
25
+#include "../../include/public/mlx_utils.h"
26
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
27 27
 
28 28
 #define BYTE_TO_BIT	0x8
29 29
 
@@ -49,4 +49,10 @@ mlx_get_max_mtu(
49 49
 		OUT mlx_uint32 	*max_mtu
50 50
 		);
51 51
 
52
+mlx_status
53
+mlx_set_admin_mtu(
54
+		IN mlx_utils 	*utils,
55
+		IN mlx_uint8 	port_num,
56
+		IN mlx_uint32 	admin_mtu
57
+		);
52 58
 #endif /* MLX_MTU_H_ */

+ 4
- 4
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c View File

@@ -39,7 +39,6 @@ struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = {
39 39
 		TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE),
40 40
 		TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE),
41 41
 		TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE),
42
-		TlvMappingEntry(0x2020, 0x2020, NVRAM_TLV_CLASS_PHYSICAL_PORT, FALSE),
43 42
 		TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE),
44 43
 		TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE),
45 44
 		TlvMappingEntry(0x2006, 0x206, NVRAM_TLV_CLASS_HOST, FALSE),
@@ -67,6 +66,7 @@ struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = {
67 66
 		TlvMappingEntry(0x110, 0x110, NVRAM_TLV_CLASS_HOST, FALSE),
68 67
 		TlvMappingEntry(0x192, 0x192, NVRAM_TLV_CLASS_GLOBAL, FALSE),
69 68
 		TlvMappingEntry(0x101, 0x101, NVRAM_TLV_CLASS_GLOBAL, TRUE),
69
+		TlvMappingEntry(0x194, 0x194, NVRAM_TLV_CLASS_GLOBAL, FALSE),
70 70
 		TlvMappingEntry(0, 0, 0, 0),
71 71
 };
72 72
 
@@ -239,6 +239,7 @@ nvconfig_nvdata_access(
239 239
 		IN REG_ACCESS_OPT opt,
240 240
 		IN mlx_size data_size,
241 241
 		IN NV_DEFAULT_OPT def_en,
242
+		IN NVDA_WRITER_ID writer_id,
242 243
 		IN OUT mlx_uint8 *version,
243 244
 		IN OUT mlx_void *data
244 245
 		)
@@ -263,10 +264,9 @@ nvconfig_nvdata_access(
263 264
 	data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32);
264 265
 	mlx_memory_set(utils, &nvda, 0, sizeof(nvda));
265 266
 	nvda.nv_header.length = data_size_align_to_dword;
266
-	nvda.nv_header.rd_en = 0;
267
-	nvda.nv_header.def_en = def_en;
268
-	nvda.nv_header.over_en = 1;
267
+	nvda.nv_header.access_mode = def_en;
269 268
 	nvda.nv_header.version = *version;
269
+	nvda.nv_header.writer_id = writer_id;
270 270
 
271 271
 	nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type);
272 272
 

+ 23
- 13
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h View File

@@ -31,6 +31,17 @@ typedef enum {
31 31
 	NVRAM_TLV_CLASS_HOST = 3,
32 32
 } NVRAM_CLASS_CODE;
33 33
 
34
+typedef enum {
35
+	NVDA_NV_HEADER_WRITER_ID_UEFI_HII  = 0x6,
36
+	NVDA_NV_HEADER_WRITER_ID_FLEXBOOT = 0x8,
37
+} NVDA_WRITER_ID;
38
+
39
+typedef enum {
40
+  TLV_ACCESS_DEFAULT_DIS = 0,
41
+  TLV_ACCESS_CURRENT = 1,
42
+  TLV_ACCESS_DEFAULT_EN = 2,
43
+} NV_DEFAULT_OPT;
44
+
34 45
 struct nvconfig_tlv_type_per_port {
35 46
 	 mlx_uint32 param_idx	:16;
36 47
 	 mlx_uint32 port		:8;
@@ -78,26 +89,24 @@ struct nvconfig_header {
78 89
 	 mlx_uint32 length		:9; /*Size of configuration item data in bytes between 0..256 */
79 90
 	 mlx_uint32 reserved0	:3;
80 91
 	 mlx_uint32 version		:4; /* Configuration item version */
81
-	 mlx_uint32 reserved1	:7;
82
-
83
-	 mlx_uint32 def_en		:1; /*Choose whether to access the default value or the user-defined value.
84
-									0x0 Read or write the user-defined value.
85
-									0x1 Read the default value (only valid for reads).*/
86
-
87
-	 mlx_uint32 rd_en		:1; /*enables reading the TLV by lower priorities
88
-									0 - TLV can be read by the subsequent lifecycle priorities.
89
-									1 - TLV cannot be read by the subsequent lifecycle priorities. */
90
-	 mlx_uint32 over_en		:1; /*enables overwriting the TLV by lower priorities
91
-									0 - Can only be overwritten by the current lifecycle priority
92
-									1 - Allowed to be overwritten by subsequent lifecycle priorities */
92
+	 mlx_uint32 writer_id	:5;
93
+	 mlx_uint32 reserved1	:1;
94
+
95
+	 mlx_uint32 access_mode	:2; /*Defines which value of the Configuration Item will be accessed.
96
+								0x0: NEXT - Next value to be applied
97
+								0x1: CURRENT - Currently set values (only valid for Query operation) Supported only if NVGC.nvda_read_current_settings==1.
98
+								0x2: FACTORY - Default factory values (only valid for Query operation). Supported only if NVGC.nvda_read_factory_settings==1.*/
99
+
100
+	 mlx_uint32 reserved2	:2;
93 101
 	 mlx_uint32 header_type	:2;
94
-	 mlx_uint32 priority		:2;
102
+	 mlx_uint32 reserved3	:2;
95 103
 	 mlx_uint32 valid	:2;
96 104
 /* -------------- */
97 105
 	 union nvconfig_tlv_type tlv_type;;
98 106
 /* -------------- */
99 107
 	mlx_uint32 crc			:16;
100 108
 	mlx_uint32 reserved		:16;
109
+
101 110
 };
102 111
 
103 112
 #define NVCONFIG_MAX_TLV_SIZE 256
@@ -149,6 +158,7 @@ nvconfig_nvdata_access(
149 158
 		IN REG_ACCESS_OPT opt,
150 159
 		IN mlx_size data_size,
151 160
 		IN NV_DEFAULT_OPT def_en,
161
+		IN NVDA_WRITER_ID writer_id,
152 162
 		IN OUT mlx_uint8 *version,
153 163
 		IN OUT mlx_void *data
154 164
 		);

+ 14
- 3
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c View File

@@ -386,7 +386,8 @@ nvconfig_nvdata_default_access(
386 386
 	mlx_uint8 version = 0;
387 387
 
388 388
 	status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ,
389
-			data_size, TLV_ACCESS_DEFAULT_EN, &version, data);
389
+			data_size, TLV_ACCESS_DEFAULT_EN, 0,
390
+			&version, data);
390 391
 	MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
391 392
 				"nvconfig_nvdata_access failed ");
392 393
 	for (index = 0; index * 4 < data_size; index++) {
@@ -493,6 +494,8 @@ nvconfig_read_rom_ini_values(
493 494
 		)
494 495
 {
495 496
 	mlx_status status = MLX_SUCCESS;
497
+	mlx_uint8 version = 0;
498
+	mlx_uint32 index;
496 499
 
497 500
 	if (utils == NULL || rom_ini == NULL) {
498 501
 		status = MLX_INVALID_PARAMETER;
@@ -501,8 +504,16 @@ nvconfig_read_rom_ini_values(
501 504
 	}
502 505
 	mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini));
503 506
 
504
-	status = nvconfig_nvdata_default_access(utils, 0, GLOBAL_ROM_INI_TYPE,
505
-			sizeof(*rom_ini), rom_ini);
507
+	status = nvconfig_nvdata_access(utils, 0, GLOBAL_ROM_INI_TYPE, REG_ACCESS_READ,
508
+			sizeof(*rom_ini), TLV_ACCESS_DEFAULT_DIS, 0,
509
+			&version, rom_ini);
510
+	MLX_CHECK_STATUS(NULL, status, bad_param,
511
+				"nvconfig_nvdata_access failed ");
512
+	for (index = 0; index * 4 < sizeof(*rom_ini); index++) {
513
+		mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) rom_ini)[index]),
514
+				((mlx_uint32 *) rom_ini) + index);
515
+	}
516
+
506 517
 bad_param:
507 518
 	return status;
508 519
 }

+ 0
- 145
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c View File

@@ -1,145 +0,0 @@
1
-/*
2
- * Copyright (C) 2015 Mellanox Technologies Ltd.
3
- *
4
- * This program is free software; you can redistribute it and/or
5
- * modify it under the terms of the GNU General Public License as
6
- * published by the Free Software Foundation; either version 2 of the
7
- * License, or any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful, but
10
- * WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12
- * General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program; if not, write to the Free Software
16
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17
- * 02110-1301, USA.
18
- */
19
-
20
-FILE_LICENCE ( GPL2_OR_LATER );
21
-
22
-#include "mlx_ocbb.h"
23
-#include "mlx_icmd.h"
24
-#include "mlx_bail.h"
25
-
26
-mlx_status
27
-mlx_ocbb_init (
28
-	IN mlx_utils *utils,
29
-	IN mlx_uint64 address
30
-	)
31
-{
32
-	mlx_status status = MLX_SUCCESS;
33
-	struct mlx_ocbb_init ocbb_init;
34
-	ocbb_init.address_hi = (mlx_uint32)(address >> 32);
35
-	ocbb_init.address_lo = (mlx_uint32)address;
36
-
37
-	if (utils == NULL) {
38
-		status = MLX_INVALID_PARAMETER;
39
-		goto bad_param;
40
-	}
41
-
42
-	status = mlx_icmd_send_command(
43
-			utils,
44
-			OCBB_INIT,
45
-			&ocbb_init,
46
-			sizeof(ocbb_init),
47
-			0
48
-			);
49
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
50
-icmd_err:
51
-bad_param:
52
-	return status;
53
-}
54
-
55
-mlx_status
56
-mlx_ocbb_query_header_status (
57
-	IN mlx_utils *utils,
58
-	OUT mlx_uint8 *ocbb_status
59
-	)
60
-{
61
-	mlx_status status = MLX_SUCCESS;
62
-	struct mlx_ocbb_query_status ocbb_query_status;
63
-
64
-	if (utils == NULL) {
65
-		status = MLX_INVALID_PARAMETER;
66
-		goto bad_param;
67
-	}
68
-
69
-	status = mlx_icmd_send_command(
70
-			utils,
71
-			OCBB_QUERY_HEADER_STATUS,
72
-			&ocbb_query_status,
73
-			0,
74
-			sizeof(ocbb_query_status)
75
-			);
76
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
77
-	*ocbb_status = ocbb_query_status.status;
78
-icmd_err:
79
-bad_param:
80
-	return status;
81
-}
82
-
83
-mlx_status
84
-mlx_ocbb_query_etoc_status (
85
-	IN mlx_utils *utils,
86
-	OUT mlx_uint8 *ocbb_status
87
-	)
88
-{
89
-	mlx_status status = MLX_SUCCESS;
90
-	struct mlx_ocbb_query_status ocbb_query_status;
91
-
92
-	if (utils == NULL) {
93
-		status = MLX_INVALID_PARAMETER;
94
-		goto bad_param;
95
-	}
96
-
97
-	status = mlx_icmd_send_command(
98
-			utils,
99
-			OCBB_QUERY_ETOC_STATUS,
100
-			&ocbb_query_status,
101
-			0,
102
-			sizeof(ocbb_query_status)
103
-			);
104
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
105
-	*ocbb_status = ocbb_query_status.status;
106
-icmd_err:
107
-bad_param:
108
-	return status;
109
-}
110
-
111
-mlx_status
112
-mlx_ocbb_set_event (
113
-	IN mlx_utils *utils,
114
-	IN mlx_uint64			event_data,
115
-	IN mlx_uint8			event_number,
116
-	IN mlx_uint8			event_length,
117
-	IN mlx_uint8			data_length,
118
-	IN mlx_uint8			data_start_offset
119
-	)
120
-{
121
-	mlx_status status = MLX_SUCCESS;
122
-	struct mlx_ocbb_set_event ocbb_event;
123
-
124
-	if (utils == NULL) {
125
-		status = MLX_INVALID_PARAMETER;
126
-		goto bad_param;
127
-	}
128
-
129
-	ocbb_event.data_length = data_length;
130
-	ocbb_event.data_start_offset = data_start_offset;
131
-	ocbb_event.event_number = event_number;
132
-	ocbb_event.event_data = event_data;
133
-	ocbb_event.event_length = event_length;
134
-	status = mlx_icmd_send_command(
135
-			utils,
136
-			OCBB_QUERY_SET_EVENT,
137
-			&ocbb_event,
138
-			sizeof(ocbb_event),
139
-			0
140
-			);
141
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
142
-icmd_err:
143
-bad_param:
144
-	return status;
145
-}

+ 0
- 73
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h View File

@@ -1,73 +0,0 @@
1
-#ifndef MLX_OCBB_H_
2
-#define MLX_OCBB_H_
3
-
4
-/*
5
- * Copyright (C) 2015 Mellanox Technologies Ltd.
6
- *
7
- * This program is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU General Public License as
9
- * published by the Free Software Foundation; either version 2 of the
10
- * License, or any later version.
11
- *
12
- * This program is distributed in the hope that it will be useful, but
13
- * WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU General Public License
18
- * along with this program; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20
- * 02110-1301, USA.
21
- */
22
-
23
-FILE_LICENCE ( GPL2_OR_LATER );
24
-
25
-#include "mlx_utils.h"
26
-
27
-#define MLX_OCBB_EVENT_DATA_SIZE 2
28
-struct mlx_ocbb_init {
29
-	mlx_uint32 address_hi;
30
-	mlx_uint32 address_lo;
31
-};
32
-
33
-struct mlx_ocbb_query_status {
34
-	mlx_uint32 reserved	:24;
35
-	mlx_uint32 status	:8;
36
-};
37
-
38
-struct mlx_ocbb_set_event {
39
-	mlx_uint64 event_data;
40
-	mlx_uint32 event_number	:8;
41
-	mlx_uint32 event_length	:8;
42
-	mlx_uint32 data_length	:8;
43
-	mlx_uint32 data_start_offset	:8;
44
-};
45
-
46
-mlx_status
47
-mlx_ocbb_init (
48
-	IN mlx_utils *utils,
49
-	IN mlx_uint64 address
50
-	);
51
-
52
-mlx_status
53
-mlx_ocbb_query_header_status (
54
-	IN mlx_utils *utils,
55
-	OUT mlx_uint8 *ocbb_status
56
-	);
57
-
58
-mlx_status
59
-mlx_ocbb_query_etoc_status (
60
-	IN mlx_utils *utils,
61
-	OUT mlx_uint8 *ocbb_status
62
-	);
63
-
64
-mlx_status
65
-mlx_ocbb_set_event (
66
-	IN mlx_utils *utils,
67
-	IN mlx_uint64			EventData,
68
-	IN mlx_uint8			EventNumber,
69
-	IN mlx_uint8			EventLength,
70
-	IN mlx_uint8			DataLength,
71
-	IN mlx_uint8			DataStartOffset
72
-	);
73
-#endif /* MLX_OCBB_H_ */

+ 0
- 5
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h View File

@@ -31,11 +31,6 @@ typedef enum {
31 31
   REG_ACCESS_WRITE = 2,
32 32
 } REG_ACCESS_OPT;
33 33
 
34
-typedef enum {
35
-  TLV_ACCESS_DEFAULT_DIS = 0,
36
-  TLV_ACCESS_DEFAULT_EN = 1,
37
-} NV_DEFAULT_OPT;
38
-
39 34
 #define REG_ID_NVDA  0x9024
40 35
 #define REG_ID_NVDI  0x9025
41 36
 #define REG_ID_NVIA 0x9029

+ 0
- 84
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c View File

@@ -1,84 +0,0 @@
1
-/*
2
- * Copyright (C) 2015 Mellanox Technologies Ltd.
3
- *
4
- * This program is free software; you can redistribute it and/or
5
- * modify it under the terms of the GNU General Public License as
6
- * published by the Free Software Foundation; either version 2 of the
7
- * License, or any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful, but
10
- * WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12
- * General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program; if not, write to the Free Software
16
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17
- * 02110-1301, USA.
18
- */
19
-
20
-FILE_LICENCE ( GPL2_OR_LATER );
21
-
22
-#include "mlx_wol_rol.h"
23
-#include "mlx_icmd.h"
24
-#include "mlx_memory.h"
25
-#include "mlx_bail.h"
26
-
27
-mlx_status
28
-mlx_set_wol (
29
-	IN mlx_utils *utils,
30
-	IN mlx_uint8 wol_mask
31
-	)
32
-{
33
-	mlx_status status = MLX_SUCCESS;
34
-	struct mlx_wol_rol wol_rol;
35
-
36
-	if (utils == NULL) {
37
-		status = MLX_INVALID_PARAMETER;
38
-		goto bad_param;
39
-	}
40
-
41
-	mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
42
-	wol_rol.wol_mode_valid = TRUE;
43
-	wol_rol.wol_mode = wol_mask;
44
-	status = mlx_icmd_send_command(
45
-			utils,
46
-			SET_WOL_ROL,
47
-			&wol_rol,
48
-			sizeof(wol_rol),
49
-			0
50
-			);
51
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
52
-icmd_err:
53
-bad_param:
54
-	return status;
55
-}
56
-
57
-mlx_status
58
-mlx_query_wol (
59
-	IN mlx_utils *utils,
60
-	OUT mlx_uint8 *wol_mask
61
-	)
62
-{
63
-	mlx_status status = MLX_SUCCESS;
64
-	struct mlx_wol_rol wol_rol;
65
-
66
-	if (utils == NULL || wol_mask == NULL) {
67
-		status = MLX_INVALID_PARAMETER;
68
-		goto bad_param;
69
-	}
70
-
71
-	mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
72
-	status = mlx_icmd_send_command(
73
-			utils,
74
-			QUERY_WOL_ROL,
75
-			&wol_rol,
76
-			0,
77
-			sizeof(wol_rol)
78
-			);
79
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
80
-	*wol_mask = wol_rol.wol_mode;
81
-icmd_err:
82
-bad_param:
83
-	return status;
84
-}

+ 0
- 61
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h View File

@@ -1,61 +0,0 @@
1
-#ifndef MLX_WOL_ROL_H_
2
-#define MLX_WOL_ROL_H_
3
-
4
-/*
5
- * Copyright (C) 2015 Mellanox Technologies Ltd.
6
- *
7
- * This program is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU General Public License as
9
- * published by the Free Software Foundation; either version 2 of the
10
- * License, or any later version.
11
- *
12
- * This program is distributed in the hope that it will be useful, but
13
- * WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU General Public License
18
- * along with this program; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20
- * 02110-1301, USA.
21
- */
22
-
23
-FILE_LICENCE ( GPL2_OR_LATER );
24
-
25
-
26
-#include "mlx_utils.h"
27
-
28
-typedef enum {
29
-	WOL_MODE_DISABLE = 0x0,
30
-	WOL_MODE_SECURE = 0x2,
31
-	WOL_MODE_MAGIC = 0x4,
32
-	WOL_MODE_ARP = 0x8,
33
-	WOL_MODE_BC = 0x10,
34
-	WOL_MODE_MC = 0x20,
35
-	WOL_MODE_UC = 0x40,
36
-	WOL_MODE_PHY = 0x80,
37
-} WOL_MODE;
38
-
39
-struct mlx_wol_rol {
40
-	mlx_uint32 reserved0	:32;
41
-	mlx_uint32 reserved1	:32;
42
-	mlx_uint32 wol_mode		:8;
43
-	mlx_uint32 rol_mode		:8;
44
-	mlx_uint32 reserved3	:14;
45
-	mlx_uint32 wol_mode_valid	:1;
46
-	mlx_uint32 rol_mode_valid	:1;
47
-};
48
-
49
-mlx_status
50
-mlx_set_wol (
51
-	IN mlx_utils *utils,
52
-	IN mlx_uint8 wol_mask
53
-	);
54
-
55
-mlx_status
56
-mlx_query_wol (
57
-	IN mlx_utils *utils,
58
-	OUT mlx_uint8 *wol_mask
59
-	);
60
-
61
-#endif /* MLX_WOL_ROL_H_ */

+ 0
- 9
src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c View File

@@ -1,9 +0,0 @@
1
-MlxDebugLogImpl()
2
-		{
3
-	DBGC((DEBUG),"");
4
-		}
5
-MlxInfoLogImpl()
6
-{
7
-	DBGC((INFO),"");
8
-			}
9
-}

+ 1
- 1
src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c View File

@@ -107,7 +107,7 @@ mlx_pci_mem_read(
107 107
 		status = MLX_INVALID_PARAMETER;
108 108
 		goto bail;
109 109
 	}
110
-	status = mlx_pci_mem_read_priv(utils, bar_index, width, offset, count, buffer);
110
+	status = mlx_pci_mem_read_priv(utils, width,bar_index, offset, count, buffer);
111 111
 bail:
112 112
 	return status;
113 113
 }

Loading…
Cancel
Save