Browse Source

[golan] Bug fixes and improved paging allocation method

Updates:
- Revert support for clearing interrupts via BAR

Signed-off-by: Raed Salem <raeds@mellanox.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
tags/v1.20.1
Raed Salem 7 years ago
parent
commit
1ff1eebcf7

+ 1
- 0
src/Makefile View File

89
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac
89
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac
90
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds
90
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds
91
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed
91
 SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed
92
+SRCDIRS		+= drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu
92
 SRCDIRS		+= drivers/infiniband/mlx_nodnic/src
93
 SRCDIRS		+= drivers/infiniband/mlx_nodnic/src
93
 SRCDIRS		+= drivers/usb
94
 SRCDIRS		+= drivers/usb
94
 SRCDIRS		+= interface/pxe interface/efi interface/smbios
95
 SRCDIRS		+= interface/pxe interface/efi interface/smbios

+ 22
- 7
src/drivers/infiniband/flexboot_nodnic.c View File

44
 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
44
 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
45
 #include "mlx_utils/include/public/mlx_pci_gw.h"
45
 #include "mlx_utils/include/public/mlx_pci_gw.h"
46
 #include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
46
 #include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
47
+#include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h"
47
 
48
 
48
 /***************************************************************************
49
 /***************************************************************************
49
  *
50
  *
823
 		netdev_rx_err ( netdev, iobuf, -ENOTTY );
824
 		netdev_rx_err ( netdev, iobuf, -ENOTTY );
824
 		return;
825
 		return;
825
 	}
826
 	}
827
+
826
 	netdev_rx ( netdev, iobuf );
828
 	netdev_rx ( netdev, iobuf );
827
 }
829
 }
828
 
830
 
907
 	list_del(&port->eth_qp->send.list);
909
 	list_del(&port->eth_qp->send.list);
908
 	list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
910
 	list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
909
 	port->eth_qp->recv.cq = port->eth_cq;
911
 	port->eth_qp->recv.cq = port->eth_cq;
912
+	port->cmdsn = 0;
910
 	list_del(&port->eth_qp->recv.list);
913
 	list_del(&port->eth_qp->recv.list);
911
 	list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
914
 	list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
912
 
915
 
1445
 	struct pci_device *pci = flexboot_nodnic->pci;
1448
 	struct pci_device *pci = flexboot_nodnic->pci;
1446
 	nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1449
 	nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1447
 
1450
 
1448
-	if ( ! flexboot_nodnic->device_priv.utils ) {
1449
-		uar->virt = NULL;
1450
-		DBGC ( flexboot_nodnic, "%s: mlx_utils is not initialized \n", __FUNCTION__ );
1451
-		return -EINVAL;
1452
-	}
1453
-
1454
 	if  ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
1451
 	if  ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
1455
 		DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ );
1452
 		DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ );
1456
 		return -ENOTSUP;
1453
 		return -ENOTSUP;
1467
 	return status;
1464
 	return status;
1468
 }
1465
 }
1469
 
1466
 
1467
+static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
1468
+       nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1469
+
1470
+       if ( uar->virt ) {
1471
+               iounmap( uar->virt );
1472
+               uar->virt = NULL;
1473
+       }
1474
+
1475
+       return MLX_SUCCESS;
1476
+}
1477
+
1478
+
1470
 int flexboot_nodnic_probe ( struct pci_device *pci,
1479
 int flexboot_nodnic_probe ( struct pci_device *pci,
1471
 		struct flexboot_nodnic_callbacks *callbacks,
1480
 		struct flexboot_nodnic_callbacks *callbacks,
1472
 		void *drv_priv __unused ) {
1481
 		void *drv_priv __unused ) {
1508
 	MLX_FATAL_CHECK_STATUS(status, get_cap_err,
1517
 	MLX_FATAL_CHECK_STATUS(status, get_cap_err,
1509
 					"nodnic_device_get_cap failed");
1518
 					"nodnic_device_get_cap failed");
1510
 
1519
 
1520
+	if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) {
1521
+                MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" );
1522
+        }
1523
+
1511
 	status =  flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
1524
 	status =  flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
1512
 	MLX_FATAL_CHECK_STATUS(status, err_set_masking,
1525
 	MLX_FATAL_CHECK_STATUS(status, err_set_masking,
1513
 						"flexboot_nodnic_set_port_masking failed");
1526
 						"flexboot_nodnic_set_port_masking failed");
1522
 						"flexboot_nodnic_thin_init_ports failed");
1535
 						"flexboot_nodnic_thin_init_ports failed");
1523
 
1536
 
1524
 	if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
1537
 	if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
1525
-		DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_pci_init failed"
1538
+		DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed"
1526
 				" ( status = %d )\n",__FUNCTION__, status );
1539
 				" ( status = %d )\n",__FUNCTION__, status );
1527
 	}
1540
 	}
1528
 
1541
 
1550
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1563
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1551
 reg_err:
1564
 reg_err:
1552
 err_set_ports_types:
1565
 err_set_ports_types:
1566
+	flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1553
 err_thin_init_ports:
1567
 err_thin_init_ports:
1554
 err_alloc_ibdev:
1568
 err_alloc_ibdev:
1555
 err_set_masking:
1569
 err_set_masking:
1568
 	struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
1582
 	struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
1569
 	nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );
1583
 	nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );
1570
 
1584
 
1585
+	flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1571
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1586
 	flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1572
 	nodnic_device_teardown( device_priv );
1587
 	nodnic_device_teardown( device_priv );
1573
 	free_mlx_utils ( & device_priv->utils );
1588
 	free_mlx_utils ( & device_priv->utils );

+ 1
- 0
src/drivers/infiniband/flexboot_nodnic.h View File

42
 #define FLEXBOOT_NODNIC_PAGE_SHIFT	12
42
 #define FLEXBOOT_NODNIC_PAGE_SHIFT	12
43
 #define	FLEXBOOT_NODNIC_PAGE_SIZE		(1 << FLEXBOOT_NODNIC_PAGE_SHIFT)
43
 #define	FLEXBOOT_NODNIC_PAGE_SIZE		(1 << FLEXBOOT_NODNIC_PAGE_SHIFT)
44
 #define FLEXBOOT_NODNIC_PAGE_MASK		(FLEXBOOT_NODNIC_PAGE_SIZE - 1)
44
 #define FLEXBOOT_NODNIC_PAGE_MASK		(FLEXBOOT_NODNIC_PAGE_SIZE - 1)
45
+#define EN_DEFAULT_ADMIN_MTU 1522
45
 
46
 
46
 /* Port protocol */
47
 /* Port protocol */
47
 enum flexboot_nodnic_protocol {
48
 enum flexboot_nodnic_protocol {

+ 73
- 139
src/drivers/infiniband/golan.c View File

42
 #include "mlx_utils/include/public/mlx_bail.h"
42
 #include "mlx_utils/include/public/mlx_bail.h"
43
 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
43
 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
44
 
44
 
45
+
45
 #define DEVICE_IS_CIB( device ) ( device == 0x1011 )
46
 #define DEVICE_IS_CIB( device ) ( device == 0x1011 )
47
+
46
 /******************************************************************************/
48
 /******************************************************************************/
47
 /************* Very simple memory management for umalloced pages **************/
49
 /************* Very simple memory management for umalloced pages **************/
48
 /******* Temporary solution until full memory management is implemented *******/
50
 /******* Temporary solution until full memory management is implemented *******/
49
 /******************************************************************************/
51
 /******************************************************************************/
52
+
50
 struct golan_page {
53
 struct golan_page {
51
 	struct list_head list;
54
 	struct list_head list;
52
 	userptr_t addr;
55
 	userptr_t addr;
53
 };
56
 };
54
 
57
 
55
-static void golan_free_pages ( struct list_head *head ) {
56
-	struct golan_page *page, *tmp;
57
-	list_for_each_entry_safe ( page, tmp, head, list ) {
58
-		list_del ( &page->list );
59
-		ufree ( page->addr );
60
-		free ( page );
58
+static void golan_free_fw_areas ( struct golan *golan ) {
59
+	int i;
60
+
61
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
62
+		if ( golan->fw_areas[i].area ) {
63
+			ufree ( golan->fw_areas[i].area );
64
+			golan->fw_areas[i].area = UNULL;
65
+		}
61
 	}
66
 	}
62
 }
67
 }
63
 
68
 
64
-static int golan_init_pages ( struct list_head *head ) {
65
-	int rc = 0;
69
+static int golan_init_fw_areas ( struct golan *golan ) {
70
+	int rc = 0, i =  0;
66
 
71
 
67
-	if ( !head ) {
72
+	if ( ! golan ) {
68
 		rc = -EINVAL;
73
 		rc = -EINVAL;
69
-		goto err_golan_init_pages_bad_param;
74
+		goto err_golan_init_fw_areas_bad_param;
70
 	}
75
 	}
71
 
76
 
72
-	INIT_LIST_HEAD ( head );
73
-	return rc;
77
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
78
+		golan->fw_areas[i].area = UNULL;
74
 
79
 
75
-err_golan_init_pages_bad_param:
76
 	return rc;
80
 	return rc;
77
-}
78
-
79
-static userptr_t golan_get_page ( struct list_head *head ) {
80
-	struct golan_page *page;
81
-	userptr_t addr;
82
-
83
-	if ( list_empty ( head ) ) {
84
-		addr = umalloc ( GOLAN_PAGE_SIZE );
85
-		if ( addr == UNULL ) {
86
-			goto err_golan_iget_page_alloc_page;
87
-		}
88
-	} else {
89
-		page = list_first_entry ( head, struct golan_page, list );
90
-		list_del ( &page->list );
91
-		addr = page->addr;
92
-		free ( page );
93
-	}
94
-err_golan_iget_page_alloc_page:
95
-	return addr;
96
-}
97
-
98
-static int golan_return_page ( struct list_head *head,
99
-		userptr_t addr ) {
100
-	struct golan_page *new_entry;
101
-	int rc = 0;
102
-
103
-	if ( ! head ) {
104
-		rc = -EINVAL;
105
-		goto err_golan_return_page_bad_param;
106
-	}
107
-	new_entry = zalloc ( sizeof ( *new_entry ) );
108
-	if ( new_entry == NULL ) {
109
-		rc = -ENOMEM;
110
-		goto err_golan_return_page_alloc_page;
111
-	}
112
-	new_entry->addr = addr;
113
-	list_add_tail( &new_entry->list, head );
114
 
81
 
115
-err_golan_return_page_alloc_page:
116
-err_golan_return_page_bad_param:
82
+	err_golan_init_fw_areas_bad_param:
117
 	return rc;
83
 	return rc;
118
 }
84
 }
85
+
119
 /******************************************************************************/
86
 /******************************************************************************/
120
 
87
 
121
 const char *golan_qp_state_as_string[] = {
88
 const char *golan_qp_state_as_string[] = {
177
 	return sum;
144
 	return sum;
178
 }
145
 }
179
 
146
 
180
-static inline int verify_block_sig(struct golan_cmd_prot_block *block)
181
-{
182
-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
183
-		return -EINVAL;
184
-
185
-	if (xor8_buf(block, sizeof(*block)) != 0xff)
186
-		return -EINVAL;
187
-	return 0;
188
-}
189
-
190
 static inline const char *cmd_status_str(u8 status)
147
 static inline const char *cmd_status_str(u8 status)
191
 {
148
 {
192
 	switch (status) {
149
 	switch (status) {
258
 	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
215
 	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
259
 }
216
 }
260
 
217
 
261
-/**
262
-  * Get Golan FW
263
-  */
264
-static int fw_ver_and_cmdif ( struct golan *golan ) {
265
-	DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
266
-		golan->iseg->fw_rev,
267
-		golan->iseg->cmdif_rev_fw_sub,
268
-		fw_rev_maj ( golan ), fw_rev_min ( golan ),
269
-		fw_rev_sub ( golan ), cmdif_rev ( golan));
270
-
271
-	if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
272
-		DBGC (golan ,"CMDIF %d not supported current is %d\n",
273
-			cmdif_rev ( golan ), PXE_CMDIF_REF);
274
-		return 1;
275
-	}
276
-	return 0;
277
-}
278
-
279
 static inline void show_out_status(uint32_t *out)
218
 static inline void show_out_status(uint32_t *out)
280
 {
219
 {
281
 	DBG("%x\n", be32_to_cpu(out[0]));
220
 	DBG("%x\n", be32_to_cpu(out[0]));
466
 
405
 
467
 	while ( pages > 0 ) {
406
 	while ( pages > 0 ) {
468
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
407
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
469
-		unsigned i;
470
 		struct golan_cmd_layout	*cmd;
408
 		struct golan_cmd_layout	*cmd;
471
 		struct golan_manage_pages_inbox *in;
409
 		struct golan_manage_pages_inbox *in;
472
-		struct golan_manage_pages_outbox_data *out;
473
 
410
 
474
 		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
411
 		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
475
 		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
412
 		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
485
 		in->num_entries = cpu_to_be32(pas_num);
422
 		in->num_entries = cpu_to_be32(pas_num);
486
 
423
 
487
 		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
424
 		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
488
-			out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
489
 			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
425
 			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
490
-			for (i = 0; i < out_num_entries; ++i) {
491
-				golan_return_page ( &golan->pages, ( BE64_BUS_2_USR( out->pas[i] ) ) );
492
-			}
493
 		} else {
426
 		} else {
494
 			if ( rc == -EBUSY ) {
427
 			if ( rc == -EBUSY ) {
495
 				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
428
 				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
506
 		pages -= out_num_entries;
439
 		pages -= out_num_entries;
507
 	}
440
 	}
508
 	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
441
 	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
509
-	return 0;
442
+	return rc;
510
 }
443
 }
511
 
444
 
512
-static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
445
+static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
446
+		, __be16 func_id,struct golan_firmware_area *fw_area) {
513
 	struct mbox *mailbox;
447
 	struct mbox *mailbox;
514
 	int size_ibox = 0;
448
 	int size_ibox = 0;
515
 	int size_obox = 0;
449
 	int size_obox = 0;
516
 	int rc = 0;
450
 	int rc = 0;
451
+	userptr_t next_page_addr = UNULL;
517
 
452
 
518
 	DBGC(golan, "%s\n", __FUNCTION__);
453
 	DBGC(golan, "%s\n", __FUNCTION__);
519
-
454
+	if ( ! fw_area->area ) {
455
+		fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
456
+		if ( fw_area->area == UNULL ) {
457
+			rc = -ENOMEM;
458
+			DBGC (golan ,"Failed to allocated %d pages \n",pages);
459
+			goto err_golan_alloc_fw_area;
460
+		}
461
+		fw_area->npages = pages;
462
+	}
463
+	assert ( fw_area->npages == pages );
464
+	next_page_addr = fw_area->area;
520
 	while ( pages > 0 ) {
465
 	while ( pages > 0 ) {
521
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
466
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
522
 		unsigned i, j;
467
 		unsigned i, j;
538
 		in->func_id 	= func_id; /* Already BE */
483
 		in->func_id 	= func_id; /* Already BE */
539
 		in->num_entries = cpu_to_be32(pas_num);
484
 		in->num_entries = cpu_to_be32(pas_num);
540
 
485
 
541
-		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
542
-			if ( ! ( addr = golan_get_page ( & golan->pages ) ) ) {
543
-				rc = -ENOMEM;
544
-				DBGC (golan ,"Couldnt allocated page \n");
545
-				goto malloc_dma_failed;
546
-			}
486
+		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
487
+				next_page_addr += GOLAN_PAGE_SIZE ) {
488
+			addr = next_page_addr;
547
 			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
489
 			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
548
 				DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
490
 				DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
549
 			}
491
 			}
563
 						get_cmd( golan , MEM_CMD_IDX )->status_own,
505
 						get_cmd( golan , MEM_CMD_IDX )->status_own,
564
 						be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
506
 						be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
565
 			}
507
 			}
566
-			golan_return_page ( &golan->pages ,addr );
567
 			goto err_send_command;
508
 			goto err_send_command;
568
 		}
509
 		}
569
 	}
510
 	}
571
 	return 0;
512
 	return 0;
572
 
513
 
573
 err_send_command:
514
 err_send_command:
574
-malloc_dma_failed:
515
+err_golan_alloc_fw_area:
575
 	/* Go over In box and free pages */
516
 	/* Go over In box and free pages */
576
 	/* Send Error to FW */
517
 	/* Send Error to FW */
577
 	/* What is next - Disable HCA? */
518
 	/* What is next - Disable HCA? */
609
 	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
550
 	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
610
 
551
 
611
 	if ( mode == GOLAN_PAGES_GIVE ) {
552
 	if ( mode == GOLAN_PAGES_GIVE ) {
612
-		rc = golan_provide_pages(golan, total_pages, func_id);
553
+		rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
613
 	} else {
554
 	} else {
614
 		rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
555
 		rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
615
 		golan->total_dma_pages = 0;
556
 		golan->total_dma_pages = 0;
799
 	struct golan_cmd_layout	*cmd;
740
 	struct golan_cmd_layout	*cmd;
800
 	struct golan_create_eq_mbox_out *out;
741
 	struct golan_create_eq_mbox_out *out;
801
 	int rc, i;
742
 	int rc, i;
802
-	userptr_t addr;
803
 
743
 
804
 	eq->cons_index	= 0;
744
 	eq->cons_index	= 0;
805
 	eq->size	= GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
745
 	eq->size	= GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
806
-	addr		= golan_get_page ( &golan->pages );
807
-	if (!addr) {
746
+	eq->eqes	= malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
747
+	if (!eq->eqes) {
808
 		rc = -ENOMEM;
748
 		rc = -ENOMEM;
809
 		goto err_create_eq_eqe_alloc;
749
 		goto err_create_eq_eqe_alloc;
810
 	}
750
 	}
811
-	eq->eqes		= (struct golan_eqe *)user_to_virt(addr, 0);
812
 
751
 
813
 	/* Set EQEs ownership bit to HW ownership */
752
 	/* Set EQEs ownership bit to HW ownership */
814
 	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
753
 	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
823
 	in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
762
 	in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
824
 
763
 
825
 	/* Fill the physical address of the page */
764
 	/* Fill the physical address of the page */
826
-	in->pas[0]		= USR_2_BE64_BUS(addr);
765
+	in->pas[0]		= VIRT_2_BE64_BUS( eq->eqes );
827
 	in->ctx.log_sz_usr_page	= cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
766
 	in->ctx.log_sz_usr_page	= cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
828
 	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
767
 	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
829
 	in->events_mask		= cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
768
 	in->events_mask		= cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
842
 	return 0;
781
 	return 0;
843
 
782
 
844
 err_create_eq_cmd:
783
 err_create_eq_cmd:
845
-	golan_return_page ( & golan->pages, virt_to_user ( eq->eqes ) );
784
+	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
846
 err_create_eq_eqe_alloc:
785
 err_create_eq_eqe_alloc:
847
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
786
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
848
 	return rc;
787
 	return rc;
867
 	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
806
 	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
868
 	GOLAN_PRINT_RC_AND_CMD_STATUS;
807
 	GOLAN_PRINT_RC_AND_CMD_STATUS;
869
 
808
 
870
-	golan_return_page ( &golan->pages, virt_to_user ( golan->eq.eqes ) );
809
+	free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
871
 	golan->eq.eqn = 0;
810
 	golan->eq.eqn = 0;
872
 
811
 
873
 	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
812
 	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
1016
 	struct golan_create_cq_mbox_out *out;
955
 	struct golan_create_cq_mbox_out *out;
1017
 	int	rc;
956
 	int	rc;
1018
 	unsigned int i;
957
 	unsigned int i;
1019
-	userptr_t addr;
1020
 
958
 
1021
 	golan_cq = zalloc(sizeof(*golan_cq));
959
 	golan_cq = zalloc(sizeof(*golan_cq));
1022
 	if (!golan_cq) {
960
 	if (!golan_cq) {
1031
 		goto err_create_cq_db_alloc;
969
 		goto err_create_cq_db_alloc;
1032
 	}
970
 	}
1033
 
971
 
1034
-	addr = golan_get_page ( &golan->pages );
1035
-	if (!addr) {
972
+	golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
973
+	if (!golan_cq->cqes) {
1036
 		rc = -ENOMEM;
974
 		rc = -ENOMEM;
1037
 		goto err_create_cq_cqe_alloc;
975
 		goto err_create_cq_cqe_alloc;
1038
 	}
976
 	}
1039
-	golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);
1040
 
977
 
1041
 	/* Set CQEs ownership bit to HW ownership */
978
 	/* Set CQEs ownership bit to HW ownership */
1042
 	for (i = 0; i < cq->num_cqes; ++i) {
979
 	for (i = 0; i < cq->num_cqes; ++i) {
1053
 	in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
990
 	in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1054
 
991
 
1055
 	/* Fill the physical address of the page */
992
 	/* Fill the physical address of the page */
1056
-	in->pas[0]		= USR_2_BE64_BUS(addr);
993
+	in->pas[0]		= VIRT_2_BE64_BUS( golan_cq->cqes );
1057
 	in->ctx.cqe_sz_flags	= GOLAN_CQE_SIZE_64 << 5;
994
 	in->ctx.cqe_sz_flags	= GOLAN_CQE_SIZE_64 << 5;
1058
 	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
995
 	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
1059
 	in->ctx.c_eqn		= cpu_to_be16(golan->eq.eqn);
996
 	in->ctx.c_eqn		= cpu_to_be16(golan->eq.eqn);
1071
 	return 0;
1008
 	return 0;
1072
 
1009
 
1073
 err_create_cq_cmd:
1010
 err_create_cq_cmd:
1074
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
1011
+	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
1075
 err_create_cq_cqe_alloc:
1012
 err_create_cq_cqe_alloc:
1076
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1013
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1077
 err_create_cq_db_alloc:
1014
 err_create_cq_db_alloc:
1108
 	cq->cqn = 0;
1045
 	cq->cqn = 0;
1109
 
1046
 
1110
 	ib_cq_set_drvdata(cq, NULL);
1047
 	ib_cq_set_drvdata(cq, NULL);
1111
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
1048
+	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
1112
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1049
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1113
 	free(golan_cq);
1050
 	free(golan_cq);
1114
 
1051
 
1154
 	struct golan_cmd_layout *cmd;
1091
 	struct golan_cmd_layout *cmd;
1155
 	struct golan_wqe_data_seg *data;
1092
 	struct golan_wqe_data_seg *data;
1156
 	struct golan_create_qp_mbox_out *out;
1093
 	struct golan_create_qp_mbox_out *out;
1157
-	userptr_t addr;
1158
 	uint32_t wqe_size_in_bytes;
1094
 	uint32_t wqe_size_in_bytes;
1159
 	uint32_t max_qp_size_in_wqes;
1095
 	uint32_t max_qp_size_in_wqes;
1160
 	unsigned int i;
1096
 	unsigned int i;
1202
 	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
1138
 	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
1203
 
1139
 
1204
 	/* allocate dma memory for WQEs (1 page is enough) - should change it */
1140
 	/* allocate dma memory for WQEs (1 page is enough) - should change it */
1205
-	addr = golan_get_page ( &golan->pages );
1206
-	if (!addr) {
1141
+	golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
1142
+	if (!golan_qp->wqes) {
1207
 		rc = -ENOMEM;
1143
 		rc = -ENOMEM;
1208
 		goto err_create_qp_wqe_alloc;
1144
 		goto err_create_qp_wqe_alloc;
1209
 	}
1145
 	}
1210
-	golan_qp->wqes		= user_to_virt(addr, 0);
1211
 	golan_qp->rq.wqes	= golan_qp->wqes;
1146
 	golan_qp->rq.wqes	= golan_qp->wqes;
1212
 	golan_qp->sq.wqes	= golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
1147
 	golan_qp->sq.wqes	= golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
1213
 			//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
1148
 			//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
1241
 	in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1176
 	in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1242
 
1177
 
1243
 	/* Fill the physical address of the page */
1178
 	/* Fill the physical address of the page */
1244
-	in->pas[0]			= USR_2_BE64_BUS(addr);
1179
+	in->pas[0]			= VIRT_2_BE64_BUS(golan_qp->wqes);
1245
 	in->ctx.qp_counter_set_usr_page	= cpu_to_be32(golan->uar.index);
1180
 	in->ctx.qp_counter_set_usr_page	= cpu_to_be32(golan->uar.index);
1246
 
1181
 
1247
 	in->ctx.flags_pd 	= cpu_to_be32(golan->pdn);
1182
 	in->ctx.flags_pd 	= cpu_to_be32(golan->pdn);
1280
 err_create_qp_cmd:
1215
 err_create_qp_cmd:
1281
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1216
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1282
 err_create_qp_db_alloc:
1217
 err_create_qp_db_alloc:
1283
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
1218
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
1284
 err_create_qp_wqe_alloc:
1219
 err_create_qp_wqe_alloc:
1285
 err_create_qp_sq_size:
1220
 err_create_qp_sq_size:
1286
 err_create_qp_sq_wqe_size:
1221
 err_create_qp_sq_wqe_size:
1488
 
1423
 
1489
 	ib_qp_set_drvdata(qp, NULL);
1424
 	ib_qp_set_drvdata(qp, NULL);
1490
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1425
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1491
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
1426
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
1492
 	free(golan_qp);
1427
 	free(golan_qp);
1493
 
1428
 
1494
 	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
1429
 	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
1526
 	unsigned long			wqe_idx;
1461
 	unsigned long			wqe_idx;
1527
 	struct golan_wqe_data_seg	*data		= NULL;
1462
 	struct golan_wqe_data_seg	*data		= NULL;
1528
 	struct golan_wqe_ctrl_seg	*ctrl		= NULL;
1463
 	struct golan_wqe_ctrl_seg	*ctrl		= NULL;
1529
-//	static uint8_t			toggle		= 0;
1530
 
1464
 
1531
 
1465
 
1532
 	wqe_idx_mask = (qp->send.num_wqes - 1);
1466
 	wqe_idx_mask = (qp->send.num_wqes - 1);
1576
 	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
1510
 	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
1577
 	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
1511
 	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
1578
 	wmb();
1512
 	wmb();
1579
-	writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
1580
-//			((toggle++ & 0x1) ? 0x100 : 0x0));
1513
+	writeq(*((__be64 *)ctrl), golan->uar.virt
1514
+			+ ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
1515
+					: DB_BUFFER0_ODD_OFFSET ) );
1581
 	return 0;
1516
 	return 0;
1582
 }
1517
 }
1583
 
1518
 
1702
 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1637
 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1703
 	struct golan *golan = ib_get_drvdata ( ibdev );
1638
 	struct golan *golan = ib_get_drvdata ( ibdev );
1704
 	struct golan_cmd_layout	*cmd;
1639
 	struct golan_cmd_layout	*cmd;
1705
-	//struct golan_query_hca_vport_pkey_data *pkey_table;
1706
 	struct golan_query_hca_vport_pkey_inbox *in;
1640
 	struct golan_query_hca_vport_pkey_inbox *in;
1707
 	int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
1641
 	int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
1708
 	int rc;
1642
 	int rc;
1719
 	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1653
 	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1720
 	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
1654
 	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
1721
 
1655
 
1722
-	//pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1723
-
1724
 	return 0;
1656
 	return 0;
1725
 err_query_vport_pkey_cmd:
1657
 err_query_vport_pkey_cmd:
1726
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
1658
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
2100
 				   cqn, eqe->data.cq_err.syndrome);
2032
 				   cqn, eqe->data.cq_err.syndrome);
2101
 //			mlx5_cq_event(dev, cqn, eqe->type);
2033
 //			mlx5_cq_event(dev, cqn, eqe->type);
2102
 			break;
2034
 			break;
2035
+		/*
2036
+		 * currently the driver do not support dynamic memory request
2037
+		 * during FW run, a follow up change will allocate FW pages once and
2038
+		 * never release them till driver shutdown, this change will not support
2039
+		 * this request as currently this request is not issued anyway.
2103
 		case GOLAN_EVENT_TYPE_PAGE_REQUEST:
2040
 		case GOLAN_EVENT_TYPE_PAGE_REQUEST:
2104
 			{
2041
 			{
2105
-				/* we should check if we get this event while we
2106
-				 * waiting for a command */
2042
+				// we should check if we get this event while we
2043
+				// waiting for a command
2107
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
2044
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
2108
 				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
2045
 				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
2109
 
2046
 
2112
 				golan_provide_pages(golan, npages, func_id);
2049
 				golan_provide_pages(golan, npages, func_id);
2113
 			}
2050
 			}
2114
 			break;
2051
 			break;
2052
+		*/
2115
 		default:
2053
 		default:
2116
 			DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
2054
 			DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
2117
 				   eqe->type, eq->eqn);
2055
 				   eqe->type, eq->eqn);
2231
 
2169
 
2232
 static inline void golan_bring_down(struct golan *golan)
2170
 static inline void golan_bring_down(struct golan *golan)
2233
 {
2171
 {
2234
-
2235
 	DBGC(golan, "%s: start\n", __FUNCTION__);
2172
 	DBGC(golan, "%s: start\n", __FUNCTION__);
2236
 
2173
 
2237
 	if (~golan->flags & GOLAN_OPEN) {
2174
 	if (~golan->flags & GOLAN_OPEN) {
2413
 		goto err_golan_alloc;
2350
 		goto err_golan_alloc;
2414
 	}
2351
 	}
2415
 
2352
 
2416
-	if ( golan_init_pages( &golan->pages ) ) {
2353
+	/* at POST stage some BIOSes have limited available dynamic memory */
2354
+	if ( golan_init_fw_areas ( golan ) ) {
2417
 		rc = -ENOMEM;
2355
 		rc = -ENOMEM;
2418
 		goto err_golan_golan_init_pages;
2356
 		goto err_golan_golan_init_pages;
2419
 	}
2357
 	}
2423
 	golan->pci = pci;
2361
 	golan->pci = pci;
2424
 	golan_pci_init( golan );
2362
 	golan_pci_init( golan );
2425
 	/* config command queues */
2363
 	/* config command queues */
2426
-	if ( fw_ver_and_cmdif( golan ) ) {
2427
-		rc = -1;
2428
-		goto err_fw_ver_cmdif;
2429
-	}
2430
-
2431
 	if ( golan_bring_up( golan ) ) {
2364
 	if ( golan_bring_up( golan ) ) {
2432
 		DBGC (golan ,"golan bringup failed\n");
2365
 		DBGC (golan ,"golan bringup failed\n");
2433
 		rc = -1;
2366
 		rc = -1;
2482
 err_utils_init:
2415
 err_utils_init:
2483
 	golan_bring_down ( golan );
2416
 	golan_bring_down ( golan );
2484
 err_golan_bringup:
2417
 err_golan_bringup:
2485
-err_fw_ver_cmdif:
2486
 	iounmap( golan->iseg );
2418
 	iounmap( golan->iseg );
2487
-	golan_free_pages( &golan->pages );
2419
+	golan_free_fw_areas ( golan );
2488
 err_golan_golan_init_pages:
2420
 err_golan_golan_init_pages:
2489
 	free ( golan );
2421
 	free ( golan );
2490
 err_golan_alloc:
2422
 err_golan_alloc:
2513
 		free_mlx_utils ( & golan->utils );
2445
 		free_mlx_utils ( & golan->utils );
2514
 	}
2446
 	}
2515
 	iounmap( golan->iseg );
2447
 	iounmap( golan->iseg );
2516
-	golan_free_pages( &golan->pages );
2448
+	golan_free_fw_areas ( golan );
2517
 	free(golan);
2449
 	free(golan);
2518
 }
2450
 }
2519
 
2451
 
2528
 			( struct shomron_nodnic_eth_send_wqe * )wqbb;
2460
 			( struct shomron_nodnic_eth_send_wqe * )wqbb;
2529
 	struct shomronprm_wqe_segment_ctrl_send *ctrl;
2461
 	struct shomronprm_wqe_segment_ctrl_send *ctrl;
2530
 
2462
 
2531
-	if ( ! ibdev || ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2463
+	if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2532
 		DBG("%s: Invalid parameters\n",__FUNCTION__);
2464
 		DBG("%s: Invalid parameters\n",__FUNCTION__);
2533
 		status = MLX_FAILED;
2465
 		status = MLX_FAILED;
2534
 		goto err;
2466
 		goto err;
2535
 	}
2467
 	}
2536
 	wmb();
2468
 	wmb();
2537
 	ctrl = & eth_wqe->ctrl;
2469
 	ctrl = & eth_wqe->ctrl;
2538
-	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt + 0x800);
2470
+	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
2471
+			( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
2472
+			: DB_BUFFER0_EVEN_OFFSET ) );
2539
 err:
2473
 err:
2540
 	return status;
2474
 	return status;
2541
 }
2475
 }

+ 16
- 0
src/drivers/infiniband/golan.h View File

111
     unsigned long	phys;
111
     unsigned long	phys;
112
 };
112
 };
113
 
113
 
114
+
115
+struct golan_firmware_area {
116
+	/* length of area in pages */
117
+	uint32_t npages;
118
+	/** Firmware area in external memory
119
+	 *
120
+	 * This is allocated when first needed, and freed only on
121
+	 * final teardown, in order to avoid memory map changes at
122
+	 * runtime.
123
+	 */
124
+	userptr_t area;
125
+};
114
 /* Queue Pair */
126
 /* Queue Pair */
115
 #define GOLAN_SEND_WQE_BB_SIZE			64
127
 #define GOLAN_SEND_WQE_BB_SIZE			64
116
 #define GOLAN_SEND_UD_WQE_SIZE			sizeof(struct golan_send_wqe_ud)
128
 #define GOLAN_SEND_UD_WQE_SIZE			sizeof(struct golan_send_wqe_ud)
204
 #define GOLAN_EQE_SIZE				sizeof(struct golan_eqe)
216
 #define GOLAN_EQE_SIZE				sizeof(struct golan_eqe)
205
 #define GOLAN_NUM_EQES 				8
217
 #define GOLAN_NUM_EQES 				8
206
 #define GOLAN_EQ_DOORBELL_OFFSET		0x40
218
 #define GOLAN_EQ_DOORBELL_OFFSET		0x40
219
+#define DB_BUFFER0_EVEN_OFFSET	0x800
220
+#define DB_BUFFER0_ODD_OFFSET	0x900
207
 
221
 
208
 #define GOLAN_EQ_MAP_ALL_EVENTS					\
222
 #define GOLAN_EQ_MAP_ALL_EVENTS					\
209
 	((1 << GOLAN_EVENT_TYPE_PATH_MIG         	)|	\
223
 	((1 << GOLAN_EVENT_TYPE_PATH_MIG         	)|	\
323
 	mlx_utils		*utils;
337
 	mlx_utils		*utils;
324
 
338
 
325
 	struct golan_port		ports[GOLAN_MAX_PORTS];
339
 	struct golan_port		ports[GOLAN_MAX_PORTS];
340
+#define GOLAN_FW_AREAS_NUM 2
341
+	struct golan_firmware_area fw_areas[GOLAN_FW_AREAS_NUM];
326
 };
342
 };
327
 
343
 
328
 #endif /* _GOLAN_H_*/
344
 #endif /* _GOLAN_H_*/

+ 1
- 7
src/drivers/infiniband/mlx_nodnic/src/mlx_device.c View File

169
 	mlx_status 			status = MLX_SUCCESS;
169
 	mlx_status 			status = MLX_SUCCESS;
170
 	mlx_uint32			disable = 1;
170
 	mlx_uint32			disable = 1;
171
 #ifndef DEVICE_CX3
171
 #ifndef DEVICE_CX3
172
-#define NODNIC_CLEAR_INT_BAR_OFFSET 0x100C
173
-	if ( device_priv->device_cap.support_bar_cq_ctrl ) {
174
-		status = mlx_pci_mem_write ( device_priv->utils, MlxPciWidthUint32, 0,
175
-			( mlx_uint64 ) ( NODNIC_CLEAR_INT_BAR_OFFSET ), 1, &disable );
176
-	} else {
177
-		status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
178
-	}
172
+	status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
179
 	MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
173
 	MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
180
 #else
174
 #else
181
 	mlx_utils *utils = device_priv->utils;
175
 	mlx_utils *utils = device_priv->utils;

+ 36
- 2
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c View File

20
 FILE_LICENCE ( GPL2_OR_LATER );
20
 FILE_LICENCE ( GPL2_OR_LATER );
21
 
21
 
22
 #include "mlx_mtu.h"
22
 #include "mlx_mtu.h"
23
-#include "mlx_memory.h"
24
-#include "mlx_bail.h"
23
+#include "../../include/public/mlx_memory.h"
24
+#include "../../include/public/mlx_bail.h"
25
 
25
 
26
 mlx_status
26
 mlx_status
27
 mlx_get_max_mtu(
27
 mlx_get_max_mtu(
58
 bad_param:
58
 bad_param:
59
 	return status;
59
 	return status;
60
 }
60
 }
61
+
62
+mlx_status
63
+mlx_set_admin_mtu(
64
+		IN mlx_utils 	*utils,
65
+		IN mlx_uint8 	port_num,
66
+		IN mlx_uint32 	admin_mtu
67
+		)
68
+{
69
+	mlx_status status = MLX_SUCCESS;
70
+	struct mlx_mtu mtu;
71
+	mlx_uint32 reg_status;
72
+
73
+	if (utils == NULL) {
74
+		status = MLX_INVALID_PARAMETER;
75
+		goto bad_param;
76
+	}
77
+
78
+	mlx_memory_set(utils, &mtu, 0, sizeof(mtu));
79
+
80
+	mtu.local_port = port_num;
81
+	mtu.admin_mtu = admin_mtu;
82
+
83
+	status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_WRITE, &mtu,
84
+			sizeof(mtu), &reg_status);
85
+	MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
86
+	if (reg_status != 0) {
87
+		MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
88
+		status = MLX_FAILED;
89
+		goto reg_err;
90
+	}
91
+reg_err:
92
+bad_param:
93
+	return status;
94
+}

+ 8
- 2
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h View File

22
 
22
 
23
 FILE_LICENCE ( GPL2_OR_LATER );
23
 FILE_LICENCE ( GPL2_OR_LATER );
24
 
24
 
25
-#include "mlx_reg_access.h"
26
-#include "mlx_utils.h"
25
+#include "../../include/public/mlx_utils.h"
26
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
27
 
27
 
28
 #define BYTE_TO_BIT	0x8
28
 #define BYTE_TO_BIT	0x8
29
 
29
 
49
 		OUT mlx_uint32 	*max_mtu
49
 		OUT mlx_uint32 	*max_mtu
50
 		);
50
 		);
51
 
51
 
52
+mlx_status
53
+mlx_set_admin_mtu(
54
+		IN mlx_utils 	*utils,
55
+		IN mlx_uint8 	port_num,
56
+		IN mlx_uint32 	admin_mtu
57
+		);
52
 #endif /* MLX_MTU_H_ */
58
 #endif /* MLX_MTU_H_ */

+ 4
- 4
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c View File

39
 		TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE),
39
 		TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE),
40
 		TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE),
40
 		TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE),
41
 		TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE),
41
 		TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE),
42
-		TlvMappingEntry(0x2020, 0x2020, NVRAM_TLV_CLASS_PHYSICAL_PORT, FALSE),
43
 		TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE),
42
 		TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE),
44
 		TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE),
43
 		TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE),
45
 		TlvMappingEntry(0x2006, 0x206, NVRAM_TLV_CLASS_HOST, FALSE),
44
 		TlvMappingEntry(0x2006, 0x206, NVRAM_TLV_CLASS_HOST, FALSE),
67
 		TlvMappingEntry(0x110, 0x110, NVRAM_TLV_CLASS_HOST, FALSE),
66
 		TlvMappingEntry(0x110, 0x110, NVRAM_TLV_CLASS_HOST, FALSE),
68
 		TlvMappingEntry(0x192, 0x192, NVRAM_TLV_CLASS_GLOBAL, FALSE),
67
 		TlvMappingEntry(0x192, 0x192, NVRAM_TLV_CLASS_GLOBAL, FALSE),
69
 		TlvMappingEntry(0x101, 0x101, NVRAM_TLV_CLASS_GLOBAL, TRUE),
68
 		TlvMappingEntry(0x101, 0x101, NVRAM_TLV_CLASS_GLOBAL, TRUE),
69
+		TlvMappingEntry(0x194, 0x194, NVRAM_TLV_CLASS_GLOBAL, FALSE),
70
 		TlvMappingEntry(0, 0, 0, 0),
70
 		TlvMappingEntry(0, 0, 0, 0),
71
 };
71
 };
72
 
72
 
239
 		IN REG_ACCESS_OPT opt,
239
 		IN REG_ACCESS_OPT opt,
240
 		IN mlx_size data_size,
240
 		IN mlx_size data_size,
241
 		IN NV_DEFAULT_OPT def_en,
241
 		IN NV_DEFAULT_OPT def_en,
242
+		IN NVDA_WRITER_ID writer_id,
242
 		IN OUT mlx_uint8 *version,
243
 		IN OUT mlx_uint8 *version,
243
 		IN OUT mlx_void *data
244
 		IN OUT mlx_void *data
244
 		)
245
 		)
263
 	data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32);
264
 	data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32);
264
 	mlx_memory_set(utils, &nvda, 0, sizeof(nvda));
265
 	mlx_memory_set(utils, &nvda, 0, sizeof(nvda));
265
 	nvda.nv_header.length = data_size_align_to_dword;
266
 	nvda.nv_header.length = data_size_align_to_dword;
266
-	nvda.nv_header.rd_en = 0;
267
-	nvda.nv_header.def_en = def_en;
268
-	nvda.nv_header.over_en = 1;
267
+	nvda.nv_header.access_mode = def_en;
269
 	nvda.nv_header.version = *version;
268
 	nvda.nv_header.version = *version;
269
+	nvda.nv_header.writer_id = writer_id;
270
 
270
 
271
 	nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type);
271
 	nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type);
272
 
272
 

+ 23
- 13
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h View File

31
 	NVRAM_TLV_CLASS_HOST = 3,
31
 	NVRAM_TLV_CLASS_HOST = 3,
32
 } NVRAM_CLASS_CODE;
32
 } NVRAM_CLASS_CODE;
33
 
33
 
34
+typedef enum {
35
+	NVDA_NV_HEADER_WRITER_ID_UEFI_HII  = 0x6,
36
+	NVDA_NV_HEADER_WRITER_ID_FLEXBOOT = 0x8,
37
+} NVDA_WRITER_ID;
38
+
39
+typedef enum {
40
+  TLV_ACCESS_DEFAULT_DIS = 0,
41
+  TLV_ACCESS_CURRENT = 1,
42
+  TLV_ACCESS_DEFAULT_EN = 2,
43
+} NV_DEFAULT_OPT;
44
+
34
 struct nvconfig_tlv_type_per_port {
45
 struct nvconfig_tlv_type_per_port {
35
 	 mlx_uint32 param_idx	:16;
46
 	 mlx_uint32 param_idx	:16;
36
 	 mlx_uint32 port		:8;
47
 	 mlx_uint32 port		:8;
78
 	 mlx_uint32 length		:9; /*Size of configuration item data in bytes between 0..256 */
89
 	 mlx_uint32 length		:9; /*Size of configuration item data in bytes between 0..256 */
79
 	 mlx_uint32 reserved0	:3;
90
 	 mlx_uint32 reserved0	:3;
80
 	 mlx_uint32 version		:4; /* Configuration item version */
91
 	 mlx_uint32 version		:4; /* Configuration item version */
81
-	 mlx_uint32 reserved1	:7;
82
-
83
-	 mlx_uint32 def_en		:1; /*Choose whether to access the default value or the user-defined value.
84
-									0x0 Read or write the user-defined value.
85
-									0x1 Read the default value (only valid for reads).*/
86
-
87
-	 mlx_uint32 rd_en		:1; /*enables reading the TLV by lower priorities
88
-									0 - TLV can be read by the subsequent lifecycle priorities.
89
-									1 - TLV cannot be read by the subsequent lifecycle priorities. */
90
-	 mlx_uint32 over_en		:1; /*enables overwriting the TLV by lower priorities
91
-									0 - Can only be overwritten by the current lifecycle priority
92
-									1 - Allowed to be overwritten by subsequent lifecycle priorities */
92
+	 mlx_uint32 writer_id	:5;
93
+	 mlx_uint32 reserved1	:1;
94
+
95
+	 mlx_uint32 access_mode	:2; /*Defines which value of the Configuration Item will be accessed.
96
+								0x0: NEXT - Next value to be applied
97
+								0x1: CURRENT - Currently set values (only valid for Query operation) Supported only if NVGC.nvda_read_current_settings==1.
98
+								0x2: FACTORY - Default factory values (only valid for Query operation). Supported only if NVGC.nvda_read_factory_settings==1.*/
99
+
100
+	 mlx_uint32 reserved2	:2;
93
 	 mlx_uint32 header_type	:2;
101
 	 mlx_uint32 header_type	:2;
94
-	 mlx_uint32 priority		:2;
102
+	 mlx_uint32 reserved3	:2;
95
 	 mlx_uint32 valid	:2;
103
 	 mlx_uint32 valid	:2;
96
 /* -------------- */
104
 /* -------------- */
97
 	 union nvconfig_tlv_type tlv_type;;
105
 	 union nvconfig_tlv_type tlv_type;;
98
 /* -------------- */
106
 /* -------------- */
99
 	mlx_uint32 crc			:16;
107
 	mlx_uint32 crc			:16;
100
 	mlx_uint32 reserved		:16;
108
 	mlx_uint32 reserved		:16;
109
+
101
 };
110
 };
102
 
111
 
103
 #define NVCONFIG_MAX_TLV_SIZE 256
112
 #define NVCONFIG_MAX_TLV_SIZE 256
149
 		IN REG_ACCESS_OPT opt,
158
 		IN REG_ACCESS_OPT opt,
150
 		IN mlx_size data_size,
159
 		IN mlx_size data_size,
151
 		IN NV_DEFAULT_OPT def_en,
160
 		IN NV_DEFAULT_OPT def_en,
161
+		IN NVDA_WRITER_ID writer_id,
152
 		IN OUT mlx_uint8 *version,
162
 		IN OUT mlx_uint8 *version,
153
 		IN OUT mlx_void *data
163
 		IN OUT mlx_void *data
154
 		);
164
 		);

+ 14
- 3
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c View File

386
 	mlx_uint8 version = 0;
386
 	mlx_uint8 version = 0;
387
 
387
 
388
 	status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ,
388
 	status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ,
389
-			data_size, TLV_ACCESS_DEFAULT_EN, &version, data);
389
+			data_size, TLV_ACCESS_DEFAULT_EN, 0,
390
+			&version, data);
390
 	MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
391
 	MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
391
 				"nvconfig_nvdata_access failed ");
392
 				"nvconfig_nvdata_access failed ");
392
 	for (index = 0; index * 4 < data_size; index++) {
393
 	for (index = 0; index * 4 < data_size; index++) {
493
 		)
494
 		)
494
 {
495
 {
495
 	mlx_status status = MLX_SUCCESS;
496
 	mlx_status status = MLX_SUCCESS;
497
+	mlx_uint8 version = 0;
498
+	mlx_uint32 index;
496
 
499
 
497
 	if (utils == NULL || rom_ini == NULL) {
500
 	if (utils == NULL || rom_ini == NULL) {
498
 		status = MLX_INVALID_PARAMETER;
501
 		status = MLX_INVALID_PARAMETER;
501
 	}
504
 	}
502
 	mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini));
505
 	mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini));
503
 
506
 
504
-	status = nvconfig_nvdata_default_access(utils, 0, GLOBAL_ROM_INI_TYPE,
505
-			sizeof(*rom_ini), rom_ini);
507
+	status = nvconfig_nvdata_access(utils, 0, GLOBAL_ROM_INI_TYPE, REG_ACCESS_READ,
508
+			sizeof(*rom_ini), TLV_ACCESS_DEFAULT_DIS, 0,
509
+			&version, rom_ini);
510
+	MLX_CHECK_STATUS(NULL, status, bad_param,
511
+				"nvconfig_nvdata_access failed ");
512
+	for (index = 0; index * 4 < sizeof(*rom_ini); index++) {
513
+		mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) rom_ini)[index]),
514
+				((mlx_uint32 *) rom_ini) + index);
515
+	}
516
+
506
 bad_param:
517
 bad_param:
507
 	return status;
518
 	return status;
508
 }
519
 }

+ 0
- 145
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c View File

1
-/*
2
- * Copyright (C) 2015 Mellanox Technologies Ltd.
3
- *
4
- * This program is free software; you can redistribute it and/or
5
- * modify it under the terms of the GNU General Public License as
6
- * published by the Free Software Foundation; either version 2 of the
7
- * License, or any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful, but
10
- * WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12
- * General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program; if not, write to the Free Software
16
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17
- * 02110-1301, USA.
18
- */
19
-
20
-FILE_LICENCE ( GPL2_OR_LATER );
21
-
22
-#include "mlx_ocbb.h"
23
-#include "mlx_icmd.h"
24
-#include "mlx_bail.h"
25
-
26
-mlx_status
27
-mlx_ocbb_init (
28
-	IN mlx_utils *utils,
29
-	IN mlx_uint64 address
30
-	)
31
-{
32
-	mlx_status status = MLX_SUCCESS;
33
-	struct mlx_ocbb_init ocbb_init;
34
-	ocbb_init.address_hi = (mlx_uint32)(address >> 32);
35
-	ocbb_init.address_lo = (mlx_uint32)address;
36
-
37
-	if (utils == NULL) {
38
-		status = MLX_INVALID_PARAMETER;
39
-		goto bad_param;
40
-	}
41
-
42
-	status = mlx_icmd_send_command(
43
-			utils,
44
-			OCBB_INIT,
45
-			&ocbb_init,
46
-			sizeof(ocbb_init),
47
-			0
48
-			);
49
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
50
-icmd_err:
51
-bad_param:
52
-	return status;
53
-}
54
-
55
-mlx_status
56
-mlx_ocbb_query_header_status (
57
-	IN mlx_utils *utils,
58
-	OUT mlx_uint8 *ocbb_status
59
-	)
60
-{
61
-	mlx_status status = MLX_SUCCESS;
62
-	struct mlx_ocbb_query_status ocbb_query_status;
63
-
64
-	if (utils == NULL) {
65
-		status = MLX_INVALID_PARAMETER;
66
-		goto bad_param;
67
-	}
68
-
69
-	status = mlx_icmd_send_command(
70
-			utils,
71
-			OCBB_QUERY_HEADER_STATUS,
72
-			&ocbb_query_status,
73
-			0,
74
-			sizeof(ocbb_query_status)
75
-			);
76
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
77
-	*ocbb_status = ocbb_query_status.status;
78
-icmd_err:
79
-bad_param:
80
-	return status;
81
-}
82
-
83
-mlx_status
84
-mlx_ocbb_query_etoc_status (
85
-	IN mlx_utils *utils,
86
-	OUT mlx_uint8 *ocbb_status
87
-	)
88
-{
89
-	mlx_status status = MLX_SUCCESS;
90
-	struct mlx_ocbb_query_status ocbb_query_status;
91
-
92
-	if (utils == NULL) {
93
-		status = MLX_INVALID_PARAMETER;
94
-		goto bad_param;
95
-	}
96
-
97
-	status = mlx_icmd_send_command(
98
-			utils,
99
-			OCBB_QUERY_ETOC_STATUS,
100
-			&ocbb_query_status,
101
-			0,
102
-			sizeof(ocbb_query_status)
103
-			);
104
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
105
-	*ocbb_status = ocbb_query_status.status;
106
-icmd_err:
107
-bad_param:
108
-	return status;
109
-}
110
-
111
-mlx_status
112
-mlx_ocbb_set_event (
113
-	IN mlx_utils *utils,
114
-	IN mlx_uint64			event_data,
115
-	IN mlx_uint8			event_number,
116
-	IN mlx_uint8			event_length,
117
-	IN mlx_uint8			data_length,
118
-	IN mlx_uint8			data_start_offset
119
-	)
120
-{
121
-	mlx_status status = MLX_SUCCESS;
122
-	struct mlx_ocbb_set_event ocbb_event;
123
-
124
-	if (utils == NULL) {
125
-		status = MLX_INVALID_PARAMETER;
126
-		goto bad_param;
127
-	}
128
-
129
-	ocbb_event.data_length = data_length;
130
-	ocbb_event.data_start_offset = data_start_offset;
131
-	ocbb_event.event_number = event_number;
132
-	ocbb_event.event_data = event_data;
133
-	ocbb_event.event_length = event_length;
134
-	status = mlx_icmd_send_command(
135
-			utils,
136
-			OCBB_QUERY_SET_EVENT,
137
-			&ocbb_event,
138
-			sizeof(ocbb_event),
139
-			0
140
-			);
141
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
142
-icmd_err:
143
-bad_param:
144
-	return status;
145
-}

+ 0
- 73
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h View File

1
-#ifndef MLX_OCBB_H_
2
-#define MLX_OCBB_H_
3
-
4
-/*
5
- * Copyright (C) 2015 Mellanox Technologies Ltd.
6
- *
7
- * This program is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU General Public License as
9
- * published by the Free Software Foundation; either version 2 of the
10
- * License, or any later version.
11
- *
12
- * This program is distributed in the hope that it will be useful, but
13
- * WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU General Public License
18
- * along with this program; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20
- * 02110-1301, USA.
21
- */
22
-
23
-FILE_LICENCE ( GPL2_OR_LATER );
24
-
25
-#include "mlx_utils.h"
26
-
27
-#define MLX_OCBB_EVENT_DATA_SIZE 2
28
-struct mlx_ocbb_init {
29
-	mlx_uint32 address_hi;
30
-	mlx_uint32 address_lo;
31
-};
32
-
33
-struct mlx_ocbb_query_status {
34
-	mlx_uint32 reserved	:24;
35
-	mlx_uint32 status	:8;
36
-};
37
-
38
-struct mlx_ocbb_set_event {
39
-	mlx_uint64 event_data;
40
-	mlx_uint32 event_number	:8;
41
-	mlx_uint32 event_length	:8;
42
-	mlx_uint32 data_length	:8;
43
-	mlx_uint32 data_start_offset	:8;
44
-};
45
-
46
-mlx_status
47
-mlx_ocbb_init (
48
-	IN mlx_utils *utils,
49
-	IN mlx_uint64 address
50
-	);
51
-
52
-mlx_status
53
-mlx_ocbb_query_header_status (
54
-	IN mlx_utils *utils,
55
-	OUT mlx_uint8 *ocbb_status
56
-	);
57
-
58
-mlx_status
59
-mlx_ocbb_query_etoc_status (
60
-	IN mlx_utils *utils,
61
-	OUT mlx_uint8 *ocbb_status
62
-	);
63
-
64
-mlx_status
65
-mlx_ocbb_set_event (
66
-	IN mlx_utils *utils,
67
-	IN mlx_uint64			EventData,
68
-	IN mlx_uint8			EventNumber,
69
-	IN mlx_uint8			EventLength,
70
-	IN mlx_uint8			DataLength,
71
-	IN mlx_uint8			DataStartOffset
72
-	);
73
-#endif /* MLX_OCBB_H_ */

+ 0
- 5
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h View File

31
   REG_ACCESS_WRITE = 2,
31
   REG_ACCESS_WRITE = 2,
32
 } REG_ACCESS_OPT;
32
 } REG_ACCESS_OPT;
33
 
33
 
34
-typedef enum {
35
-  TLV_ACCESS_DEFAULT_DIS = 0,
36
-  TLV_ACCESS_DEFAULT_EN = 1,
37
-} NV_DEFAULT_OPT;
38
-
39
 #define REG_ID_NVDA  0x9024
34
 #define REG_ID_NVDA  0x9024
40
 #define REG_ID_NVDI  0x9025
35
 #define REG_ID_NVDI  0x9025
41
 #define REG_ID_NVIA 0x9029
36
 #define REG_ID_NVIA 0x9029

+ 0
- 84
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c View File

1
-/*
2
- * Copyright (C) 2015 Mellanox Technologies Ltd.
3
- *
4
- * This program is free software; you can redistribute it and/or
5
- * modify it under the terms of the GNU General Public License as
6
- * published by the Free Software Foundation; either version 2 of the
7
- * License, or any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful, but
10
- * WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12
- * General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program; if not, write to the Free Software
16
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17
- * 02110-1301, USA.
18
- */
19
-
20
-FILE_LICENCE ( GPL2_OR_LATER );
21
-
22
-#include "mlx_wol_rol.h"
23
-#include "mlx_icmd.h"
24
-#include "mlx_memory.h"
25
-#include "mlx_bail.h"
26
-
27
-mlx_status
28
-mlx_set_wol (
29
-	IN mlx_utils *utils,
30
-	IN mlx_uint8 wol_mask
31
-	)
32
-{
33
-	mlx_status status = MLX_SUCCESS;
34
-	struct mlx_wol_rol wol_rol;
35
-
36
-	if (utils == NULL) {
37
-		status = MLX_INVALID_PARAMETER;
38
-		goto bad_param;
39
-	}
40
-
41
-	mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
42
-	wol_rol.wol_mode_valid = TRUE;
43
-	wol_rol.wol_mode = wol_mask;
44
-	status = mlx_icmd_send_command(
45
-			utils,
46
-			SET_WOL_ROL,
47
-			&wol_rol,
48
-			sizeof(wol_rol),
49
-			0
50
-			);
51
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
52
-icmd_err:
53
-bad_param:
54
-	return status;
55
-}
56
-
57
-mlx_status
58
-mlx_query_wol (
59
-	IN mlx_utils *utils,
60
-	OUT mlx_uint8 *wol_mask
61
-	)
62
-{
63
-	mlx_status status = MLX_SUCCESS;
64
-	struct mlx_wol_rol wol_rol;
65
-
66
-	if (utils == NULL || wol_mask == NULL) {
67
-		status = MLX_INVALID_PARAMETER;
68
-		goto bad_param;
69
-	}
70
-
71
-	mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
72
-	status = mlx_icmd_send_command(
73
-			utils,
74
-			QUERY_WOL_ROL,
75
-			&wol_rol,
76
-			0,
77
-			sizeof(wol_rol)
78
-			);
79
-	MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
80
-	*wol_mask = wol_rol.wol_mode;
81
-icmd_err:
82
-bad_param:
83
-	return status;
84
-}

+ 0
- 61
src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h View File

1
-#ifndef MLX_WOL_ROL_H_
2
-#define MLX_WOL_ROL_H_
3
-
4
-/*
5
- * Copyright (C) 2015 Mellanox Technologies Ltd.
6
- *
7
- * This program is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU General Public License as
9
- * published by the Free Software Foundation; either version 2 of the
10
- * License, or any later version.
11
- *
12
- * This program is distributed in the hope that it will be useful, but
13
- * WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU General Public License
18
- * along with this program; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20
- * 02110-1301, USA.
21
- */
22
-
23
-FILE_LICENCE ( GPL2_OR_LATER );
24
-
25
-
26
-#include "mlx_utils.h"
27
-
28
-typedef enum {
29
-	WOL_MODE_DISABLE = 0x0,
30
-	WOL_MODE_SECURE = 0x2,
31
-	WOL_MODE_MAGIC = 0x4,
32
-	WOL_MODE_ARP = 0x8,
33
-	WOL_MODE_BC = 0x10,
34
-	WOL_MODE_MC = 0x20,
35
-	WOL_MODE_UC = 0x40,
36
-	WOL_MODE_PHY = 0x80,
37
-} WOL_MODE;
38
-
39
-struct mlx_wol_rol {
40
-	mlx_uint32 reserved0	:32;
41
-	mlx_uint32 reserved1	:32;
42
-	mlx_uint32 wol_mode		:8;
43
-	mlx_uint32 rol_mode		:8;
44
-	mlx_uint32 reserved3	:14;
45
-	mlx_uint32 wol_mode_valid	:1;
46
-	mlx_uint32 rol_mode_valid	:1;
47
-};
48
-
49
-mlx_status
50
-mlx_set_wol (
51
-	IN mlx_utils *utils,
52
-	IN mlx_uint8 wol_mask
53
-	);
54
-
55
-mlx_status
56
-mlx_query_wol (
57
-	IN mlx_utils *utils,
58
-	OUT mlx_uint8 *wol_mask
59
-	);
60
-
61
-#endif /* MLX_WOL_ROL_H_ */

+ 0
- 9
src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c View File

1
-MlxDebugLogImpl()
2
-		{
3
-	DBGC((DEBUG),"");
4
-		}
5
-MlxInfoLogImpl()
6
-{
7
-	DBGC((INFO),"");
8
-			}
9
-}

+ 1
- 1
src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c View File

107
 		status = MLX_INVALID_PARAMETER;
107
 		status = MLX_INVALID_PARAMETER;
108
 		goto bail;
108
 		goto bail;
109
 	}
109
 	}
110
-	status = mlx_pci_mem_read_priv(utils, bar_index, width, offset, count, buffer);
110
+	status = mlx_pci_mem_read_priv(utils, width,bar_index, offset, count, buffer);
111
 bail:
111
 bail:
112
 	return status;
112
 	return status;
113
 }
113
 }

Loading…
Cancel
Save