@@ -42,80 +42,47 @@ FILE_LICENCE ( GPL2_OR_LATER );
 #include "mlx_utils/include/public/mlx_bail.h"
 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
 
+
 #define DEVICE_IS_CIB( device ) ( device == 0x1011 )
+
 /******************************************************************************/
 /************* Very simple memory management for umalloced pages **************/
 /******* Temporary solution until full memory management is implemented *******/
 /******************************************************************************/
+
 struct golan_page {
 	struct list_head list;
 	userptr_t addr;
 };
 
-static void golan_free_pages ( struct list_head *head ) {
-	struct golan_page *page, *tmp;
-	list_for_each_entry_safe ( page, tmp, head, list ) {
-		list_del ( &page->list );
-		ufree ( page->addr );
-		free ( page );
+static void golan_free_fw_areas ( struct golan *golan ) {
+	int i;
+
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
+		if ( golan->fw_areas[i].area ) {
+			ufree ( golan->fw_areas[i].area );
+			golan->fw_areas[i].area = UNULL;
+		}
 	}
 }
 
-static int golan_init_pages ( struct list_head *head ) {
-	int rc = 0;
+static int golan_init_fw_areas ( struct golan *golan ) {
+	int rc = 0, i = 0;
 
-	if ( !head ) {
+	if ( ! golan ) {
 		rc = -EINVAL;
-		goto err_golan_init_pages_bad_param;
+		goto err_golan_init_fw_areas_bad_param;
 	}
 
-	INIT_LIST_HEAD ( head );
-	return rc;
+	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
+		golan->fw_areas[i].area = UNULL;
 
-err_golan_init_pages_bad_param:
 	return rc;
-}
-
-static userptr_t golan_get_page ( struct list_head *head ) {
-	struct golan_page *page;
-	userptr_t addr;
-
-	if ( list_empty ( head ) ) {
-		addr = umalloc ( GOLAN_PAGE_SIZE );
-		if ( addr == UNULL ) {
-			goto err_golan_iget_page_alloc_page;
-		}
-	} else {
-		page = list_first_entry ( head, struct golan_page, list );
-		list_del ( &page->list );
-		addr = page->addr;
-		free ( page );
-	}
-err_golan_iget_page_alloc_page:
-	return addr;
-}
-
-static int golan_return_page ( struct list_head *head,
-		userptr_t addr ) {
-	struct golan_page *new_entry;
-	int rc = 0;
-
-	if ( ! head ) {
-		rc = -EINVAL;
-		goto err_golan_return_page_bad_param;
-	}
-	new_entry = zalloc ( sizeof ( *new_entry ) );
-	if ( new_entry == NULL ) {
-		rc = -ENOMEM;
-		goto err_golan_return_page_alloc_page;
-	}
-	new_entry->addr = addr;
-	list_add_tail( &new_entry->list, head );
 
-err_golan_return_page_alloc_page:
-err_golan_return_page_bad_param:
+	err_golan_init_fw_areas_bad_param:
 	return rc;
 }
+
 /******************************************************************************/
 
 const char *golan_qp_state_as_string[] = {
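
The new functions rely on struct golan_firmware_area and GOLAN_FW_AREAS_NUM, which are declared in the companion golan.h change rather than in this file. A minimal sketch of what that header is assumed to provide (field names and comments here are reconstructed, not quoted from the patch):

    /* Sketch of the assumed golan.h declarations; the authoritative
     * definitions are in the header change accompanying this patch. */
    #define GOLAN_FW_AREAS_NUM 2	/* one area per pages-query type */

    struct golan_firmware_area {
    	uint32_t npages;	/* size of the area, in firmware pages */
    	userptr_t area;		/* umalloc'ed base address; UNULL until first use */
    };

golan_init_fw_areas() only marks both areas as unallocated; the actual umalloc happens lazily in golan_provide_pages() below, and golan_free_fw_areas() releases whatever was allocated at teardown.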
@@ -177,16 +144,6 @@ static inline u8 xor8_buf(void *buf, int len)
 	return sum;
 }
 
-static inline int verify_block_sig(struct golan_cmd_prot_block *block)
-{
-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
-		return -EINVAL;
-
-	if (xor8_buf(block, sizeof(*block)) != 0xff)
-		return -EINVAL;
-	return 0;
-}
-
 static inline const char *cmd_status_str(u8 status)
 {
 	switch (status) {
@@ -258,24 +215,6 @@ static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
 	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
 }
 
-/**
- * Get Golan FW
- */
-static int fw_ver_and_cmdif ( struct golan *golan ) {
-	DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
-		golan->iseg->fw_rev,
-		golan->iseg->cmdif_rev_fw_sub,
-		fw_rev_maj ( golan ), fw_rev_min ( golan ),
-		fw_rev_sub ( golan ), cmdif_rev ( golan));
-
-	if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
-		DBGC (golan ,"CMDIF %d not supported current is %d\n",
-				cmdif_rev ( golan ), PXE_CMDIF_REF);
-		return 1;
-	}
-	return 0;
-}
-
 static inline void show_out_status(uint32_t *out)
 {
 	DBG("%x\n", be32_to_cpu(out[0]));
@@ -466,10 +405,8 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
 
 	while ( pages > 0 ) {
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
-		unsigned i;
 		struct golan_cmd_layout *cmd;
 		struct golan_manage_pages_inbox *in;
-		struct golan_manage_pages_outbox_data *out;
 
 		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
 		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
@@ -485,11 +422,7 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
 		in->num_entries = cpu_to_be32(pas_num);
 
 		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
-			out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
 			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
-			for (i = 0; i < out_num_entries; ++i) {
-				golan_return_page ( &golan->pages, ( BE64_BUS_2_USR( out->pas[i] ) ) );
-			}
 		} else {
 			if ( rc == -EBUSY ) {
 				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
@@ -506,17 +439,29 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
 		pages -= out_num_entries;
 	}
 	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
-	return 0;
+	return rc;
 }
 
-static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
+static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
+		, __be16 func_id,struct golan_firmware_area *fw_area) {
 	struct mbox *mailbox;
 	int size_ibox = 0;
 	int size_obox = 0;
 	int rc = 0;
+	userptr_t next_page_addr = UNULL;
 
 	DBGC(golan, "%s\n", __FUNCTION__);
-
+	if ( ! fw_area->area ) {
+		fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
+		if ( fw_area->area == UNULL ) {
+			rc = -ENOMEM;
+			DBGC (golan ,"Failed to allocated %d pages \n",pages);
+			goto err_golan_alloc_fw_area;
+		}
+		fw_area->npages = pages;
+	}
+	assert ( fw_area->npages == pages );
+	next_page_addr = fw_area->area;
 	while ( pages > 0 ) {
 		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
 		unsigned i, j;
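
The hunk above changes the allocation strategy: instead of pulling pages one at a time from the umalloc'ed free list, golan_provide_pages() now umalloc's one contiguous region of GOLAN_PAGE_SIZE * pages bytes per firmware request, records it in fw_area, and (in the next hunk) hands the firmware consecutive GOLAN_PAGE_SIZE slices of it via next_page_addr. A minimal sketch of the carving arithmetic, using a hypothetical helper that is not part of the patch:

    /* Hypothetical helper: address of page k inside a firmware area.
     * Mirrors the next_page_addr += GOLAN_PAGE_SIZE stride used below. */
    static userptr_t golan_fw_area_page ( struct golan_firmware_area *fw_area,
    				uint32_t k ) {
    	assert ( k < fw_area->npages );
    	return ( fw_area->area + ( k * GOLAN_PAGE_SIZE ) );
    }

Because the area is allocated once and kept until driver shutdown, a repeated boot/init pages request of the same size reuses the same region, which the assert ( fw_area->npages == pages ) guards.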
@@ -538,12 +483,9 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
 		in->func_id = func_id; /* Already BE */
 		in->num_entries = cpu_to_be32(pas_num);
 
-		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
-			if ( ! ( addr = golan_get_page ( & golan->pages ) ) ) {
-				rc = -ENOMEM;
-				DBGC (golan ,"Couldnt allocated page \n");
-				goto malloc_dma_failed;
-			}
+		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
+				next_page_addr += GOLAN_PAGE_SIZE ) {
+			addr = next_page_addr;
 			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
 				DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
 			}
@@ -563,7 +505,6 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
 					get_cmd( golan , MEM_CMD_IDX )->status_own,
 					be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
 			}
-			golan_return_page ( &golan->pages ,addr );
 			goto err_send_command;
 		}
 	}
@@ -571,7 +512,7 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
 	return 0;
 
 err_send_command:
-malloc_dma_failed:
+err_golan_alloc_fw_area:
 	/* Go over In box and free pages */
 	/* Send Error to FW */
 	/* What is next - Disable HCA? */
@@ -609,7 +550,7 @@ static inline int golan_handle_pages(struct golan *golan,
 	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
 
 	if ( mode == GOLAN_PAGES_GIVE ) {
-		rc = golan_provide_pages(golan, total_pages, func_id);
+		rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
 	} else {
 		rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
 		golan->total_dma_pages = 0;
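
Indexing fw_areas with qry-1 gives each pages-query type its own persistent area: boot pages and init pages map to the two entries of golan->fw_areas. The numbering below is illustrative only (the real constants live elsewhere in the driver, not in this diff), shown to make the qry-1 arithmetic concrete:

    /* Illustrative numbering, not quoted from the patch: if boot-pages
     * queries are 0x1 and init-pages queries are 0x2, then qry-1 yields
     * indices 0 and 1 into golan->fw_areas[GOLAN_FW_AREAS_NUM]. */
    enum golan_pages_qry_example {
    	EXAMPLE_BOOT_PAGES_QRY = 0x1,
    	EXAMPLE_INIT_PAGES_QRY = 0x2,
    };

Run-time (regular) page requests are never issued by this driver, which is why two areas suffice; the GOLAN_EVENT_TYPE_PAGE_REQUEST handler is commented out further down for the same reason.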
@@ -799,16 +740,14 @@ static int golan_create_eq(struct golan *golan)
 	struct golan_cmd_layout *cmd;
 	struct golan_create_eq_mbox_out *out;
 	int rc, i;
-	userptr_t addr;
 
 	eq->cons_index = 0;
 	eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
-	addr = golan_get_page ( &golan->pages );
-	if (!addr) {
+	eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	if (!eq->eqes) {
 		rc = -ENOMEM;
 		goto err_create_eq_eqe_alloc;
 	}
-	eq->eqes = (struct golan_eqe *)user_to_virt(addr, 0);
 
 	/* Set EQEs ownership bit to HW ownership */
 	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
@@ -823,7 +762,7 @@ static int golan_create_eq(struct golan *golan)
 	in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
 
 	/* Fill the physical address of the page */
-	in->pas[0] = USR_2_BE64_BUS(addr);
+	in->pas[0] = VIRT_2_BE64_BUS( eq->eqes );
 	in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
 	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
 	in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
@@ -842,7 +781,7 @@ static int golan_create_eq(struct golan *golan)
 	return 0;
 
 err_create_eq_cmd:
-	golan_return_page ( & golan->pages, virt_to_user ( eq->eqes ) );
+	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
 err_create_eq_eqe_alloc:
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
 	return rc;
@@ -867,7 +806,7 @@ static void golan_destory_eq(struct golan *golan)
 	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
 	GOLAN_PRINT_RC_AND_CMD_STATUS;
 
-	golan_return_page ( &golan->pages, virt_to_user ( golan->eq.eqes ) );
+	free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
 	golan->eq.eqn = 0;
 
 	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
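
From this point on, the EQ (and, below, CQ and QP) rings move from the umalloc'ed page pool to malloc_dma(), which returns an ordinary virtual pointer suitable for DMA; requesting GOLAN_PAGE_SIZE bytes at GOLAN_PAGE_SIZE alignment keeps the single page handed to the firmware naturally aligned. A minimal sketch of the allocate/release pairing this imposes (illustrative, assuming the pre-malloc_phys iPXE API used by this driver):

    /* Every malloc_dma ( size, phys_align ) must be released with
     * free_dma ( ptr, size ) using the same size, as the error and
     * teardown paths in these hunks now do. */
    void *ring = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
    if ( ! ring )
    	return -ENOMEM;
    /* ... pass virt_to_bus ( ring ) to the device ... */
    free_dma ( ring, GOLAN_PAGE_SIZE );

Accordingly, in->pas[0] switches from USR_2_BE64_BUS(addr) on a userptr_t to VIRT_2_BE64_BUS(...) on the virtual pointer.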
@@ -1016,7 +955,6 @@ static int golan_create_cq(struct ib_device *ibdev,
 	struct golan_create_cq_mbox_out *out;
 	int rc;
 	unsigned int i;
-	userptr_t addr;
 
 	golan_cq = zalloc(sizeof(*golan_cq));
 	if (!golan_cq) {
@@ -1031,12 +969,11 @@ static int golan_create_cq(struct ib_device *ibdev,
 		goto err_create_cq_db_alloc;
 	}
 
-	addr = golan_get_page ( &golan->pages );
-	if (!addr) {
+	golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	if (!golan_cq->cqes) {
 		rc = -ENOMEM;
 		goto err_create_cq_cqe_alloc;
 	}
-	golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);
 
 	/* Set CQEs ownership bit to HW ownership */
 	for (i = 0; i < cq->num_cqes; ++i) {
@@ -1053,7 +990,7 @@ static int golan_create_cq(struct ib_device *ibdev,
 	in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
 
 	/* Fill the physical address of the page */
-	in->pas[0] = USR_2_BE64_BUS(addr);
+	in->pas[0] = VIRT_2_BE64_BUS( golan_cq->cqes );
 	in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
 	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
 	in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);
@@ -1071,7 +1008,7 @@ static int golan_create_cq(struct ib_device *ibdev,
 	return 0;
 
 err_create_cq_cmd:
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
+	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
 err_create_cq_cqe_alloc:
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 err_create_cq_db_alloc:
@@ -1108,7 +1045,7 @@ static void golan_destroy_cq(struct ib_device *ibdev,
 	cq->cqn = 0;
 
 	ib_cq_set_drvdata(cq, NULL);
-	golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
+	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
 	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 	free(golan_cq);
 
@@ -1154,7 +1091,6 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 	struct golan_cmd_layout *cmd;
 	struct golan_wqe_data_seg *data;
 	struct golan_create_qp_mbox_out *out;
-	userptr_t addr;
 	uint32_t wqe_size_in_bytes;
 	uint32_t max_qp_size_in_wqes;
 	unsigned int i;
@@ -1202,12 +1138,11 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
 
 	/* allocate dma memory for WQEs (1 page is enough) - should change it */
-	addr = golan_get_page ( &golan->pages );
-	if (!addr) {
+	golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	if (!golan_qp->wqes) {
 		rc = -ENOMEM;
 		goto err_create_qp_wqe_alloc;
 	}
-	golan_qp->wqes = user_to_virt(addr, 0);
 	golan_qp->rq.wqes = golan_qp->wqes;
 	golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
 			//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
@@ -1241,7 +1176,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 	in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
 
 	/* Fill the physical address of the page */
-	in->pas[0] = USR_2_BE64_BUS(addr);
+	in->pas[0] = VIRT_2_BE64_BUS(golan_qp->wqes);
 	in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
 
 	in->ctx.flags_pd = cpu_to_be32(golan->pdn);
@@ -1280,7 +1215,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 err_create_qp_cmd:
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
 err_create_qp_db_alloc:
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
 err_create_qp_sq_size:
 err_create_qp_sq_wqe_size:
@@ -1488,7 +1423,7 @@ static void golan_destroy_qp(struct ib_device *ibdev,
 
 	ib_qp_set_drvdata(qp, NULL);
 	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
-	golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
+	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
 	free(golan_qp);
 
 	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
@@ -1526,7 +1461,6 @@ static int golan_post_send(struct ib_device *ibdev,
 	unsigned long wqe_idx;
 	struct golan_wqe_data_seg *data = NULL;
 	struct golan_wqe_ctrl_seg *ctrl = NULL;
-//	static uint8_t toggle = 0;
 
 
 	wqe_idx_mask = (qp->send.num_wqes - 1);
@@ -1576,8 +1510,9 @@ static int golan_post_send(struct ib_device *ibdev,
 	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
 	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
 	wmb();
-	writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
-//			((toggle++ & 0x1) ? 0x100 : 0x0));
+	writeq(*((__be64 *)ctrl), golan->uar.virt
+			+ ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
+					: DB_BUFFER0_ODD_OFFSET ) );
 	return 0;
 }
 
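
This hunk, together with the shomron_tx_uar_send_db() hunk at the end of the patch, replaces the hard-coded UAR offset 0x800 with alternation between the two doorbell buffers, selected by the low bit of the WQE index; the previously commented-out toggle (0x800 plus 0x0 or 0x100) pointed at the same layout. The offset macros are defined in the PRM headers, not in this diff; assumed values consistent with that old toggle:

    /* Assumed definitions, inferred from the removed 0x800/0x900 toggle;
     * the authoritative values live in the CIB/shomron PRM headers. */
    #define DB_BUFFER0_EVEN_OFFSET	0x800
    #define DB_BUFFER0_ODD_OFFSET	0x900

Alternating buffers avoids issuing two back-to-back doorbell writes to the same buffer before the device has latched the first.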
@@ -1702,7 +1637,6 @@ err_query_vport_gid_cmd:
 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
 	struct golan *golan = ib_get_drvdata ( ibdev );
 	struct golan_cmd_layout *cmd;
-	//struct golan_query_hca_vport_pkey_data *pkey_table;
 	struct golan_query_hca_vport_pkey_inbox *in;
 	int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
 	int rc;
@@ -1719,8 +1653,6 @@ static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
 	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
 	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
 
-	//pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
-
 	return 0;
 err_query_vport_pkey_cmd:
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
@@ -2100,10 +2032,15 @@ static void golan_poll_eq(struct ib_device *ibdev)
 				cqn, eqe->data.cq_err.syndrome);
 //			mlx5_cq_event(dev, cqn, eqe->type);
 			break;
+		/*
+		 * currently the driver do not support dynamic memory request
+		 * during FW run, a follow up change will allocate FW pages once and
+		 * never release them till driver shutdown, this change will not support
+		 * this request as currently this request is not issued anyway.
 		case GOLAN_EVENT_TYPE_PAGE_REQUEST:
 			{
-				/* we should check if we get this event while we
-				 * waiting for a command */
+				// we should check if we get this event while we
+				// waiting for a command
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
 				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
 
@@ -2112,6 +2049,7 @@ static void golan_poll_eq(struct ib_device *ibdev)
 				golan_provide_pages(golan, npages, func_id);
 			}
 			break;
+		*/
 		default:
 			DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
 				   eqe->type, eq->eqn);
@@ -2231,7 +2169,6 @@ static int golan_register_ibdev(struct golan_port *port)
 
 static inline void golan_bring_down(struct golan *golan)
 {
-
 	DBGC(golan, "%s: start\n", __FUNCTION__);
 
 	if (~golan->flags & GOLAN_OPEN) {
@@ -2413,7 +2350,8 @@ static int golan_probe_normal ( struct pci_device *pci ) {
 		goto err_golan_alloc;
 	}
 
-	if ( golan_init_pages( &golan->pages ) ) {
+	/* at POST stage some BIOSes have limited available dynamic memory */
+	if ( golan_init_fw_areas ( golan ) ) {
 		rc = -ENOMEM;
 		goto err_golan_golan_init_pages;
 	}
@@ -2423,11 +2361,6 @@ static int golan_probe_normal ( struct pci_device *pci ) {
 	golan->pci = pci;
 	golan_pci_init( golan );
 	/* config command queues */
-	if ( fw_ver_and_cmdif( golan ) ) {
-		rc = -1;
-		goto err_fw_ver_cmdif;
-	}
-
 	if ( golan_bring_up( golan ) ) {
 		DBGC (golan ,"golan bringup failed\n");
 		rc = -1;
@@ -2482,9 +2415,8 @@ err_golan_probe_alloc_ibdev:
 err_utils_init:
 	golan_bring_down ( golan );
 err_golan_bringup:
-err_fw_ver_cmdif:
 	iounmap( golan->iseg );
-	golan_free_pages( &golan->pages );
+	golan_free_fw_areas ( golan );
 err_golan_golan_init_pages:
 	free ( golan );
 err_golan_alloc:
@@ -2513,7 +2445,7 @@ static void golan_remove_normal ( struct pci_device *pci ) {
 		free_mlx_utils ( & golan->utils );
 	}
 	iounmap( golan->iseg );
-	golan_free_pages( &golan->pages );
+	golan_free_fw_areas ( golan );
 	free(golan);
 }
 
@@ -2528,14 +2460,16 @@ static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
 		( struct shomron_nodnic_eth_send_wqe * )wqbb;
 	struct shomronprm_wqe_segment_ctrl_send *ctrl;
 
-	if ( ! ibdev || ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
+	if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
 		DBG("%s: Invalid parameters\n",__FUNCTION__);
 		status = MLX_FAILED;
 		goto err;
 	}
 	wmb();
 	ctrl = & eth_wqe->ctrl;
-	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt + 0x800);
+	writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
+			( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
+			: DB_BUFFER0_EVEN_OFFSET ) );
 err:
 	return status;
 }