
mtnic.c

/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
FILE_LICENCE ( GPL2_ONLY );

#include <strings.h>
#include <errno.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <byteswap.h>
#include <unistd.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>
#include <ipxe/iobuf.h>

#include "mtnic.h"

/*
 * mtnic.c - iPXE driver for Mellanox 10Gig ConnectX EN
 */
/********************************************************************
 *
 * MTNIC allocation functions
 *
 *********************************************************************/

/**
 * mtnic_alloc_aligned
 *
 * @v size		Buffer size, in bytes
 * @v va		Filled in with the buffer's virtual address
 * @v pa		Filled in with the buffer's physical (bus) address
 * @v alignment		Required alignment, in bytes
 *
 * Allocates an aligned buffer, storing its virtual address in 'va'
 * and its aligned physical address in 'pa'.
 */
static int
mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned int alignment)
{
	*va = malloc_dma(size, alignment);
	if (!*va) {
		return -EADDRINUSE;
	}
	*pa = (u32)virt_to_bus(*va);

	return 0;
}
/**
 *
 * mtnic_alloc_cmdif - allocate the command interface
 *
 */
static int
mtnic_alloc_cmdif(struct mtnic *mtnic)
{
	u32 bar = mtnic_pci_dev.dev.bar[0];

	mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
	if ( !mtnic->hcr ) {
		DBG("Couldn't map command register\n");
		return -EADDRINUSE;
	}

	mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf, &mtnic->cmd.mapping, PAGE_SIZE);
	if ( !mtnic->cmd.buf ) {
		DBG("Error in allocating buffer for command interface\n");
		return -EADDRINUSE;
	}

	return 0;
}
/**
 * Free RX io buffers
 */
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
	int index;

	for (; ring->cons <= ring->prod; ++ring->cons) {
		index = ring->cons & ring->size_mask;
		if ( ring->iobuf[index] ) {
			free_iob(ring->iobuf[index]);
		}
	}
}
/**
 *
 * mtnic_alloc_iobuf - allocate io buffers and attach them to the RX ring
 *
 */
static int
mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
		  unsigned int size)
{
	struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
	u32 index;

	while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
		index = ring->prod & ring->size_mask;
		ring->iobuf[index] = alloc_iob(size);
		if (!ring->iobuf[index]) {
			if (ring->prod <= (ring->cons + 1)) {
				DBG ( "Dropping packet, buffer is full\n" );
			}
			break;
		}

		/* Attach io_buffer to descriptor */
		rx_desc_ptr = ring->buf +
			      (sizeof(struct mtnic_rx_desc) * index);
		rx_desc_ptr->data.count = cpu_to_be32(size);
		rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
		rx_desc_ptr->data.addr_l = cpu_to_be32(
			virt_to_bus(ring->iobuf[index]->data));

		++ring->prod;
	}

	/* Update RX producer index (PI) */
	ring->db->count = cpu_to_be32(ring->prod & 0xffff);

	return 0;
}
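/*
 * Editorial note: 'prod' and 'cons' are free-running counters reduced
 * modulo the power-of-two ring size via 'size_mask', and the loop above
 * keeps at most UNITS_BUFFER_SIZE buffers outstanding.  For example, if
 * the ring size were 16 (size_mask 0xf), then after 20 posted buffers
 * prod == 20 and the next slot would be prod & size_mask == 4, while
 * (prod - cons) still gives the number of buffers hardware holds.
 */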
/**
 * mtnic_alloc_ring
 *
 * Alloc and configure TX or RX ring
 *
 */
static int
mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
		 u32 size, u16 stride, u16 cq, u8 is_rx)
{
	unsigned int i;
	int err;
	struct mtnic_rx_desc *rx_desc;
	struct mtnic_tx_desc *tx_desc;

	ring->size = size; /* Number of descriptors */
	ring->size_mask = size - 1;
	ring->stride = stride; /* Size of each entry */
	ring->cq = cq; /* CQ number associated with this ring */
	ring->cons = 0;
	ring->prod = 0;

	/* Alloc descriptors buffer */
	ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
				       sizeof(struct mtnic_tx_desc));
	err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
				  &ring->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating descriptor ring of size %x\n",
		    ring->buf_size);
		return -EADDRINUSE;
	}
	memset(ring->buf, 0, ring->buf_size);

	DBG("Allocated %s ring (addr:%p) - buf:%p size:%x "
	    "buf_size:%x dma:%lx\n",
	    is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
	    ring->buf_size, ring->dma);

	if (is_rx) { /* RX ring */
		/* Alloc doorbell */
		err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
					  (void *)&ring->db, &ring->db_dma, 32);
		if (err) {
			DBG("Failed allocating Rx ring doorbell record\n");
			free_dma(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

		/* ==- Configure Descriptor -== */
		/* Init ctrl seg of rx desc */
		for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
			rx_desc = ring->buf +
				  (sizeof(struct mtnic_rx_desc) * i);
			/* Pre-link descriptor */
			rx_desc->next = cpu_to_be16(i + 1);
		}
		/* The last ctrl descriptor is '0' and points to the first one */

		/* Alloc IO_BUFFERS */
		err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			free_dma(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	} else { /* TX ring */
		/* Set initial ownership of all Tx descriptors to SW (1) */
		for (i = 0; i < ring->size; i++) {
			tx_desc = ring->buf + ring->stride * i;
			tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
		}

		/* DB */
		ring->db_offset = cpu_to_be32(
			((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);

		/* Map Tx+CQ doorbells */
		DBG("Mapping TxCQ doorbell at offset:0x%x\n",
		    priv->mtnic->fw.txcq_db_offset);
		ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
					priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
		if (!ring->txcq_db) {
			DBG("Couldn't map txcq doorbell, aborting...\n");
			free_dma(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	}

	return 0;
}
/**
 * mtnic_alloc_cq
 *
 * Alloc and configure CQ.
 *
 */
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
	       u8 is_rx, u32 size, u32 offset_ind)
{
	int err;
	unsigned int i;

	cq->num = num;
	cq->dev = dev;
	cq->size = size;
	cq->last = 0;
	cq->is_rx = is_rx;
	cq->offset_ind = offset_ind;

	/* Alloc doorbell */
	err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
				  (void *)&cq->db, &cq->db_dma, 32);
	if (err) {
		DBG("Failed allocating CQ doorbell record\n");
		return -EADDRINUSE;
	}
	memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));

	/* Alloc CQEs buffer */
	cq->buf_size = size * sizeof(struct mtnic_cqe);
	err = mtnic_alloc_aligned(cq->buf_size,
				  (void *)&cq->buf, &cq->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating CQ buffer\n");
		free_dma(cq->db, sizeof(struct mtnic_cq_db_record));
		return -EADDRINUSE;
	}
	memset(cq->buf, 0, cq->buf_size);

	DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
	    "dma:%lx db:%p db_dma:%lx cqn offset:%x\n",
	    cq, cq->size, cq->buf, cq->buf_size, cq->dma, cq->db,
	    cq->db_dma, offset_ind);

	/* Set ownership of all CQEs to HW */
	DBG("Setting HW ownership for CQ:%d\n", num);
	for (i = 0; i < cq->size; i++) {
		/* Initial HW ownership is 1 */
		cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
	}

	return 0;
}
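/*
 * Editorial note: hardware initially owns every CQE (owner bit set
 * above) and returns a CQE to software by writing a completion with the
 * owner bit flipped relative to the previous pass; see the XNOR test in
 * the process functions further down.
 */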
/**
 * mtnic_alloc_resources
 *
 * Alloc and configure CQs, Tx, Rx
 */
unsigned int
mtnic_alloc_resources(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err;
	int cq_ind = 0;
	int cq_offset = priv->mtnic->fw.cq_offset;

	/* Alloc 1st CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Rx CQ\n");
		return -EADDRINUSE;
	}

	/* Alloc RX */
	err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_rx_desc), cq_ind, /* RX */ 1);
	if (err) {
		DBG("Failed allocating Rx Ring\n");
		goto cq0_error;
	}
	++cq_ind;

	/* Alloc 2nd CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Tx CQ\n");
		goto rx_error;
	}

	/* Alloc TX */
	err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
	if (err) {
		DBG("Failed allocating Tx ring\n");
		goto cq1_error;
	}

	return 0;

cq1_error:
	free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
	free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
rx_error:
	free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
	free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
	free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	return -EADDRINUSE;
}
/**
 * mtnic_alloc_eq
 *
 * Note: EQ is not used by the driver but must be allocated
 */
static int
mtnic_alloc_eq(struct mtnic *mtnic)
{
	int err;
	unsigned int i;
	struct mtnic_eqe *eqe_desc = NULL;

	/* Allocating doorbell */
	mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
			       mtnic->fw.eq_db_offset, sizeof(u32));
	if (!mtnic->eq_db) {
		DBG("Couldn't map EQ doorbell, aborting...\n");
		return -EADDRINUSE;
	}

	/* Allocating buffer */
	mtnic->eq.size = NUM_EQES;
	mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
	err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
				  &mtnic->eq.dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating EQ buffer\n");
		iounmap(mtnic->eq_db);
		return -EADDRINUSE;
	}
	memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);

	/* Set initial ownership of all EQEs to HW */
	for (i = 0; i < mtnic->eq.size; i++) {
		eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
		eqe_desc->own |= MTNIC_BIT_EQE_OWN;
	}

	mdelay(20);
	return 0;
}
/********************************************************************
 *
 * MTNIC command functions
 * -=-=-=-=-=-=-=-=-=-=-=-
 *
 *********************************************************************/
static inline int
cmdif_go_bit(struct mtnic *mtnic)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	u32 status;
	int i;

	for (i = 0; i < TBIT_RETRIES; i++) {
		status = be32_to_cpu(readl(&hcr->status_go_opcode));
		if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
		    (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
			/* Read expected t-bit - now return go-bit value */
			return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
		}
	}

	DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
	return -EBUSY; /* Return busy... */
}
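/*
 * Editorial note on the HCR handshake: software may touch the HCR only
 * while the go bit is clear.  Every command submission flips a toggle
 * ("t") bit that the firmware echoes back in the status register, so
 * cmdif_go_bit() first waits until the echoed t-bit matches the one
 * last written before trusting the go-bit value it reads.
 */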
/* Base Command interface */
static int
mtnic_cmd(struct mtnic *mtnic, void *in_imm,
	  void *out_imm, u32 in_modifier, u16 op)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	int err = 0;
	u32 out_param_h = 0;
	u32 out_param_l = 0;
	u32 in_param_h = 0;
	u32 in_param_l = 0;
	static u16 token = 0x8000;
	u32 status;
	unsigned int timeout = 0;

	token++;

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("GO BIT BUSY:%p.\n", hcr + 6);
		err = -EBUSY;
		goto out;
	}
	if (in_imm) {
		in_param_h = *((u32*)in_imm);
		in_param_l = *((u32*)in_imm + 1);
	} else {
		in_param_l = cpu_to_be32(mtnic->cmd.mapping);
	}
	out_param_l = cpu_to_be32(mtnic->cmd.mapping);

	/* writing to MCR */
	writel(in_param_h, &hcr->in_param_h);
	writel(in_param_l, &hcr->in_param_l);
	writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
	writel(out_param_h, &hcr->out_param_h);
	writel(out_param_l, &hcr->out_param_l);
	writel((u32)cpu_to_be32(token << 16), &hcr->token);
	wmb();

	/* flip toggle bit before each write to the HCR */
	mtnic->cmd.tbit = !mtnic->cmd.tbit;
	writel( ( u32 )
		cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
			    ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
		&hcr->status_go_opcode);

	while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
		mdelay ( 1 );
		++timeout;
	}

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
		err = -EBUSY;
		goto out;
	}

	if (out_imm) {
		*((u32 *)out_imm) = readl(&hcr->out_param_h);
		*((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
	}

	status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;
	if (status) {
		DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
		    op, token, status);
		return status;
	}

out:
	return err;
}
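/*
 * Editorial note: commands that need more than an immediate parameter
 * use the PAGE_SIZE DMA mailbox allocated in mtnic_alloc_cmdif(); its
 * bus address is passed through in_param_l/out_param_l above.  For the
 * MAP_FW/MAP_PAGES commands below, each mailbox entry occupies four
 * 32-bit words (16 bytes) with the page's physical address in the
 * fourth word - hence the 'page_arr[nent * 4 + 3]' indexing and the
 * MTNIC_MAILBOX_SIZE / 16 batch limit.
 */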
/* MAP PAGES wrapper */
static int
mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
{
	unsigned int j;
	u32 addr;
	unsigned int len;
	u32 *page_arr = mtnic->cmd.buf;
	int nent = 0;
	int err = 0;

	memset(page_arr, 0, PAGE_SIZE);

	len = PAGE_SIZE * pages.num;
	pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
	addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
	DBG("Mapping pages: size: %x address: %p\n", pages.num, pages.buf);

	if (addr & (PAGE_MASK)) {
		DBG("Got FW area not aligned to %d (%llx/%x)\n",
		    PAGE_SIZE, (u64) addr, len);
		return -EADDRINUSE;
	}

	/* Function maps each PAGE separately */
	for (j = 0; j < len; j += PAGE_SIZE) {
		page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
		if (++nent == MTNIC_MAILBOX_SIZE / 16) {
			err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
			if (err)
				return -EIO;
			nent = 0;
		}
	}

	if (nent) {
		err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
	}

	return err;
}
/*
 * Query FW
 */
static int
mtnic_QUERY_FW ( struct mtnic *mtnic )
{
	int err;
	struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;

	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
	if (err)
		return -EIO;

	/* Get FW and interface versions */
	mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
			((u64) be16_to_cpu(cmd->rev_min) << 16) |
			(u64) be16_to_cpu(cmd->rev_smin);
	mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);

	/* Get offset for internal error reports (debug) */
	mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
	mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);

	DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);

	/* Get number of required FW (4k) pages */
	mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);

	return 0;
}
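/*
 * Editorial note: fw_ver packs the major revision into bits 47:32, the
 * minor into bits 31:16 and the sub-minor into bits 15:0, matching the
 * "FW version:%d.%d.%d" unpacking in mtnic_init_card().
 */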
static int
mtnic_OPEN_NIC(struct mtnic *mtnic)
{
	struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
	u32 extra_pages[2] = {0};
	int err;

	memset(open_nic, 0, sizeof *open_nic);

	/* port 1 */
	open_nic->log_rx_p1 = 0;
	open_nic->log_cq_p1 = 1;
	open_nic->log_tx_p1 = 0;
	open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	/* port 2 */
	open_nic->log_rx_p2 = 0;
	open_nic->log_cq_p2 = 1;
	open_nic->log_tx_p2 = 0;
	open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);

	mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages + 1));
	DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);

	return err;
}
static int
mtnic_CONFIG_RX(struct mtnic *mtnic)
{
	struct mtnic_if_config_rx_in_imm config_rx;

	memset(&config_rx, 0, sizeof config_rx);
	return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}

static int
mtnic_CONFIG_TX(struct mtnic *mtnic)
{
	struct mtnic_if_config_send_in_imm config_tx;

	config_tx.enph_gpf = 0;
	return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}

static int
mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
{
	struct mtnic_if_heart_beat_out_imm heart_beat;
	int err;
	u32 flags;

	err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
	if (!err) {
		flags = be32_to_cpu(heart_beat.flags);
		if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
			DBG("Internal error detected\n");
			return -EIO;
		}
		*link_state = flags &
			      ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
	}
	return err;
}
/*
 * Port commands
 */
static int
mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
{
	struct mtnic_if_set_port_default_ring_in_imm def_ring;

	memset(&def_ring, 0, sizeof(def_ring));
	def_ring.ring = ring;
	return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}

static int
mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}

static int
mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}
/*
 * Config commands
 */
static int
mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
		u16 cq_ind, struct mtnic_cq *cq)
{
	struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;

	memset(config_cq, 0, sizeof *config_cq);
	config_cq->cq = cq_ind;
	config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
	config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
	config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
	config_cq->page_address[1] = cpu_to_be32(cq->dma);
	DBG("config cq address: %x dma_address: %lx "
	    "offset: %d size %d index: %d\n",
	    config_cq->page_address[1], cq->dma,
	    config_cq->offset, config_cq->size, config_cq->cq);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_CQ);
}

static int
mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;

	memset(config_tx_ring, 0, sizeof *config_tx_ring);
	config_tx_ring->ring = cpu_to_be16(ring_ind);
	config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
	config_tx_ring->cq = cpu_to_be16(ring->cq);
	config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_TX_RING);
}

static int
mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;

	memset(config_rx_ring, 0, sizeof *config_rx_ring);
	config_rx_ring->ring = ring_ind;
	MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
		     MTNIC_MASK_CONFIG_RX_RING_SIZE);
	MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
		     MTNIC_MASK_CONFIG_RX_RING_STRIDE);
	config_rx_ring->cq = cpu_to_be16(ring->cq);
	config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);

	DBG("Config RX ring starting at address:%lx\n", ring->dma);

	config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_RX_RING);
}
static int
mtnic_CONFIG_EQ(struct mtnic *mtnic)
{
	struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;

	if (mtnic->eq.dma & (PAGE_MASK)) {
		DBG("Misaligned EQ buffer:%lx\n",
		    mtnic->eq.dma);
		return -EADDRINUSE;
	}

	memset(eq, 0, sizeof *eq);
	MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
	MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
	MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
	eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);

	return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}

static int
mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64 *mac)
{
	struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
	u32 modifier = ((u32) port + 1) << 16;

	memset(&ring_addr, 0, sizeof(ring_addr));

	ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
	ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
	ring_addr.flags_vlan_id |= cpu_to_be16(
		MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));

	return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
}

static int
mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
{
	struct mtnic_if_set_port_state_in_imm port_state;

	port_state.state = state ? cpu_to_be32(
		MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
	port_state.reserved = 0;

	return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_STATE);
}

static int
mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
{
	struct mtnic_if_set_port_mtu_in_imm set_mtu;

	memset(&set_mtu, 0, sizeof(set_mtu));
	set_mtu.mtu = cpu_to_be16(mtu);

	return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_MTU);
}
/*
static int
mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
{
	struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;

	// When no vlans are configured we disable the filter
	// (i.e., pass all vlans) because we ignore them anyhow
	memset(vlan_filter, 0xff, sizeof(*vlan_filter));

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}
*/

static int
mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
{
	struct mtnic_if_release_resource_in_imm rel;

	memset(&rel, 0, sizeof rel);
	rel.index = index;
	rel.type = type;

	return mtnic_cmd ( priv->mtnic,
			   &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
			   0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
}
static int
mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
{
	struct mtnic_if_query_cap_in_imm cap;
	u32 out_imm[2];
	int err;

	memset(&cap, 0, sizeof cap);
	cap.cap_index = index;
	cap.cap_modifier = mod;

	err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);

	*((u32 *)result) = be32_to_cpu(*(out_imm + 1));
	*((u32 *)result + 1) = be32_to_cpu(*out_imm);

	DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
	    " error:%d\n", index, mod, *result, err);

	return err;
}

#define DO_QUERY_CAP(cap, mod, var)				\
	err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);	\
	if (err)						\
		return err;					\
	(var) = result
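/*
 * Editorial note: DO_QUERY_CAP expects local variables named 'err' and
 * 'result' to be in scope at the expansion site, and on failure it
 * returns from the *calling* function - see the query helpers below.
 */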
static int
mtnic_query_num_ports(struct mtnic *mtnic)
{
	int err = 0;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);

	return 0;
}

static int
mtnic_query_mac(struct mtnic *mtnic)
{
	int err = 0;
	int i;
	u64 result;

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
	}

	return 0;
}

static int
mtnic_query_offsets(struct mtnic *mtnic)
{
	int err;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
		     MTNIC_IF_MEM_TYPE_SNOOP,
		     mtnic->fw.mem_type_snoop_be);
	mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);

	DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
	DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
		DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
		DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
		DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
		DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
		DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
	}

	mdelay(20);
	return 0;
}
/********************************************************************
 *
 * MTNIC initialization functions
 *
 *********************************************************************/

/**
 * Reset device
 */
void
mtnic_reset ( void )
{
	void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
				4 );
	writel ( cpu_to_be32 ( 1 ), reset );
	iounmap ( reset );
}
/**
 * Restore PCI config
 */
static int
restore_config(void)
{
	int i;
	int rc;

	for (i = 0; i < 64; ++i) {
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
						    i << 2,
						    mtnic_pci_dev.dev.
						    dev_config_space[i]);
			if (rc)
				return rc;
		}
	}
	return 0;
}
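/*
 * Editorial note: dwords 22 and 23 (config space offsets 0x58 and 0x5c)
 * are deliberately skipped on restore; the code gives no reason, but
 * presumably writing those registers back after a reset would have side
 * effects on this device.
 */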
/**
 * Init PCI configuration
 */
static int
mtnic_init_pci(struct pci_device *dev)
{
	int i;
	int err;

	/* save bars */
	DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		mtnic_pci_dev.dev.bar[i] =
			pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		DBG("bar[%d]= 0x%08lx\n", i, mtnic_pci_dev.dev.bar[i]);
	}

	/* save config space */
	for (i = 0; i < 64; ++i) {
		err = pci_read_config_dword(dev, i << 2,
					    &mtnic_pci_dev.dev.
					    dev_config_space[i]);
		if (err) {
			DBG("Cannot save configuration space\n");
			return err;
		}
	}

	mtnic_pci_dev.dev.dev = dev;

	return 0;
}
/**
 * Initialize hardware
 */
static inline
int mtnic_init_card(struct mtnic *mtnic)
{
	int err = 0;

	/* Alloc command interface */
	err = mtnic_alloc_cmdif ( mtnic );
	if (err) {
		DBG("Failed to init command interface, aborting\n");
		return -EADDRINUSE;
	}

	/*
	 * Bring up HW
	 */
	err = mtnic_QUERY_FW ( mtnic );
	if (err) {
		DBG("QUERY_FW command failed, aborting\n");
		goto cmd_error;
	}
	DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);

	/* Allocate memory for FW and start it */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
	if (err) {
		DBG("Error in MAP_FW\n");
		if (mtnic->fw.fw_pages.buf)
			ufree((intptr_t)mtnic->fw.fw_pages.buf);
		goto cmd_error;
	}

	/* Run firmware */
	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
	if (err) {
		DBG("Error in RUN FW\n");
		goto map_fw_error;
	}

	DBG("FW version:%d.%d.%d\n",
	    (u16) (mtnic->fw_ver >> 32),
	    (u16) ((mtnic->fw_ver >> 16) & 0xffff),
	    (u16) (mtnic->fw_ver & 0xffff));

	/* Query number of ports */
	err = mtnic_query_num_ports(mtnic);
	if (err) {
		DBG("Insufficient resources, aborting\n");
		goto map_fw_error;
	}

	/* Open NIC */
	err = mtnic_OPEN_NIC(mtnic);
	if (err) {
		DBG("Failed opening NIC, aborting\n");
		goto map_fw_error;
	}

	/* Allocate and map pages workspace */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
	if (err) {
		DBG("Couldn't allocate %x FW extra pages, aborting\n",
		    mtnic->fw.extra_pages.num);
		if (mtnic->fw.extra_pages.buf)
			ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_fw_error;
	}

	/* Get device information */
	err = mtnic_query_mac(mtnic);
	if (err) {
		DBG("Insufficient resources in query mac, aborting\n");
		goto map_fw_error;
	}

	/* Get device offsets */
	err = mtnic_query_offsets(mtnic);
	if (err) {
		DBG("Failed retrieving resource offsets, aborting\n");
		ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_extra_error;
	}

	/* Alloc EQ */
	err = mtnic_alloc_eq(mtnic);
	if (err) {
		DBG("Failed to init shared resources. error: %d\n", err);
		goto map_extra_error;
	}

	/* Configure HW */
	err = mtnic_CONFIG_EQ(mtnic);
	if (err) {
		DBG("Failed configuring EQ\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_RX(mtnic);
	if (err) {
		DBG("Failed Rx configuration\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_TX(mtnic);
	if (err) {
		DBG("Failed Tx configuration\n");
		goto eq_error;
	}

	return 0;

eq_error:
	iounmap(mtnic->eq_db);
	free_dma(mtnic->eq.buf, mtnic->eq.buf_size);
map_extra_error:
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
map_fw_error:
	ufree((intptr_t)mtnic->fw.fw_pages.buf);
cmd_error:
	iounmap(mtnic->hcr);
	free_dma(mtnic->cmd.buf, PAGE_SIZE);

	return -EADDRINUSE;
}
/*******************************************************************
 *
 * Process functions
 *
 * Process completions of TX and RX
 *
 ********************************************************************/
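/*
 * Editorial note on the XNOR ownership test used below: 'cq->last' is a
 * free-running counter, so 'cq->last & cq->size' alternates between 0
 * and cq->size on successive passes over the ring.  A CQE belongs to
 * software when its owner bit matches the current pass parity - owner
 * bit clear on even passes, set on odd ones - which saves resetting the
 * owner bits after every pass.
 */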
void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
			 struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe = cq->buf;
	struct mtnic_ring *ring = &priv->tx_ring;
	u16 index;

	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Owner bit changes every round */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		netdev_tx_complete (dev, ring->iobuf[index]);
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;
}
int mtnic_process_rx_cq(struct mtnic_port *priv,
			struct net_device *dev,
			struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe;
	struct mtnic_ring *ring = &priv->rx_ring;
	int index;
	int err;
	struct io_buffer *rx_iob;
	unsigned int length;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		/* Drop packet on bad receive or bad checksum */
		if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
			DBG("CQE completed with vendor error\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}
		if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
			DBG("Dropping packet with bad FCS\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		rx_iob = ring->iobuf[index];
		iob_put(rx_iob, length);

		/* Add this packet to the receive queue. */
		netdev_rx(dev, rx_iob);
		ring->iobuf[index] = NULL;

next:
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;

	if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
		err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			return -EADDRINUSE;
		}
	}

	return 0;
}
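/*
 * Editorial note: RX buffers are refilled in batches - only once the
 * number still posted to hardware (prod - cons) drops below
 * MAX_GAP_PROD_CONS - rather than with one allocation per received
 * packet.
 */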
/********************************************************************
 *
 * net_device functions
 *
 * open, poll, close, probe, disable, irq
 *
 *********************************************************************/
static int
mtnic_open(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err = 0;
	struct mtnic_ring *ring;
	struct mtnic_cq *cq;
	int cq_ind = 0;
	u32 dev_link_state;
	int link_check;

	DBG("starting port:%d, MAC Address: 0x%12llx\n",
	    priv->port, priv->mtnic->fw.mac[priv->port]);

	/* Alloc and configure CQs, TX, RX */
	err = mtnic_alloc_resources ( dev );
	if (err) {
		DBG("Error allocating resources\n");
		return -EADDRINUSE;
	}

	/* Pass CQs configuration to HW */
	for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
		cq = &priv->cq[cq_ind];
		err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
		if (err) {
			DBG("Failed configuring CQ:%d error %d\n",
			    cq_ind, err);
			if (cq_ind)
				goto cq_error;
			else
				goto allocation_error;
		}
		/* Update consumer index */
		cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	}

	/* Pass Tx configuration to HW */
	ring = &priv->tx_ring;
	err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Tx ring:0\n");
		goto cq_error;
	}

	/* Pass RX configuration to HW */
	ring = &priv->rx_ring;
	err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Rx ring:0\n");
		goto tx_error;
	}

	/* Configure Rx steering */
	err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
	if (!err)
		err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
	if (err) {
		DBG("Failed configuring RSS steering\n");
		goto rx_error;
	}

	/* Set the port default ring to ring 0 */
	err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
	if (err) {
		DBG("Failed setting default ring\n");
		goto rx_error;
	}

	/* Set MAC address */
	err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]);
	if (err) {
		DBG("Failed setting default MAC address\n");
		goto rx_error;
	}

	/* Set MTU */
	err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
	if (err) {
		DBG("Failed setting MTU\n");
		goto rx_error;
	}

	/* Configure VLAN filter */
	/* Left disabled: enabling it stops the second port from accepting
	   packets.
	err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
	if (err) {
		DBG("Failed configuring VLAN filter\n");
		goto rx_error;
	}
	*/

	/* Bring up physical link */
	err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
	if (err) {
		DBG("Failed bringing up port\n");
		goto rx_error;
	}

	/* PORT IS UP */
	priv->state = CARD_UP;

	/* Check that link is up */
	DBG ( "Checking if link is up\n" );
	for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check++ ) {
		/* Let link state stabilize if cable was connected */
		mdelay ( DELAY_LINK_CHECK );

		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Failed getting device link state\n");
			return -ENETDOWN;
		}

		if ( dev_link_state & priv->port ) {
			/* Link is up */
			break;
		}
	}
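	/*
	 * Editorial note: for port 0 the 'dev_link_state & priv->port'
	 * test above can never be true (priv->port == 0), so that port
	 * always runs the loop to completion and relies solely on the
	 * 'dev_link_state & 0x3' check below; presumably a per-port link
	 * bit such as (1 << priv->port) was intended.
	 */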
	if ( ! ( dev_link_state & 0x3 ) ) {
		DBG("Link down, check cables and restart\n");
		netdev_link_down ( dev );
		return -ENETDOWN;
	}
	DBG ( "Link is up!\n" );

	/* Mark as link up */
	netdev_link_up ( dev );

	return 0;

rx_error:
	err = mtnic_RELEASE_RESOURCE(priv, priv->port,
				     MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
	err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
				      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
cq_error:
	while (cq_ind) {
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
	}
	if (err)
		DBG("Error releasing resources\n");

allocation_error:
	free_dma(priv->tx_ring.buf, priv->tx_ring.buf_size);
	iounmap(priv->tx_ring.txcq_db);
	free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
	free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
	free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
	free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
	mtnic_free_io_buffers(&priv->rx_ring);

	return -ENETDOWN;
}
/** Check for RX and TX completions, and periodically verify the link
 * with the HEART_BEAT command */
static void
mtnic_poll ( struct net_device *dev )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_cq *cq;
	u32 dev_link_state;
	int err;
	unsigned int i;

	/* In case of an earlier error, return */
	if (priv->state != CARD_UP)
		return;

	/* We do not check the device on every poll call,
	   since that would slow it down */
	if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
		/* Check device */
		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Device has internal error\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
		if (!(dev_link_state & 0x3)) {
			DBG("Link down, check cables and restart\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
	}

	/* Poll the CQs */
	for (i = 0; i < NUM_CQS; i++) {
		cq = &priv->cq[i]; /* Pass over the 2 CQs */
		if (cq->is_rx) {
			err = mtnic_process_rx_cq(priv, cq->dev, cq);
			if (err) {
				priv->state = CARD_LINK_DOWN;
				DBG(" Error allocating RX buffers\n");
				return;
			}
		} else {
			mtnic_process_tx_cq(priv, cq->dev, cq);
		}
	}

	++priv->poll_counter;
}
static int
mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_ring *ring;
	struct mtnic_tx_desc *tx_desc;
	struct mtnic_data_seg *data;
	u32 index;

	/* In case of an earlier error, return */
	if (priv->state != CARD_UP)
		return -ENETDOWN;

	ring = &priv->tx_ring;

	index = ring->prod & ring->size_mask;
	if ((ring->prod - ring->cons) >= ring->size) {
		DBG("No space left for descriptors!!! cons: %x prod: %x\n",
		    ring->cons, ring->prod);
		mdelay(5);
		return -EAGAIN; /* no space left */
	}

	/* get current descriptor */
	tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));

	/* Prepare Data Seg */
	data = &tx_desc->data;
	data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
	data->count = cpu_to_be32(iob_len(iobuf));
	data->mem_type = priv->mtnic->fw.mem_type_snoop_be;

	/* Prepare ctrl segment */
	tx_desc->ctrl.size_vlan = cpu_to_be32(2);
	tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
					  MTNIC_BIT_NO_ICRC);
	tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
			       ((ring->prod & ring->size) ?
				cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);

	/* Attach io_buffer */
	ring->iobuf[index] = iobuf;

	/* Update producer index */
	++ring->prod;

	/* Ring doorbell! */
	wmb();
	writel((u32) ring->db_offset, &ring->txcq_db->send_db);

	return 0;
}
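/*
 * Editorial note: as with the CQEs, TX descriptor ownership alternates
 * per pass over the ring - 'ring->prod & ring->size' is zero on even
 * passes and non-zero on odd ones, so a reused descriptor slot is
 * handed to hardware with the opposite own-bit value each time.
 */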
static void
mtnic_close(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err = 0;

	DBG("Close called for port:%d\n", priv->port);

	if ( ( priv->state == CARD_UP ) ||
	     ( priv->state == CARD_LINK_DOWN ) ) {

		/* Disable port */
		err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
		/*
		 * Stop HW associated with this port
		 */
		mdelay(5);

		/* Stop RX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);

		/* Stop TX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

		/* Stop CQs */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 0);
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 1);
		if (err) {
			DBG("Close reported error %d\n", err);
		}

		mdelay ( 10 );

		/* Free memory */
		free_dma(priv->tx_ring.buf, priv->tx_ring.buf_size);
		iounmap(priv->tx_ring.txcq_db);
		free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
		free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
		free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
		free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
		free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
		free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

		/* Free RX buffers */
		mtnic_free_io_buffers(&priv->rx_ring);
	}

	priv->state = CARD_INITIALIZED;
}
static void
mtnic_disable(struct pci_device *pci)
{
	int err;
	int i;
	struct mtnic *mtnic = pci_get_drvdata(pci);
	struct net_device *dev;
	struct mtnic_port *priv;

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
		dev = mtnic->netdev[i];
		priv = netdev_priv(dev);
		/* Just in case */
		if ( ( priv->state == CARD_UP ) ||
		     ( priv->state == CARD_LINK_DOWN ) )
			mtnic_close ( dev );
	}

	/* Release EQ */
	priv = netdev_priv ( mtnic->netdev[0] );
	err = mtnic_RELEASE_RESOURCE(priv, 1,
				     MTNIC_IF_RESOURCE_TYPE_EQ, 0);

	DBG("Calling MTNIC_CLOSE command\n");
	err |= mtnic_cmd(mtnic, NULL, NULL, 0,
			 MTNIC_IF_CMD_CLOSE_NIC);
	if (err) {
		DBG("Error releasing resources %d\n", err);
	}

	free_dma(mtnic->cmd.buf, PAGE_SIZE);
	iounmap(mtnic->hcr);
	ufree((intptr_t)mtnic->fw.fw_pages.buf);
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
	free_dma(mtnic->eq.buf, mtnic->eq.buf_size);
	iounmap(mtnic->eq_db);

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
		dev = mtnic->netdev[i];
		unregister_netdev ( dev );
		netdev_nullify ( dev );
		netdev_put ( dev );
	}

	free ( mtnic );

	mtnic_reset ();
	mdelay ( 1000 );
	/* Restore config, in case we want to retry booting */
	restore_config ();
}
static void
mtnic_irq(struct net_device *netdev __unused, int enable __unused)
{
	/* Not implemented */
}

/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
	.open		= mtnic_open,
	.close		= mtnic_close,
	.transmit	= mtnic_transmit,
	.poll		= mtnic_poll,
	.irq		= mtnic_irq,
};
static int
mtnic_probe(struct pci_device *pci,
	    const struct pci_device_id *id __unused)
{
	struct mtnic_port *priv;
	struct mtnic *mtnic;
	int err;
	u64 mac;
	int port_index;

	adjust_pci_device(pci);

	err = mtnic_init_pci(pci);
	if (err) {
		DBG("Error in pci_init\n");
		return -EIO;
	}

	mtnic_reset();
	mdelay(1000);

	err = restore_config();
	if (err) {
		DBG("Error restoring config\n");
		return err;
	}

	mtnic = zalloc ( sizeof ( *mtnic ) );
	if ( ! mtnic ) {
		DBG ( "Error allocating mtnic buffer\n" );
		return -EADDRINUSE;
	}

	pci_set_drvdata(pci, mtnic);
	mtnic->pdev = pci;

	/* Initialize hardware */
	err = mtnic_init_card ( mtnic );
	if (err) {
		DBG("Error in init_card\n");
		goto err_init_card;
	}

	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index++ ) {
		/* Initialize net device */
		mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
		if ( mtnic->netdev[port_index] == NULL ) {
			DBG("Net device allocation failed\n");
			goto err_alloc_mtnic;
		}

		/*
		 * Initialize driver private data
		 */
		mtnic->netdev[port_index]->dev = &pci->dev;
		priv = netdev_priv ( mtnic->netdev[port_index] );
		memset ( priv, 0, sizeof ( struct mtnic_port ) );
		priv->mtnic = mtnic;
		priv->netdev = mtnic->netdev[port_index];

		/* Attach pci device */
		netdev_init(mtnic->netdev[port_index], &mtnic_operations);

		/* Set port number */
		priv->port = port_index;

		/* Set state */
		priv->state = CARD_DOWN;
	}

	int mac_idx;
	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index++ ) {
		priv = netdev_priv ( mtnic->netdev[port_index] );
		/* Program the MAC address */
		mac = priv->mtnic->fw.mac[port_index];
		for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
			mtnic->netdev[port_index]->hw_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
			mac = mac >> 8;
		}

		if ( register_netdev ( mtnic->netdev[port_index] ) ) {
			DBG("Netdev registration failed\n");
			priv->state = CARD_INITIALIZED;
			goto err_alloc_mtnic;
		}
	}

	return 0;

err_alloc_mtnic:
	free ( mtnic );
err_init_card:
	return -EIO;
}
static struct pci_device_id mtnic_nics[] = {
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver", 0 ),
};

struct pci_driver mtnic_driver __pci_driver = {
	.ids		= mtnic_nics,
	.id_count	= sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
	.probe		= mtnic_probe,
	.remove		= mtnic_disable,
};