
ena.c 25KB

/*
 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include "ena.h"

/** @file
 *
 * Amazon ENA network driver
 *
 */

/**
 * Get direction name (for debugging)
 *
 * @v direction	Direction
 * @ret name	Direction name
 */
static const char * ena_direction ( unsigned int direction ) {

	switch ( direction ) {
	case ENA_SQ_TX: return "TX";
	case ENA_SQ_RX: return "RX";
	default: return "<UNKNOWN>";
	}
}

/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */

/**
 * Reset hardware
 *
 * @v ena	ENA device
 * @ret rc	Return status code
 */
static int ena_reset ( struct ena_nic *ena ) {
	uint32_t stat;
	unsigned int i;

	/* Trigger reset */
	writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) );

	/* Wait for reset to complete */
	for ( i = 0 ; i < ENA_RESET_MAX_WAIT_MS ; i++ ) {

		/* Check if device is ready */
		stat = readl ( ena->regs + ENA_STAT );
		if ( stat & ENA_STAT_READY )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( ena, "ENA %p timed out waiting for reset (status %#08x)\n",
	       ena, stat );
	return -ETIMEDOUT;
}

/******************************************************************************
 *
 * Admin queue
 *
 ******************************************************************************
 */

/**
 * Set queue base address
 *
 * @v ena	ENA device
 * @v offset	Register offset
 * @v base	Base address
 */
static inline void ena_set_base ( struct ena_nic *ena, unsigned int offset,
				  void *base ) {
	physaddr_t phys = virt_to_bus ( base );

	/* Program base address registers */
	writel ( ( phys & 0xffffffffUL ),
		 ( ena->regs + offset + ENA_BASE_LO ) );
	if ( sizeof ( phys ) > sizeof ( uint32_t ) ) {
		writel ( ( ( ( uint64_t ) phys ) >> 32 ),
			 ( ena->regs + offset + ENA_BASE_HI ) );
	} else {
		writel ( 0, ( ena->regs + offset + ENA_BASE_HI ) );
	}
}
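
/* The queue base address is written as two 32-bit halves so that
 * 64-bit DMA addresses are supported even when physaddr_t is only 32
 * bits wide; in that case the high dword is simply written as zero.
 */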

/**
 * Set queue capabilities
 *
 * @v ena	ENA device
 * @v offset	Register offset
 * @v count	Number of entries
 * @v size	Size of each entry
 */
static inline __attribute__ (( always_inline )) void
ena_set_caps ( struct ena_nic *ena, unsigned int offset, unsigned int count,
	       size_t size ) {

	/* Program capabilities register */
	writel ( ENA_CAPS ( count, size ), ( ena->regs + offset ) );
}

/**
 * Clear queue capabilities
 *
 * @v ena	ENA device
 * @v offset	Register offset
 */
static inline __attribute__ (( always_inline )) void
ena_clear_caps ( struct ena_nic *ena, unsigned int offset ) {

	/* Clear capabilities register */
	writel ( 0, ( ena->regs + offset ) );
}

/**
 * Create admin queues
 *
 * @v ena	ENA device
 * @ret rc	Return status code
 */
static int ena_create_admin ( struct ena_nic *ena ) {
	size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
	size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );
	int rc;

	/* Allocate admin completion queue */
	ena->acq.rsp = malloc_dma ( acq_len, acq_len );
	if ( ! ena->acq.rsp ) {
		rc = -ENOMEM;
		goto err_alloc_acq;
	}
	memset ( ena->acq.rsp, 0, acq_len );

	/* Allocate admin queue */
	ena->aq.req = malloc_dma ( aq_len, aq_len );
	if ( ! ena->aq.req ) {
		rc = -ENOMEM;
		goto err_alloc_aq;
	}
	memset ( ena->aq.req, 0, aq_len );

	/* Program queue addresses and capabilities */
	ena_set_base ( ena, ENA_ACQ_BASE, ena->acq.rsp );
	ena_set_caps ( ena, ENA_ACQ_CAPS, ENA_ACQ_COUNT,
		       sizeof ( ena->acq.rsp[0] ) );
	ena_set_base ( ena, ENA_AQ_BASE, ena->aq.req );
	ena_set_caps ( ena, ENA_AQ_CAPS, ENA_AQ_COUNT,
		       sizeof ( ena->aq.req[0] ) );

	DBGC ( ena, "ENA %p AQ [%08lx,%08lx) ACQ [%08lx,%08lx)\n",
	       ena, virt_to_phys ( ena->aq.req ),
	       ( virt_to_phys ( ena->aq.req ) + aq_len ),
	       virt_to_phys ( ena->acq.rsp ),
	       ( virt_to_phys ( ena->acq.rsp ) + acq_len ) );
	return 0;

	ena_clear_caps ( ena, ENA_AQ_CAPS );
	ena_clear_caps ( ena, ENA_ACQ_CAPS );
	free_dma ( ena->aq.req, aq_len );
 err_alloc_aq:
	free_dma ( ena->acq.rsp, acq_len );
 err_alloc_acq:
	return rc;
}
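
/* The statements between "return 0" above and the error labels are
 * never executed on the success path; they spell out the unwind steps
 * matching each error label, so the cleanup sequence can be read
 * alongside the allocation sequence.
 */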

/**
 * Destroy admin queues
 *
 * @v ena	ENA device
 */
static void ena_destroy_admin ( struct ena_nic *ena ) {
	size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
	size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );

	/* Clear queue capabilities */
	ena_clear_caps ( ena, ENA_AQ_CAPS );
	ena_clear_caps ( ena, ENA_ACQ_CAPS );
	wmb();

	/* Free queues */
	free_dma ( ena->aq.req, aq_len );
	free_dma ( ena->acq.rsp, acq_len );
	DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
}

/**
 * Get next available admin queue request
 *
 * @v ena	ENA device
 * @ret req	Admin queue request
 */
static union ena_aq_req * ena_admin_req ( struct ena_nic *ena ) {
	union ena_aq_req *req;
	unsigned int index;

	/* Get next request */
	index = ( ena->aq.prod % ENA_AQ_COUNT );
	req = &ena->aq.req[index];

	/* Initialise request */
	memset ( ( ( ( void * ) req ) + sizeof ( req->header ) ), 0,
		 ( sizeof ( *req ) - sizeof ( req->header ) ) );
	req->header.id = ena->aq.prod;

	/* Increment producer counter */
	ena->aq.prod++;

	return req;
}

/**
 * Issue admin queue request
 *
 * @v ena	ENA device
 * @v req	Admin queue request
 * @v rsp	Admin queue response to fill in
 * @ret rc	Return status code
 */
static int ena_admin ( struct ena_nic *ena, union ena_aq_req *req,
		       union ena_acq_rsp **rsp ) {
	unsigned int index;
	unsigned int i;
	int rc;

	/* Locate response */
	index = ( ena->acq.cons % ENA_ACQ_COUNT );
	*rsp = &ena->acq.rsp[index];

	/* Mark request as ready */
	req->header.flags ^= ENA_AQ_PHASE;
	wmb();
	DBGC2 ( ena, "ENA %p admin request %#x:\n",
		ena, le16_to_cpu ( req->header.id ) );
	DBGC2_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );

	/* Ring doorbell */
	writel ( ena->aq.prod, ( ena->regs + ENA_AQ_DB ) );

	/* Wait for response */
	for ( i = 0 ; i < ENA_ADMIN_MAX_WAIT_MS ; i++ ) {

		/* Check for response */
		if ( ( (*rsp)->header.flags ^ ena->acq.phase ) & ENA_ACQ_PHASE){
			mdelay ( 1 );
			continue;
		}
		DBGC2 ( ena, "ENA %p admin response %#x:\n",
			ena, le16_to_cpu ( (*rsp)->header.id ) );
		DBGC2_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ));

		/* Increment consumer counter */
		ena->acq.cons++;
		if ( ( ena->acq.cons % ENA_ACQ_COUNT ) == 0 )
			ena->acq.phase ^= ENA_ACQ_PHASE;

		/* Check command identifier */
		if ( (*rsp)->header.id != req->header.id ) {
			DBGC ( ena, "ENA %p admin response %#x mismatch:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ) );
			rc = -EILSEQ;
			goto err;
		}

		/* Check status */
		if ( (*rsp)->header.status != 0 ) {
			DBGC ( ena, "ENA %p admin response %#x status %d:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ),
			       (*rsp)->header.status );
			rc = -EIO;
			goto err;
		}

		/* Success */
		return 0;
	}

	rc = -ETIMEDOUT;
	DBGC ( ena, "ENA %p timed out waiting for admin request %#x:\n",
	       ena, le16_to_cpu ( req->header.id ) );
 err:
	DBGC_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );
	DBGC_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ) );
	return rc;
}
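
/* Admin completions are detected via a phase bit: the device writes
 * each completion entry with the current ENA_ACQ_PHASE value, and
 * ena_admin() treats an entry whose phase differs from ena->acq.phase
 * as not yet written.  The expected phase is toggled each time the
 * completion ring wraps, matching the device's next pass over the
 * ring.
 */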

/**
 * Create submission queue
 *
 * @v ena	ENA device
 * @v sq	Submission queue
 * @v cq	Corresponding completion queue
 * @ret rc	Return status code
 */
static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
			   struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate submission queue entries */
	sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
	if ( ! sq->sqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( sq->sqe.raw, 0, sq->len );

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_SQ;
	req->create_sq.direction = sq->direction;
	req->create_sq.policy = cpu_to_le16 ( ENA_SQ_HOST_MEMORY |
					      ENA_SQ_CONTIGUOUS );
	req->create_sq.cq_id = cpu_to_le16 ( cq->id );
	req->create_sq.count = cpu_to_le16 ( sq->count );
	req->create_sq.address = cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response */
	sq->id = le16_to_cpu ( rsp->create_sq.id );
	sq->doorbell = le32_to_cpu ( rsp->create_sq.doorbell );

	/* Reset producer counter and phase */
	sq->prod = 0;
	sq->phase = ENA_SQE_PHASE;

	DBGC ( ena, "ENA %p %s SQ%d at [%08lx,%08lx) db +%04x CQ%d\n",
	       ena, ena_direction ( sq->direction ), sq->id,
	       virt_to_phys ( sq->sqe.raw ),
	       ( virt_to_phys ( sq->sqe.raw ) + sq->len ),
	       sq->doorbell, cq->id );
	return 0;

 err_admin:
	free_dma ( sq->sqe.raw, sq->len );
 err_alloc:
	return rc;
}

/**
 * Destroy submission queue
 *
 * @v ena	ENA device
 * @v sq	Submission queue
 * @ret rc	Return status code
 */
static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_DESTROY_SQ;
	req->destroy_sq.id = cpu_to_le16 ( sq->id );
	req->destroy_sq.direction = sq->direction;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Free submission queue entries */
	free_dma ( sq->sqe.raw, sq->len );

	DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
	       ena, ena_direction ( sq->direction ), sq->id );
	return 0;
}

/**
 * Create completion queue
 *
 * @v ena	ENA device
 * @v cq	Completion queue
 * @ret rc	Return status code
 */
static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate completion queue entries */
	cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
	if ( ! cq->cqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( cq->cqe.raw, 0, cq->len );

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_CQ;
	req->create_cq.size = cq->size;
	req->create_cq.count = cpu_to_le16 ( cq->requested );
	req->create_cq.address = cpu_to_le64 ( virt_to_bus ( cq->cqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response */
	cq->id = le16_to_cpu ( rsp->create_cq.id );
	cq->actual = le16_to_cpu ( rsp->create_cq.count );
	cq->doorbell = le32_to_cpu ( rsp->create_cq.doorbell );
	cq->mask = ( cq->actual - 1 );
	if ( cq->actual != cq->requested ) {
		DBGC ( ena, "ENA %p CQ%d requested %d actual %d\n",
		       ena, cq->id, cq->requested, cq->actual );
	}

	/* Reset consumer counter and phase */
	cq->cons = 0;
	cq->phase = ENA_CQE_PHASE;

	DBGC ( ena, "ENA %p CQ%d at [%08lx,%08lx) db +%04x\n",
	       ena, cq->id, virt_to_phys ( cq->cqe.raw ),
	       ( virt_to_phys ( cq->cqe.raw ) + cq->len ), cq->doorbell );
	return 0;

 err_admin:
	free_dma ( cq->cqe.raw, cq->len );
 err_alloc:
	return rc;
}

/**
 * Destroy completion queue
 *
 * @v ena	ENA device
 * @v cq	Completion queue
 * @ret rc	Return status code
 */
static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_DESTROY_CQ;
	req->destroy_cq.id = cpu_to_le16 ( cq->id );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Free completion queue entries */
	free_dma ( cq->cqe.raw, cq->len );

	DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
	return 0;
}

/**
 * Create queue pair
 *
 * @v ena	ENA device
 * @v qp	Queue pair
 * @ret rc	Return status code
 */
static int ena_create_qp ( struct ena_nic *ena, struct ena_qp *qp ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = ena_create_cq ( ena, &qp->cq ) ) != 0 )
		goto err_create_cq;

	/* Create submission queue */
	if ( ( rc = ena_create_sq ( ena, &qp->sq, &qp->cq ) ) != 0 )
		goto err_create_sq;

	return 0;

	ena_destroy_sq ( ena, &qp->sq );
 err_create_sq:
	ena_destroy_cq ( ena, &qp->cq );
 err_create_cq:
	return rc;
}

/**
 * Destroy queue pair
 *
 * @v ena	ENA device
 * @v qp	Queue pair
 * @ret rc	Return status code
 */
static int ena_destroy_qp ( struct ena_nic *ena, struct ena_qp *qp ) {

	/* Destroy submission queue */
	ena_destroy_sq ( ena, &qp->sq );

	/* Destroy completion queue */
	ena_destroy_cq ( ena, &qp->cq );

	return 0;
}

/**
 * Get feature
 *
 * @v ena	ENA device
 * @v id	Feature identifier
 * @v feature	Feature to fill in
 * @ret rc	Return status code
 */
static int ena_get_feature ( struct ena_nic *ena, unsigned int id,
			     union ena_feature **feature ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_GET_FEATURE;
	req->get_feature.id = id;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Parse response */
	*feature = &rsp->get_feature.feature;

	return 0;
}

/**
 * Get device attributes
 *
 * @v netdev	Network device
 * @ret rc	Return status code
 */
static int ena_get_device_attributes ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	union ena_feature *feature;
	int rc;

	/* Get device attributes */
	if ( ( rc = ena_get_feature ( ena, ENA_DEVICE_ATTRIBUTES,
				      &feature ) ) != 0 )
		return rc;

	/* Extract MAC address */
	memcpy ( netdev->hw_addr, feature->device.mac, ETH_ALEN );

	/* Extract MTU */
	netdev->max_pkt_len = le32_to_cpu ( feature->device.mtu );

	DBGC ( ena, "ENA %p MAC %s MTU %zd\n",
	       ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len );
	return 0;
}

/**
 * Get statistics (for debugging)
 *
 * @v ena	ENA device
 * @ret rc	Return status code
 */
static int ena_get_stats ( struct ena_nic *ena ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	struct ena_get_stats_rsp *stats;
	int rc;

	/* Do nothing unless debug messages are enabled */
	if ( ! DBG_LOG )
		return 0;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_GET_STATS;
	req->get_stats.type = ENA_STATS_TYPE_BASIC;
	req->get_stats.scope = ENA_STATS_SCOPE_ETH;
	req->get_stats.device = ENA_DEVICE_MINE;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Parse response */
	stats = &rsp->get_stats;
	DBGC ( ena, "ENA %p TX bytes %#llx packets %#llx\n", ena,
	       ( ( unsigned long long ) le64_to_cpu ( stats->tx_bytes ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->tx_packets ) ) );
	DBGC ( ena, "ENA %p RX bytes %#llx packets %#llx drops %#llx\n", ena,
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_bytes ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_packets ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_drops ) ) );

	return 0;
}

/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */

/**
 * Refill receive queue
 *
 * @v netdev	Network device
 */
static void ena_refill_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct io_buffer *iobuf;
	struct ena_rx_sqe *sqe;
	unsigned int index;
	physaddr_t address;
	size_t len = netdev->max_pkt_len;
	unsigned int refilled = 0;

	/* Refill queue */
	while ( ( ena->rx.sq.prod - ena->rx.cq.cons ) < ENA_RX_COUNT ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( len );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next submission queue entry */
		index = ( ena->rx.sq.prod % ENA_RX_COUNT );
		sqe = &ena->rx.sq.sqe.rx[index];

		/* Construct submission queue entry */
		address = virt_to_bus ( iobuf->data );
		sqe->len = cpu_to_le16 ( len );
		sqe->id = cpu_to_le16 ( ena->rx.sq.prod );
		sqe->address = cpu_to_le64 ( address );
		wmb();
		sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
			       ena->rx.sq.phase );

		/* Increment producer counter */
		ena->rx.sq.prod++;
		if ( ( ena->rx.sq.prod % ENA_RX_COUNT ) == 0 )
			ena->rx.sq.phase ^= ENA_SQE_PHASE;

		/* Record I/O buffer */
		assert ( ena->rx_iobuf[index] == NULL );
		ena->rx_iobuf[index] = iobuf;

		DBGC2 ( ena, "ENA %p RX %d at [%08llx,%08llx)\n", ena, sqe->id,
			( ( unsigned long long ) address ),
			( ( unsigned long long ) address + len ) );
		refilled++;
	}

	/* Ring doorbell, if applicable */
	if ( refilled ) {
		wmb();
		writel ( ena->rx.sq.prod, ( ena->regs + ena->rx.sq.doorbell ) );
	}
}
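
/* Ordering in the refill path matters: the descriptor body (length,
 * id, address) is written before the wmb(), and only then is the
 * flags byte carrying the phase bit updated, so the device cannot see
 * a valid-phase descriptor with stale contents.  A second wmb()
 * before the doorbell write makes all descriptors visible before the
 * new producer index is published.
 */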

/**
 * Discard unused receive I/O buffers
 *
 * @v ena	ENA device
 */
static void ena_empty_rx ( struct ena_nic *ena ) {
	unsigned int i;

	for ( i = 0 ; i < ENA_RX_COUNT ; i++ ) {
		if ( ena->rx_iobuf[i] )
			free_iob ( ena->rx_iobuf[i] );
		ena->rx_iobuf[i] = NULL;
	}
}

/**
 * Open network device
 *
 * @v netdev	Network device
 * @ret rc	Return status code
 */
static int ena_open ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	int rc;

	/* Create transmit queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->tx ) ) != 0 )
		goto err_create_tx;

	/* Create receive queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->rx ) ) != 0 )
		goto err_create_rx;

	/* Refill receive queue */
	ena_refill_rx ( netdev );

	return 0;

	ena_destroy_qp ( ena, &ena->rx );
 err_create_rx:
	ena_destroy_qp ( ena, &ena->tx );
 err_create_tx:
	return rc;
}

/**
 * Close network device
 *
 * @v netdev	Network device
 */
static void ena_close ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;

	/* Dump statistics (for debugging) */
	ena_get_stats ( ena );

	/* Destroy receive queue pair */
	ena_destroy_qp ( ena, &ena->rx );

	/* Discard any unused receive buffers */
	ena_empty_rx ( ena );

	/* Destroy transmit queue pair */
	ena_destroy_qp ( ena, &ena->tx );
}

/**
 * Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_tx_sqe *sqe;
	unsigned int index;
	physaddr_t address;
	size_t len;

	/* Get next submission queue entry */
	if ( ( ena->tx.sq.prod - ena->tx.cq.cons ) >= ENA_TX_COUNT ) {
		DBGC ( ena, "ENA %p out of transmit descriptors\n", ena );
		return -ENOBUFS;
	}
	index = ( ena->tx.sq.prod % ENA_TX_COUNT );
	sqe = &ena->tx.sq.sqe.tx[index];

	/* Construct submission queue entry */
	address = virt_to_bus ( iobuf->data );
	len = iob_len ( iobuf );
	sqe->len = cpu_to_le16 ( len );
	sqe->id = ena->tx.sq.prod;
	sqe->address = cpu_to_le64 ( address );
	wmb();
	sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
		       ena->tx.sq.phase );
	wmb();

	/* Increment producer counter */
	ena->tx.sq.prod++;
	if ( ( ena->tx.sq.prod % ENA_TX_COUNT ) == 0 )
		ena->tx.sq.phase ^= ENA_SQE_PHASE;

	/* Ring doorbell */
	writel ( ena->tx.sq.prod, ( ena->regs + ena->tx.sq.doorbell ) );

	DBGC2 ( ena, "ENA %p TX %d at [%08llx,%08llx)\n", ena, sqe->id,
		( ( unsigned long long ) address ),
		( ( unsigned long long ) address + len ) );
	return 0;
}
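
/* Each transmitted packet occupies exactly one descriptor, marked as
 * both ENA_SQE_FIRST and ENA_SQE_LAST; ena_poll_tx() then completes
 * packets oldest-first via netdev_tx_complete_next().
 */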

/**
 * Poll for completed transmissions
 *
 * @v netdev	Network device
 */
static void ena_poll_tx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_tx_cqe *cqe;
	unsigned int index;

	/* Check for completed packets */
	while ( ena->tx.cq.cons != ena->tx.sq.prod ) {

		/* Get next completion queue entry */
		index = ( ena->tx.cq.cons & ena->tx.cq.mask );
		cqe = &ena->tx.cq.cqe.tx[index];

		/* Stop if completion queue entry is empty */
		if ( ( cqe->flags ^ ena->tx.cq.phase ) & ENA_CQE_PHASE )
			return;
		DBGC2 ( ena, "ENA %p TX %d complete\n", ena,
			( le16_to_cpu ( cqe->id ) >> 2 /* Don't ask */ ) );

		/* Increment consumer counter */
		ena->tx.cq.cons++;
		if ( ! ( ena->tx.cq.cons & ena->tx.cq.mask ) )
			ena->tx.cq.phase ^= ENA_CQE_PHASE;

		/* Complete transmit */
		netdev_tx_complete_next ( netdev );
	}
}

/**
 * Poll for received packets
 *
 * @v netdev	Network device
 */
static void ena_poll_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_rx_cqe *cqe;
	struct io_buffer *iobuf;
	unsigned int index;
	size_t len;

	/* Check for received packets */
	while ( ena->rx.cq.cons != ena->rx.sq.prod ) {

		/* Get next completion queue entry */
		index = ( ena->rx.cq.cons % ENA_RX_COUNT );
		cqe = &ena->rx.cq.cqe.rx[index];

		/* Stop if completion queue entry is empty */
		if ( ( cqe->flags ^ ena->rx.cq.phase ) & ENA_CQE_PHASE )
			return;

		/* Increment consumer counter */
		ena->rx.cq.cons++;
		if ( ! ( ena->rx.cq.cons & ena->rx.cq.mask ) )
			ena->rx.cq.phase ^= ENA_CQE_PHASE;

		/* Populate I/O buffer */
		iobuf = ena->rx_iobuf[index];
		ena->rx_iobuf[index] = NULL;
		len = le16_to_cpu ( cqe->len );
		iob_put ( iobuf, len );

		/* Hand off to network stack */
		DBGC2 ( ena, "ENA %p RX %d complete (length %zd)\n",
			ena, le16_to_cpu ( cqe->id ), len );
		netdev_rx ( netdev, iobuf );
	}
}

/**
 * Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void ena_poll ( struct net_device *netdev ) {

	/* Poll for transmit completions */
	ena_poll_tx ( netdev );

	/* Poll for receive completions */
	ena_poll_rx ( netdev );

	/* Refill receive ring */
	ena_refill_rx ( netdev );
}

/** ENA network device operations */
static struct net_device_operations ena_operations = {
	.open = ena_open,
	.close = ena_close,
	.transmit = ena_transmit,
	.poll = ena_poll,
};

/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci	PCI device
 * @ret rc	Return status code
 */
static int ena_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct ena_nic *ena;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *ena ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &ena_operations );
	ena = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( ena, 0, sizeof ( *ena ) );
	ena->acq.phase = ENA_ACQ_PHASE;
	ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT,
		      sizeof ( ena->tx.cq.cqe.tx[0] ) );
	ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT,
		      sizeof ( ena->tx.sq.sqe.tx[0] ) );
	ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT,
		      sizeof ( ena->rx.cq.cqe.rx[0] ) );
	ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT,
		      sizeof ( ena->rx.sq.sqe.rx[0] ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	ena->regs = ioremap ( pci->membase, ENA_BAR_SIZE );
	if ( ! ena->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = ena_reset ( ena ) ) != 0 )
		goto err_reset;

	/* Create admin queues */
	if ( ( rc = ena_create_admin ( ena ) ) != 0 )
		goto err_create_admin;

	/* Fetch MAC address */
	if ( ( rc = ena_get_device_attributes ( netdev ) ) != 0 )
		goto err_get_device_attributes;

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Mark as link up, since we have no way to test link state on
	 * this hardware.
	 */
	netdev_link_up ( netdev );

	return 0;

	unregister_netdev ( netdev );
 err_register_netdev:
 err_get_device_attributes:
	ena_destroy_admin ( ena );
 err_create_admin:
	ena_reset ( ena );
 err_reset:
	iounmap ( ena->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci	PCI device
 */
static void ena_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct ena_nic *ena = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Destroy admin queues */
	ena_destroy_admin ( ena );

	/* Reset card */
	ena_reset ( ena );

	/* Free network device */
	iounmap ( ena->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}

/** ENA PCI device IDs */
static struct pci_device_id ena_nics[] = {
	PCI_ROM ( 0x1d0f, 0xec20, "ena-vf", "ENA VF", 0 ),
	PCI_ROM ( 0x1d0f, 0xec21, "ena-vf-llq", "ENA VF (LLQ)", 0 ),
};

/** ENA PCI driver */
struct pci_driver ena_driver __pci_driver = {
	.ids = ena_nics,
	.id_count = ( sizeof ( ena_nics ) / sizeof ( ena_nics[0] ) ),
	.probe = ena_probe,
	.remove = ena_remove,
};