/*
 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include "ena.h"

/** @file
 *
 * Amazon ENA network driver
 *
 */

/**
 * Get direction name (for debugging)
 *
 * @v direction		Direction
 * @ret name		Direction name
 */
static const char * ena_direction ( unsigned int direction ) {

	switch ( direction ) {
	case ENA_SQ_TX:		return "TX";
	case ENA_SQ_RX:		return "RX";
	default:		return "<UNKNOWN>";
	}
}

/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */

/**
 * Reset hardware
 *
 * @v ena		ENA device
 * @ret rc		Return status code
 */
static int ena_reset ( struct ena_nic *ena ) {
	uint32_t stat;
	unsigned int i;

	/* Trigger reset */
	writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) );

	/* Wait for reset to complete */
	for ( i = 0 ; i < ENA_RESET_MAX_WAIT_MS ; i++ ) {

		/* Check if device is ready */
		stat = readl ( ena->regs + ENA_STAT );
		if ( stat & ENA_STAT_READY )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( ena, "ENA %p timed out waiting for reset (status %#08x)\n",
	       ena, stat );
	return -ETIMEDOUT;
}

/******************************************************************************
 *
 * Admin queue
 *
 ******************************************************************************
 */

/**
 * Set queue base address
 *
 * @v ena		ENA device
 * @v offset		Register offset
 * @v base		Base address
 */
static inline void ena_set_base ( struct ena_nic *ena, unsigned int offset,
				  void *base ) {
	physaddr_t phys = virt_to_bus ( base );

	/* Program base address registers */
	writel ( ( phys & 0xffffffffUL ),
		 ( ena->regs + offset + ENA_BASE_LO ) );
	if ( sizeof ( phys ) > sizeof ( uint32_t ) ) {
		writel ( ( ( ( uint64_t ) phys ) >> 32 ),
			 ( ena->regs + offset + ENA_BASE_HI ) );
	} else {
		writel ( 0, ( ena->regs + offset + ENA_BASE_HI ) );
	}
}

/**
 * Set queue capabilities
 *
 * @v ena		ENA device
 * @v offset		Register offset
 * @v count		Number of entries
 * @v size		Size of each entry
 */
static inline __attribute__ (( always_inline )) void
ena_set_caps ( struct ena_nic *ena, unsigned int offset, unsigned int count,
	       size_t size ) {

	/* Program capabilities register */
	writel ( ENA_CAPS ( count, size ), ( ena->regs + offset ) );
}

/**
 * Clear queue capabilities
 *
 * @v ena		ENA device
 * @v offset		Register offset
 */
static inline __attribute__ (( always_inline )) void
ena_clear_caps ( struct ena_nic *ena, unsigned int offset ) {

	/* Clear capabilities register */
	writel ( 0, ( ena->regs + offset ) );
}

/**
 * Create admin queues
 *
 * @v ena		ENA device
 * @ret rc		Return status code
 */
static int ena_create_admin ( struct ena_nic *ena ) {
	size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
	size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );
	int rc;

	/* Allocate admin completion queue */
	ena->acq.rsp = malloc_dma ( acq_len, acq_len );
	if ( ! ena->acq.rsp ) {
		rc = -ENOMEM;
		goto err_alloc_acq;
	}
	memset ( ena->acq.rsp, 0, acq_len );

	/* Allocate admin queue */
	ena->aq.req = malloc_dma ( aq_len, aq_len );
	if ( ! ena->aq.req ) {
		rc = -ENOMEM;
		goto err_alloc_aq;
	}
	memset ( ena->aq.req, 0, aq_len );

	/* Program queue addresses and capabilities */
	ena_set_base ( ena, ENA_ACQ_BASE, ena->acq.rsp );
	ena_set_caps ( ena, ENA_ACQ_CAPS, ENA_ACQ_COUNT,
		       sizeof ( ena->acq.rsp[0] ) );
	ena_set_base ( ena, ENA_AQ_BASE, ena->aq.req );
	ena_set_caps ( ena, ENA_AQ_CAPS, ENA_AQ_COUNT,
		       sizeof ( ena->aq.req[0] ) );

	DBGC ( ena, "ENA %p AQ [%08lx,%08lx) ACQ [%08lx,%08lx)\n",
	       ena, virt_to_phys ( ena->aq.req ),
	       ( virt_to_phys ( ena->aq.req ) + aq_len ),
	       virt_to_phys ( ena->acq.rsp ),
	       ( virt_to_phys ( ena->acq.rsp ) + acq_len ) );
	return 0;

	ena_clear_caps ( ena, ENA_AQ_CAPS );
	ena_clear_caps ( ena, ENA_ACQ_CAPS );
	free_dma ( ena->aq.req, aq_len );
 err_alloc_aq:
	free_dma ( ena->acq.rsp, acq_len );
 err_alloc_acq:
	return rc;
}

/**
 * Destroy admin queues
 *
 * @v ena		ENA device
 */
static void ena_destroy_admin ( struct ena_nic *ena ) {
	size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
	size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );

	/* Clear queue capabilities */
	ena_clear_caps ( ena, ENA_AQ_CAPS );
	ena_clear_caps ( ena, ENA_ACQ_CAPS );
	wmb();

	/* Free queues */
	free_dma ( ena->aq.req, aq_len );
	free_dma ( ena->acq.rsp, acq_len );
	DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
}

/**
 * Get next available admin queue request
 *
 * @v ena		ENA device
 * @ret req		Admin queue request
 */
static union ena_aq_req * ena_admin_req ( struct ena_nic *ena ) {
	union ena_aq_req *req;
	unsigned int index;

	/* Get next request */
	index = ( ena->aq.prod % ENA_AQ_COUNT );
	req = &ena->aq.req[index];

	/* Initialise request */
	memset ( ( ( ( void * ) req ) + sizeof ( req->header ) ), 0,
		 ( sizeof ( *req ) - sizeof ( req->header ) ) );
	req->header.id = ena->aq.prod;

	/* Increment producer counter */
	ena->aq.prod++;

	return req;
}

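/*
 * Admin command handshake, as implemented below: ena_admin_req()
 * assigns the next AQ slot and identifier, and ena_admin() toggles the
 * request's phase flag, writes the new producer count to the AQ
 * doorbell, then polls the matching ACQ entry until its phase bit
 * equals the phase expected by the driver.  The expected ACQ phase is
 * flipped each time the consumer index wraps around ENA_ACQ_COUNT.
 */
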
/**
 * Issue admin queue request
 *
 * @v ena		ENA device
 * @v req		Admin queue request
 * @v rsp		Admin queue response to fill in
 * @ret rc		Return status code
 */
static int ena_admin ( struct ena_nic *ena, union ena_aq_req *req,
		       union ena_acq_rsp **rsp ) {
	unsigned int index;
	unsigned int i;
	int rc;

	/* Locate response */
	index = ( ena->acq.cons % ENA_ACQ_COUNT );
	*rsp = &ena->acq.rsp[index];

	/* Mark request as ready */
	req->header.flags ^= ENA_AQ_PHASE;
	wmb();
	DBGC2 ( ena, "ENA %p admin request %#x:\n",
		ena, le16_to_cpu ( req->header.id ) );
	DBGC2_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );

	/* Ring doorbell */
	writel ( ena->aq.prod, ( ena->regs + ENA_AQ_DB ) );

	/* Wait for response */
	for ( i = 0 ; i < ENA_ADMIN_MAX_WAIT_MS ; i++ ) {

		/* Check for response */
		if ( ( (*rsp)->header.flags ^ ena->acq.phase ) & ENA_ACQ_PHASE){
			mdelay ( 1 );
			continue;
		}
		DBGC2 ( ena, "ENA %p admin response %#x:\n",
			ena, le16_to_cpu ( (*rsp)->header.id ) );
		DBGC2_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ));

		/* Increment consumer counter */
		ena->acq.cons++;
		if ( ( ena->acq.cons % ENA_ACQ_COUNT ) == 0 )
			ena->acq.phase ^= ENA_ACQ_PHASE;

		/* Check command identifier */
		if ( (*rsp)->header.id != req->header.id ) {
			DBGC ( ena, "ENA %p admin response %#x mismatch:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ) );
			rc = -EILSEQ;
			goto err;
		}

		/* Check status */
		if ( (*rsp)->header.status != 0 ) {
			DBGC ( ena, "ENA %p admin response %#x status %d:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ),
			       (*rsp)->header.status );
			rc = -EIO;
			goto err;
		}

		/* Success */
		return 0;
	}

	rc = -ETIMEDOUT;
	DBGC ( ena, "ENA %p timed out waiting for admin request %#x:\n",
	       ena, le16_to_cpu ( req->header.id ) );
 err:
	DBGC_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );
	DBGC_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ) );
	return rc;
}

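/*
 * The queue helpers below share a common pattern: allocate host ring
 * memory, describe it to the device in an admin request, and read the
 * assigned queue ID and doorbell register offset back from the admin
 * response.  Destruction reverses the process and frees the ring.
 */
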
/**
 * Create submission queue
 *
 * @v ena		ENA device
 * @v sq		Submission queue
 * @v cq		Corresponding completion queue
 * @ret rc		Return status code
 */
static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
			   struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate submission queue entries */
	sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
	if ( ! sq->sqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( sq->sqe.raw, 0, sq->len );

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_SQ;
	req->create_sq.direction = sq->direction;
	req->create_sq.policy = cpu_to_le16 ( ENA_SQ_HOST_MEMORY |
					      ENA_SQ_CONTIGUOUS );
	req->create_sq.cq_id = cpu_to_le16 ( cq->id );
	req->create_sq.count = cpu_to_le16 ( sq->count );
	req->create_sq.address = cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response */
	sq->id = le16_to_cpu ( rsp->create_sq.id );
	sq->doorbell = le32_to_cpu ( rsp->create_sq.doorbell );

	/* Reset producer counter and phase */
	sq->prod = 0;
	sq->phase = ENA_SQE_PHASE;

	DBGC ( ena, "ENA %p %s SQ%d at [%08lx,%08lx) db +%04x CQ%d\n",
	       ena, ena_direction ( sq->direction ), sq->id,
	       virt_to_phys ( sq->sqe.raw ),
	       ( virt_to_phys ( sq->sqe.raw ) + sq->len ),
	       sq->doorbell, cq->id );
	return 0;

 err_admin:
	free_dma ( sq->sqe.raw, sq->len );
 err_alloc:
	return rc;
}

/**
 * Destroy submission queue
 *
 * @v ena		ENA device
 * @v sq		Submission queue
 * @ret rc		Return status code
 */
static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_DESTROY_SQ;
	req->destroy_sq.id = cpu_to_le16 ( sq->id );
	req->destroy_sq.direction = sq->direction;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Free submission queue entries */
	free_dma ( sq->sqe.raw, sq->len );

	DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
	       ena, ena_direction ( sq->direction ), sq->id );
	return 0;
}

/**
 * Create completion queue
 *
 * @v ena		ENA device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate completion queue entries */
	cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
	if ( ! cq->cqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( cq->cqe.raw, 0, cq->len );

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_CQ;
	req->create_cq.size = cq->size;
	req->create_cq.count = cpu_to_le16 ( cq->requested );
	req->create_cq.address = cpu_to_le64 ( virt_to_bus ( cq->cqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response */
	cq->id = le16_to_cpu ( rsp->create_cq.id );
	cq->actual = le16_to_cpu ( rsp->create_cq.count );
	cq->doorbell = le32_to_cpu ( rsp->create_cq.doorbell );
	cq->mask = ( cq->actual - 1 );
	if ( cq->actual != cq->requested ) {
		DBGC ( ena, "ENA %p CQ%d requested %d actual %d\n",
		       ena, cq->id, cq->requested, cq->actual );
	}

	/* Reset consumer counter and phase */
	cq->cons = 0;
	cq->phase = ENA_CQE_PHASE;

	DBGC ( ena, "ENA %p CQ%d at [%08lx,%08lx) db +%04x\n",
	       ena, cq->id, virt_to_phys ( cq->cqe.raw ),
	       ( virt_to_phys ( cq->cqe.raw ) + cq->len ), cq->doorbell );
	return 0;

 err_admin:
	free_dma ( cq->cqe.raw, cq->len );
 err_alloc:
	return rc;
}

/**
 * Destroy completion queue
 *
 * @v ena		ENA device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_DESTROY_CQ;
	req->destroy_cq.id = cpu_to_le16 ( cq->id );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Free completion queue entries */
	free_dma ( cq->cqe.raw, cq->len );

	DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
	return 0;
}

/**
 * Create queue pair
 *
 * @v ena		ENA device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int ena_create_qp ( struct ena_nic *ena, struct ena_qp *qp ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = ena_create_cq ( ena, &qp->cq ) ) != 0 )
		goto err_create_cq;

	/* Create submission queue */
	if ( ( rc = ena_create_sq ( ena, &qp->sq, &qp->cq ) ) != 0 )
		goto err_create_sq;

	return 0;

	ena_destroy_sq ( ena, &qp->sq );
 err_create_sq:
	ena_destroy_cq ( ena, &qp->cq );
 err_create_cq:
	return rc;
}

/**
 * Destroy queue pair
 *
 * @v ena		ENA device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int ena_destroy_qp ( struct ena_nic *ena, struct ena_qp *qp ) {

	/* Destroy submission queue */
	ena_destroy_sq ( ena, &qp->sq );

	/* Destroy completion queue */
	ena_destroy_cq ( ena, &qp->cq );

	return 0;
}

/**
 * Get device attributes
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int ena_get_device_attributes ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	union ena_feature *feature;
	int rc;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_GET_FEATURE;
	req->get_feature.id = ENA_DEVICE_ATTRIBUTES;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Parse response */
	feature = &rsp->get_feature.feature;
	memcpy ( netdev->hw_addr, feature->device.mac, ETH_ALEN );
	netdev->max_pkt_len = le32_to_cpu ( feature->device.mtu );

	DBGC ( ena, "ENA %p MAC %s MTU %zd\n",
	       ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len );
	return 0;
}

/**
 * Get statistics (for debugging)
 *
 * @v ena		ENA device
 * @ret rc		Return status code
 */
static int ena_get_stats ( struct ena_nic *ena ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	struct ena_get_stats_rsp *stats;
	int rc;

	/* Do nothing unless debug messages are enabled */
	if ( ! DBG_LOG )
		return 0;

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_GET_STATS;
	req->get_stats.type = ENA_STATS_TYPE_BASIC;
	req->get_stats.scope = ENA_STATS_SCOPE_ETH;
	req->get_stats.device = ENA_DEVICE_MINE;

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		return rc;

	/* Parse response */
	stats = &rsp->get_stats;
	DBGC ( ena, "ENA %p TX bytes %#llx packets %#llx\n", ena,
	       ( ( unsigned long long ) le64_to_cpu ( stats->tx_bytes ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->tx_packets ) ) );
	DBGC ( ena, "ENA %p RX bytes %#llx packets %#llx drops %#llx\n", ena,
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_bytes ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_packets ) ),
	       ( ( unsigned long long ) le64_to_cpu ( stats->rx_drops ) ) );
	return 0;
}

/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */

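/*
 * The receive data path below keeps the RX submission queue as full as
 * possible: the number of buffers currently owned by hardware is the
 * difference between the SQ producer and CQ consumer counters, and
 * each posted I/O buffer is remembered in rx_iobuf[] under its SQ slot
 * index so that it can be recovered on completion (or freed on close).
 */
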
/**
 * Refill receive queue
 *
 * @v netdev		Network device
 */
static void ena_refill_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct io_buffer *iobuf;
	struct ena_rx_sqe *sqe;
	unsigned int index;
	physaddr_t address;
	size_t len = netdev->max_pkt_len;
	unsigned int refilled = 0;

	/* Refill queue */
	while ( ( ena->rx.sq.prod - ena->rx.cq.cons ) < ENA_RX_COUNT ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( len );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next submission queue entry */
		index = ( ena->rx.sq.prod % ENA_RX_COUNT );
		sqe = &ena->rx.sq.sqe.rx[index];

		/* Construct submission queue entry */
		address = virt_to_bus ( iobuf->data );
		sqe->len = cpu_to_le16 ( len );
		sqe->id = cpu_to_le16 ( ena->rx.sq.prod );
		sqe->address = cpu_to_le64 ( address );
		wmb();
		sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
			       ena->rx.sq.phase );

		/* Increment producer counter */
		ena->rx.sq.prod++;
		if ( ( ena->rx.sq.prod % ENA_RX_COUNT ) == 0 )
			ena->rx.sq.phase ^= ENA_SQE_PHASE;

		/* Record I/O buffer */
		assert ( ena->rx_iobuf[index] == NULL );
		ena->rx_iobuf[index] = iobuf;

		DBGC2 ( ena, "ENA %p RX %d at [%08llx,%08llx)\n", ena, sqe->id,
			( ( unsigned long long ) address ),
			( ( unsigned long long ) address + len ) );
		refilled++;
	}

	/* Ring doorbell, if applicable */
	if ( refilled ) {
		wmb();
		writel ( ena->rx.sq.prod, ( ena->regs + ena->rx.sq.doorbell ) );
	}
}

/**
 * Discard unused receive I/O buffers
 *
 * @v ena		ENA device
 */
static void ena_empty_rx ( struct ena_nic *ena ) {
	unsigned int i;

	for ( i = 0 ; i < ENA_RX_COUNT ; i++ ) {
		if ( ena->rx_iobuf[i] )
			free_iob ( ena->rx_iobuf[i] );
		ena->rx_iobuf[i] = NULL;
	}
}

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int ena_open ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	int rc;

	/* Create transmit queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->tx ) ) != 0 )
		goto err_create_tx;

	/* Create receive queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->rx ) ) != 0 )
		goto err_create_rx;

	/* Refill receive queue */
	ena_refill_rx ( netdev );

	return 0;

	ena_destroy_qp ( ena, &ena->rx );
 err_create_rx:
	ena_destroy_qp ( ena, &ena->tx );
 err_create_tx:
	return rc;
}

/**
 * Close network device
 *
 * @v netdev		Network device
 */
static void ena_close ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;

	/* Dump statistics (for debugging) */
	ena_get_stats ( ena );

	/* Destroy receive queue pair */
	ena_destroy_qp ( ena, &ena->rx );

	/* Discard any unused receive buffers */
	ena_empty_rx ( ena );

	/* Destroy transmit queue pair */
	ena_destroy_qp ( ena, &ena->tx );
}

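/*
 * Transmit uses a single descriptor per packet: each SQE is flagged as
 * both first and last, the phase flag is written only after the rest
 * of the entry is populated (hence the write barriers), and the
 * doorbell register is written with the new producer count.
 */
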
/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_tx_sqe *sqe;
	unsigned int index;
	physaddr_t address;
	size_t len;

	/* Get next submission queue entry */
	if ( ( ena->tx.sq.prod - ena->tx.cq.cons ) >= ENA_TX_COUNT ) {
		DBGC ( ena, "ENA %p out of transmit descriptors\n", ena );
		return -ENOBUFS;
	}
	index = ( ena->tx.sq.prod % ENA_TX_COUNT );
	sqe = &ena->tx.sq.sqe.tx[index];

	/* Construct submission queue entry */
	address = virt_to_bus ( iobuf->data );
	len = iob_len ( iobuf );
	sqe->len = cpu_to_le16 ( len );
	sqe->id = ena->tx.sq.prod;
	sqe->address = cpu_to_le64 ( address );
	wmb();
	sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
		       ena->tx.sq.phase );
	wmb();

	/* Increment producer counter */
	ena->tx.sq.prod++;
	if ( ( ena->tx.sq.prod % ENA_TX_COUNT ) == 0 )
		ena->tx.sq.phase ^= ENA_SQE_PHASE;

	/* Ring doorbell */
	writel ( ena->tx.sq.prod, ( ena->regs + ena->tx.sq.doorbell ) );

	DBGC2 ( ena, "ENA %p TX %d at [%08llx,%08llx)\n", ena, sqe->id,
		( ( unsigned long long ) address ),
		( ( unsigned long long ) address + len ) );
	return 0;
}

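/*
 * Completion queues are indexed via cq.mask rather than the requested
 * entry count, since ena_create_cq() records the actual count returned
 * by the device and derives the index mask from it.  A completion
 * entry belongs to the driver once its phase bit matches the expected
 * phase, which flips on every wrap of the consumer counter.
 */
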
/**
 * Poll for completed transmissions
 *
 * @v netdev		Network device
 */
static void ena_poll_tx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_tx_cqe *cqe;
	unsigned int index;

	/* Check for completed packets */
	while ( ena->tx.cq.cons != ena->tx.sq.prod ) {

		/* Get next completion queue entry */
		index = ( ena->tx.cq.cons & ena->tx.cq.mask );
		cqe = &ena->tx.cq.cqe.tx[index];

		/* Stop if completion queue entry is empty */
		if ( ( cqe->flags ^ ena->tx.cq.phase ) & ENA_CQE_PHASE )
			return;
		DBGC2 ( ena, "ENA %p TX %d complete\n", ena,
			( le16_to_cpu ( cqe->id ) >> 2 /* Don't ask */ ) );

		/* Increment consumer counter */
		ena->tx.cq.cons++;
		if ( ! ( ena->tx.cq.cons & ena->tx.cq.mask ) )
			ena->tx.cq.phase ^= ENA_CQE_PHASE;

		/* Complete transmit */
		netdev_tx_complete_next ( netdev );
	}
}

/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void ena_poll_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_rx_cqe *cqe;
	struct io_buffer *iobuf;
	unsigned int index;
	size_t len;

	/* Check for received packets */
	while ( ena->rx.cq.cons != ena->rx.sq.prod ) {

		/* Get next completion queue entry */
		index = ( ena->rx.cq.cons % ENA_RX_COUNT );
		cqe = &ena->rx.cq.cqe.rx[index];

		/* Stop if completion queue entry is empty */
		if ( ( cqe->flags ^ ena->rx.cq.phase ) & ENA_CQE_PHASE )
			return;

		/* Increment consumer counter */
		ena->rx.cq.cons++;
		if ( ! ( ena->rx.cq.cons & ena->rx.cq.mask ) )
			ena->rx.cq.phase ^= ENA_CQE_PHASE;

		/* Populate I/O buffer */
		iobuf = ena->rx_iobuf[index];
		ena->rx_iobuf[index] = NULL;
		len = le16_to_cpu ( cqe->len );
		iob_put ( iobuf, len );

		/* Hand off to network stack */
		DBGC2 ( ena, "ENA %p RX %d complete (length %zd)\n",
			ena, le16_to_cpu ( cqe->id ), len );
		netdev_rx ( netdev, iobuf );
	}
}

/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void ena_poll ( struct net_device *netdev ) {

	/* Poll for transmit completions */
	ena_poll_tx ( netdev );

	/* Poll for receive completions */
	ena_poll_rx ( netdev );

	/* Refill receive ring */
	ena_refill_rx ( netdev );
}

/** ENA network device operations */
static struct net_device_operations ena_operations = {
	.open		= ena_open,
	.close		= ena_close,
	.transmit	= ena_transmit,
	.poll		= ena_poll,
};

/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */

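/*
 * Probe brings the device up in a fixed order: map the register BAR,
 * reset the hardware, create the admin queues, query the MAC address
 * and MTU, and finally register the network device.  The unwind path
 * after the final "return 0" mirrors this order in reverse.
 */
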
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int ena_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct ena_nic *ena;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *ena ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &ena_operations );
	ena = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( ena, 0, sizeof ( *ena ) );
	ena->acq.phase = ENA_ACQ_PHASE;
	ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT,
		      sizeof ( ena->tx.cq.cqe.tx[0] ) );
	ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT,
		      sizeof ( ena->tx.sq.sqe.tx[0] ) );
	ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT,
		      sizeof ( ena->rx.cq.cqe.rx[0] ) );
	ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT,
		      sizeof ( ena->rx.sq.sqe.rx[0] ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	ena->regs = ioremap ( pci->membase, ENA_BAR_SIZE );
	if ( ! ena->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = ena_reset ( ena ) ) != 0 )
		goto err_reset;

	/* Create admin queues */
	if ( ( rc = ena_create_admin ( ena ) ) != 0 )
		goto err_create_admin;

	/* Fetch MAC address */
	if ( ( rc = ena_get_device_attributes ( netdev ) ) != 0 )
		goto err_get_device_attributes;

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Mark as link up, since we have no way to test link state on
	 * this hardware.
	 */
	netdev_link_up ( netdev );

	return 0;

	unregister_netdev ( netdev );
 err_register_netdev:
 err_get_device_attributes:
	ena_destroy_admin ( ena );
 err_create_admin:
	ena_reset ( ena );
 err_reset:
	iounmap ( ena->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void ena_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct ena_nic *ena = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Destroy admin queues */
	ena_destroy_admin ( ena );

	/* Reset card */
	ena_reset ( ena );

	/* Free network device */
	iounmap ( ena->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}

/** ENA PCI device IDs */
static struct pci_device_id ena_nics[] = {
	PCI_ROM ( 0x1d0f, 0xec20, "ena-vf", "ENA VF", 0 ),
	PCI_ROM ( 0x1d0f, 0xec21, "ena-vf-llq", "ENA VF (LLQ)", 0 ),
};

/** ENA PCI driver */
struct pci_driver ena_driver __pci_driver = {
	.ids = ena_nics,
	.id_count = ( sizeof ( ena_nics ) / sizeof ( ena_nics[0] ) ),
	.probe = ena_probe,
	.remove = ena_remove,
};