
thunderx.c

/*
 * Copyright (C) 2016 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/pciea.h>
#include <ipxe/umalloc.h>
#include "thunderx.h"
#include "thunderxcfg.h"

/** @file
 *
 * Cavium ThunderX Ethernet driver
 *
 */

/** List of BGX Ethernet interfaces */
static LIST_HEAD ( txnic_bgxs );

/** List of physical functions */
static LIST_HEAD ( txnic_pfs );

/** Debug colour for physical function and BGX messages */
#define TXNICCOL(x) ( &txnic_pfs + (x)->node )
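/* (The pointer formed by TXNICCOL() is never dereferenced; it serves
 * only as a distinct per-node identifier used to select a DBGC debug
 * colour.)
 */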

/** Board configuration protocol */
static EFI_THUNDER_CONFIG_PROTOCOL *txcfg;
EFI_REQUEST_PROTOCOL ( EFI_THUNDER_CONFIG_PROTOCOL, &txcfg );

/******************************************************************************
 *
 * Diagnostics
 *
 ******************************************************************************
 */

/**
 * Show virtual NIC diagnostics (for debugging)
 *
 * @v vnic		Virtual NIC
 */
static __attribute__ (( unused )) void txnic_diag ( struct txnic *vnic ) {

	DBGC ( vnic, "TXNIC %s SQ %05zx(%05llx)/%05zx(%05llx) %08llx\n",
		vnic->name,
		( ( vnic->sq.prod % TXNIC_SQES ) * TXNIC_SQ_STRIDE ),
		readq ( vnic->regs + TXNIC_QS_SQ_TAIL(0) ),
		( ( vnic->sq.cons % TXNIC_SQES ) * TXNIC_SQ_STRIDE ),
		readq ( vnic->regs + TXNIC_QS_SQ_HEAD(0) ),
		readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) ) );
	DBGC ( vnic, "TXNIC %s RQ %05zx(%05llx)/%05zx(%05llx) %016llx\n",
		vnic->name,
		( ( vnic->rq.prod % TXNIC_RQES ) * TXNIC_RQ_STRIDE ),
		readq ( vnic->regs + TXNIC_QS_RBDR_TAIL(0) ),
		( ( vnic->rq.cons % TXNIC_RQES ) * TXNIC_RQ_STRIDE ),
		readq ( vnic->regs + TXNIC_QS_RBDR_HEAD(0) ),
		readq ( vnic->regs + TXNIC_QS_RBDR_STATUS0(0) ) );
	DBGC ( vnic, "TXNIC %s CQ xxxxx(%05llx)/%05x(%05llx) %08llx:%08llx\n",
		vnic->name, readq ( vnic->regs + TXNIC_QS_CQ_TAIL(0) ),
		( ( vnic->cq.cons % TXNIC_CQES ) * TXNIC_CQ_STRIDE ),
		readq ( vnic->regs + TXNIC_QS_CQ_HEAD(0) ),
		readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) ),
		readq ( vnic->regs + TXNIC_QS_CQ_STATUS2(0) ) );
}
/******************************************************************************
 *
 * Send queue
 *
 ******************************************************************************
 */

/**
 * Create send queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_sq ( struct txnic *vnic ) {

	/* Reset send queue */
	vnic->sq.prod = 0;
	vnic->sq.cons = 0;
	writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	/* Configure and enable send queue */
	writeq ( user_to_phys ( vnic->sq.sqe, 0 ),
		( vnic->regs + TXNIC_QS_SQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_SQ_CFG_ENA | TXNIC_QS_SQ_CFG_QSIZE_1K ),
		( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s SQ at [%08lx,%08lx)\n",
		vnic->name, user_to_phys ( vnic->sq.sqe, 0 ),
		user_to_phys ( vnic->sq.sqe, TXNIC_SQ_SIZE ) );
	return 0;
}

/**
 * Disable send queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_sq ( struct txnic *vnic ) {
	uint64_t status;
	unsigned int i;

	/* Disable send queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	/* Wait for send queue to be stopped */
	for ( i = 0 ; i < TXNIC_SQ_STOP_MAX_WAIT_MS ; i++ ) {

		/* Check if send queue is stopped */
		status = readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) );
		if ( status & TXNIC_QS_SQ_STATUS_STOPPED )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s SQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy send queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_sq ( struct txnic *vnic ) {
	int rc;

	/* Disable send queue */
	if ( ( rc = txnic_disable_sq ( vnic ) ) != 0 ) {
		/* Nothing else we can do */
		return;
	}

	/* Reset send queue */
	writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
}

/**
 * Send packet
 *
 * @v vnic		Virtual NIC
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int txnic_send ( struct txnic *vnic, struct io_buffer *iobuf ) {
	struct txnic_sqe sqe;
	unsigned int sq_idx;
	size_t offset;
	size_t len;

	/* Get next send queue entry */
	if ( ( vnic->sq.prod - vnic->sq.cons ) >= TXNIC_SQ_FILL ) {
		DBGC ( vnic, "TXNIC %s out of send queue entries\n",
			vnic->name );
		return -ENOBUFS;
	}
	sq_idx = ( vnic->sq.prod++ % TXNIC_SQES );
	offset = ( sq_idx * TXNIC_SQ_STRIDE );

	/* Populate send descriptor */
	len = iob_len ( iobuf );
	memset ( &sqe, 0, sizeof ( sqe ) );
	sqe.hdr.total = cpu_to_le32 ( ( len >= ETH_ZLEN ) ? len : ETH_ZLEN );
	sqe.hdr.subdcnt = ( TXNIC_SQE_SUBDESCS - 1 );
	sqe.hdr.flags = TXNIC_SEND_HDR_FLAGS;
	sqe.gather.size = cpu_to_le16 ( len );
	sqe.gather.flags = TXNIC_SEND_GATHER_FLAGS;
	sqe.gather.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
	DBGC2 ( vnic, "TXNIC %s SQE %#03x is [%08lx,%08lx)\n",
		vnic->name, sq_idx, virt_to_bus ( iobuf->data ),
		( virt_to_bus ( iobuf->data ) + len ) );

	/* Copy send descriptor to ring */
	copy_to_user ( vnic->sq.sqe, offset, &sqe, sizeof ( sqe ) );

	/* Ring doorbell */
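	/* (The doorbell is written with the number of subdescriptors
	 * added, i.e. TXNIC_SQE_SUBDESCS per transmitted packet.)
	 */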
	wmb();
	writeq ( TXNIC_SQE_SUBDESCS, ( vnic->regs + TXNIC_QS_SQ_DOOR(0) ) );

	return 0;
}

/**
 * Complete send queue entry
 *
 * @v vnic		Virtual NIC
 * @v cqe		Send completion queue entry
 */
static void txnic_complete_sqe ( struct txnic *vnic,
				 struct txnic_cqe_send *cqe ) {
	struct net_device *netdev = vnic->netdev;
	unsigned int sq_idx;
	unsigned int status;

	/* Parse completion */
	sq_idx = ( le16_to_cpu ( cqe->sqe_ptr ) / TXNIC_SQE_SUBDESCS );
	status = cqe->send_status;

	/* Sanity check */
	assert ( sq_idx == ( vnic->sq.cons % TXNIC_SQES ) );

	/* Free send queue entry */
	vnic->sq.cons++;

	/* Complete transmission */
	if ( status ) {
		DBGC ( vnic, "TXNIC %s SQE %#03x complete (status %#02x)\n",
			vnic->name, sq_idx, status );
		netdev_tx_complete_next_err ( netdev, -EIO );
	} else {
		DBGC2 ( vnic, "TXNIC %s SQE %#03x complete\n",
			vnic->name, sq_idx );
		netdev_tx_complete_next ( netdev );
	}
}
/******************************************************************************
 *
 * Receive queue
 *
 ******************************************************************************
 */

/**
 * Create receive queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_rq ( struct txnic *vnic ) {

	/* Reset receive buffer descriptor ring */
	vnic->rq.prod = 0;
	vnic->rq.cons = 0;
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Configure and enable receive buffer descriptor ring */
	writeq ( user_to_phys ( vnic->rq.rqe, 0 ),
		( vnic->regs + TXNIC_QS_RBDR_BASE(0) ) );
	writeq ( ( TXNIC_QS_RBDR_CFG_ENA | TXNIC_QS_RBDR_CFG_QSIZE_8K |
		   TXNIC_QS_RBDR_CFG_LINES ( TXNIC_RQE_SIZE /
					     TXNIC_LINE_SIZE ) ),
		( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Enable receive queue */
	writeq ( TXNIC_QS_RQ_CFG_ENA, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s RQ at [%08lx,%08lx)\n",
		vnic->name, user_to_phys ( vnic->rq.rqe, 0 ),
		user_to_phys ( vnic->rq.rqe, TXNIC_RQ_SIZE ) );
	return 0;
}

/**
 * Disable receive queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_rq ( struct txnic *vnic ) {
	uint64_t cfg;
	unsigned int i;

	/* Disable receive queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );

	/* Wait for receive queue to be disabled */
	for ( i = 0 ; i < TXNIC_RQ_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if receive queue is disabled */
		cfg = readq ( vnic->regs + TXNIC_QS_RQ_CFG(0) );
		if ( ! ( cfg & TXNIC_QS_RQ_CFG_ENA ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s RQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy receive queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_rq ( struct txnic *vnic ) {
	unsigned int i;
	int rc;

	/* Disable receive queue */
	if ( ( rc = txnic_disable_rq ( vnic ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do */
		return;
	}

	/* Disable receive buffer descriptor ring */
	writeq ( 0, ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Reset receive buffer descriptor ring */
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Free any unused I/O buffers */
	for ( i = 0 ; i < TXNIC_RQ_FILL ; i++ ) {
		if ( vnic->rq.iobuf[i] )
			free_iob ( vnic->rq.iobuf[i] );
		vnic->rq.iobuf[i] = NULL;
	}
}

/**
 * Refill receive queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_refill_rq ( struct txnic *vnic ) {
	struct io_buffer *iobuf;
	struct txnic_rqe rqe;
	unsigned int rq_idx;
	unsigned int rq_iobuf_idx;
	unsigned int refilled = 0;
	size_t offset;

	/* Refill ring */
	while ( ( vnic->rq.prod - vnic->rq.cons ) < TXNIC_RQ_FILL ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( TXNIC_RQE_SIZE );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next receive descriptor */
		rq_idx = ( vnic->rq.prod++ % TXNIC_RQES );
		offset = ( rq_idx * TXNIC_RQ_STRIDE );

		/* Populate receive descriptor */
		rqe.rbdre.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
		DBGC2 ( vnic, "TXNIC %s RQE %#03x is [%08lx,%08lx)\n",
			vnic->name, rq_idx, virt_to_bus ( iobuf->data ),
			( virt_to_bus ( iobuf->data ) + TXNIC_RQE_SIZE ) );

		/* Copy receive descriptor to ring */
		copy_to_user ( vnic->rq.rqe, offset, &rqe, sizeof ( rqe ) );
		refilled++;

		/* Record I/O buffer */
		rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
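		/* (At most TXNIC_RQ_FILL buffers are outstanding at any
		 * time, as enforced by the refill check above, so the
		 * I/O buffer array is indexed modulo TXNIC_RQ_FILL
		 * rather than TXNIC_RQES.)
		 */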
		assert ( vnic->rq.iobuf[rq_iobuf_idx] == NULL );
		vnic->rq.iobuf[rq_iobuf_idx] = iobuf;
	}

	/* Ring doorbell */
	wmb();
	writeq ( refilled, ( vnic->regs + TXNIC_QS_RBDR_DOOR(0) ) );
}

/**
 * Complete receive queue entry
 *
 * @v vnic		Virtual NIC
 * @v cqe		Receive completion queue entry
 */
static void txnic_complete_rqe ( struct txnic *vnic,
				 struct txnic_cqe_rx *cqe ) {
	struct net_device *netdev = vnic->netdev;
	struct io_buffer *iobuf;
	unsigned int errop;
	unsigned int rq_idx;
	unsigned int rq_iobuf_idx;
	size_t apad_len;
	size_t len;

	/* Parse completion */
	errop = cqe->errop;
	apad_len = TXNIC_CQE_RX_APAD_LEN ( cqe->apad );
	len = le16_to_cpu ( cqe->len );

	/* Get next receive I/O buffer */
	rq_idx = ( vnic->rq.cons++ % TXNIC_RQES );
	rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
	iobuf = vnic->rq.iobuf[rq_iobuf_idx];
	vnic->rq.iobuf[rq_iobuf_idx] = NULL;

	/* Populate I/O buffer */
	iob_reserve ( iobuf, apad_len );
	iob_put ( iobuf, len );

	/* Hand off to network stack */
	if ( errop ) {
		DBGC ( vnic, "TXNIC %s RQE %#03x error (length %zd, errop "
			"%#02x)\n", vnic->name, rq_idx, len, errop );
		netdev_rx_err ( netdev, iobuf, -EIO );
	} else {
		DBGC2 ( vnic, "TXNIC %s RQE %#03x complete (length %zd)\n",
			vnic->name, rq_idx, len );
		netdev_rx ( netdev, iobuf );
	}
}
/******************************************************************************
 *
 * Completion queue
 *
 ******************************************************************************
 */

/**
 * Create completion queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_cq ( struct txnic *vnic ) {

	/* Reset completion queue */
	vnic->cq.cons = 0;
	writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	/* Configure and enable completion queue */
	writeq ( user_to_phys ( vnic->cq.cqe, 0 ),
		( vnic->regs + TXNIC_QS_CQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_CQ_CFG_ENA | TXNIC_QS_CQ_CFG_QSIZE_256 ),
		( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s CQ at [%08lx,%08lx)\n",
		vnic->name, user_to_phys ( vnic->cq.cqe, 0 ),
		user_to_phys ( vnic->cq.cqe, TXNIC_CQ_SIZE ) );
	return 0;
}

/**
 * Disable completion queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_cq ( struct txnic *vnic ) {
	uint64_t cfg;
	unsigned int i;

	/* Disable completion queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	/* Wait for completion queue to be disabled */
	for ( i = 0 ; i < TXNIC_CQ_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if completion queue is disabled */
		cfg = readq ( vnic->regs + TXNIC_QS_CQ_CFG(0) );
		if ( ! ( cfg & TXNIC_QS_CQ_CFG_ENA ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s CQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy completion queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_cq ( struct txnic *vnic ) {
	int rc;

	/* Disable completion queue */
	if ( ( rc = txnic_disable_cq ( vnic ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do */
		return;
	}

	/* Reset completion queue */
	writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
}

/**
 * Poll completion queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_poll_cq ( struct txnic *vnic ) {
	union txnic_cqe cqe;
	uint64_t status;
	size_t offset;
	unsigned int qcount;
	unsigned int cq_idx;
	unsigned int i;

	/* Get number of completions */
	status = readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) );
	qcount = TXNIC_QS_CQ_STATUS_QCOUNT ( status );
	if ( ! qcount )
		return;

	/* Process completion queue entries */
	for ( i = 0 ; i < qcount ; i++ ) {

		/* Get completion queue entry */
		cq_idx = ( vnic->cq.cons++ % TXNIC_CQES );
		offset = ( cq_idx * TXNIC_CQ_STRIDE );
		copy_from_user ( &cqe, vnic->cq.cqe, offset, sizeof ( cqe ) );

		/* Process completion queue entry */
		switch ( cqe.common.cqe_type ) {
		case TXNIC_CQE_TYPE_SEND:
			txnic_complete_sqe ( vnic, &cqe.send );
			break;
		case TXNIC_CQE_TYPE_RX:
			txnic_complete_rqe ( vnic, &cqe.rx );
			break;
		default:
			DBGC ( vnic, "TXNIC %s unknown completion type %d\n",
				vnic->name, cqe.common.cqe_type );
			DBGC_HDA ( vnic, user_to_phys ( vnic->cq.cqe, offset ),
				   &cqe, sizeof ( cqe ) );
			break;
		}
	}

	/* Ring doorbell */
	writeq ( qcount, ( vnic->regs + TXNIC_QS_CQ_DOOR(0) ) );
}
/******************************************************************************
 *
 * Virtual NIC
 *
 ******************************************************************************
 */

/**
 * Open virtual NIC
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_open ( struct txnic *vnic ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = txnic_create_cq ( vnic ) ) != 0 )
		goto err_create_cq;

	/* Create send queue */
	if ( ( rc = txnic_create_sq ( vnic ) ) != 0 )
		goto err_create_sq;

	/* Create receive queue */
	if ( ( rc = txnic_create_rq ( vnic ) ) != 0 )
		goto err_create_rq;

	/* Refill receive queue */
	txnic_refill_rq ( vnic );

	return 0;

	txnic_destroy_rq ( vnic );
 err_create_rq:
	txnic_destroy_sq ( vnic );
 err_create_sq:
	txnic_destroy_cq ( vnic );
 err_create_cq:
	return rc;
}

/**
 * Close virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_close ( struct txnic *vnic ) {

	/* Destroy receive queue */
	txnic_destroy_rq ( vnic );

	/* Destroy send queue */
	txnic_destroy_sq ( vnic );

	/* Destroy completion queue */
	txnic_destroy_cq ( vnic );
}

/**
 * Poll virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_poll ( struct txnic *vnic ) {

	/* Poll completion queue */
	txnic_poll_cq ( vnic );

	/* Refill receive queue */
	txnic_refill_rq ( vnic );
}

/**
 * Allocate virtual NIC
 *
 * @v dev		Underlying device
 * @v membase		Register base address
 * @ret vnic		Virtual NIC, or NULL on failure
 */
static struct txnic * txnic_alloc ( struct device *dev,
				    unsigned long membase ) {
	struct net_device *netdev;
	struct txnic *vnic;

	/* Allocate network device */
	netdev = alloc_etherdev ( sizeof ( *vnic ) );
	if ( ! netdev )
		goto err_alloc_netdev;
	netdev->dev = dev;
	vnic = netdev->priv;
	vnic->netdev = netdev;
	vnic->name = dev->name;

	/* Allow caller to reuse netdev->priv. (The generic virtual
	 * NIC code never assumes that netdev->priv==vnic.)
	 */
	netdev->priv = NULL;

	/* Allocate completion queue */
	vnic->cq.cqe = umalloc ( TXNIC_CQ_SIZE );
	if ( ! vnic->cq.cqe )
		goto err_alloc_cq;

	/* Allocate send queue */
	vnic->sq.sqe = umalloc ( TXNIC_SQ_SIZE );
	if ( ! vnic->sq.sqe )
		goto err_alloc_sq;

	/* Allocate receive queue */
	vnic->rq.rqe = umalloc ( TXNIC_RQ_SIZE );
	if ( ! vnic->rq.rqe )
		goto err_alloc_rq;

	/* Map registers */
	vnic->regs = ioremap ( membase, TXNIC_VF_BAR_SIZE );
	if ( ! vnic->regs )
		goto err_ioremap;

	return vnic;

	iounmap ( vnic->regs );
 err_ioremap:
	ufree ( vnic->rq.rqe );
 err_alloc_rq:
	ufree ( vnic->sq.sqe );
 err_alloc_sq:
	ufree ( vnic->cq.cqe );
 err_alloc_cq:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc_netdev:
	return NULL;
}

/**
 * Free virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_free ( struct txnic *vnic ) {
	struct net_device *netdev = vnic->netdev;

	/* Unmap registers */
	iounmap ( vnic->regs );

	/* Free receive queue */
	ufree ( vnic->rq.rqe );

	/* Free send queue */
	ufree ( vnic->sq.sqe );

	/* Free completion queue */
	ufree ( vnic->cq.cqe );

	/* Free network device */
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
/******************************************************************************
 *
 * Logical MAC virtual NICs
 *
 ******************************************************************************
 */

/**
 * Show LMAC diagnostics (for debugging)
 *
 * @v lmac		Logical MAC
 */
static __attribute__ (( unused )) void
txnic_lmac_diag ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	uint64_t status1;
	uint64_t status2;
	uint64_t br_status1;
	uint64_t br_status2;
	uint64_t br_algn_status;
	uint64_t br_pmd_status;
	uint64_t an_status;

	/* Read status (clearing latching bits) */
	writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
	writeq ( BGX_SPU_STATUS2_RCVFLT, ( lmac->regs + BGX_SPU_STATUS2 ) );
	status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );
	status2 = readq ( lmac->regs + BGX_SPU_STATUS2 );
	DBGC ( vnic, "TXNIC %s SPU %02llx:%04llx%s%s%s\n",
		vnic->name, status1, status2,
		( ( status1 & BGX_SPU_STATUS1_FLT ) ? " FLT" : "" ),
		( ( status1 & BGX_SPU_STATUS1_RCV_LNK ) ? " RCV_LNK" : "" ),
		( ( status2 & BGX_SPU_STATUS2_RCVFLT ) ? " RCVFLT" : "" ) );

	/* Read BASE-R status (clearing latching bits) */
	writeq ( ( BGX_SPU_BR_STATUS2_LATCHED_LOCK |
		   BGX_SPU_BR_STATUS2_LATCHED_BER ),
		( lmac->regs + BGX_SPU_BR_STATUS2 ) );
	br_status1 = readq ( lmac->regs + BGX_SPU_BR_STATUS1 );
	br_status2 = readq ( lmac->regs + BGX_SPU_BR_STATUS2 );
	DBGC ( vnic, "TXNIC %s BR %04llx:%04llx%s%s%s%s%s\n",
		vnic->name, br_status1, br_status2,
		( ( br_status1 & BGX_SPU_BR_STATUS1_RCV_LNK ) ? " RCV_LNK" : ""),
		( ( br_status1 & BGX_SPU_BR_STATUS1_HI_BER ) ? " HI_BER" : "" ),
		( ( br_status1 & BGX_SPU_BR_STATUS1_BLK_LOCK ) ?
		  " BLK_LOCK" : "" ),
		( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_LOCK ) ?
		  " LATCHED_LOCK" : "" ),
		( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_BER ) ?
		  " LATCHED_BER" : "" ) );

	/* Read BASE-R alignment status */
	br_algn_status = readq ( lmac->regs + BGX_SPU_BR_ALGN_STATUS );
	DBGC ( vnic, "TXNIC %s BR ALGN %016llx%s\n", vnic->name, br_algn_status,
		( ( br_algn_status & BGX_SPU_BR_ALGN_STATUS_ALIGND ) ?
		  " ALIGND" : "" ) );

	/* Read BASE-R link training status */
	br_pmd_status = readq ( lmac->regs + BGX_SPU_BR_PMD_STATUS );
	DBGC ( vnic, "TXNIC %s BR PMD %04llx\n", vnic->name, br_pmd_status );

	/* Read autonegotiation status (clearing latching bits) */
	writeq ( ( BGX_SPU_AN_STATUS_PAGE_RX | BGX_SPU_AN_STATUS_LINK_STATUS ),
		( lmac->regs + BGX_SPU_AN_STATUS ) );
	an_status = readq ( lmac->regs + BGX_SPU_AN_STATUS );
	DBGC ( vnic, "TXNIC %s BR AN %04llx%s%s%s%s%s\n", vnic->name, an_status,
		( ( an_status & BGX_SPU_AN_STATUS_XNP_STAT ) ? " XNP_STAT" : ""),
		( ( an_status & BGX_SPU_AN_STATUS_PAGE_RX ) ? " PAGE_RX" : "" ),
		( ( an_status & BGX_SPU_AN_STATUS_AN_COMPLETE ) ?
		  " AN_COMPLETE" : "" ),
		( ( an_status & BGX_SPU_AN_STATUS_LINK_STATUS ) ?
		  " LINK_STATUS" : "" ),
		( ( an_status & BGX_SPU_AN_STATUS_LP_AN_ABLE ) ?
		  " LP_AN_ABLE" : "" ) );

	/* Read transmit statistics */
	DBGC ( vnic, "TXNIC %s TXF xc %#llx xd %#llx mc %#llx sc %#llx ok "
		"%#llx bc %#llx mc %#llx un %#llx pa %#llx\n", vnic->name,
		readq ( lmac->regs + BGX_CMR_TX_STAT0 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT1 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT2 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT3 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT5 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT14 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT15 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT16 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT17 ) );
	DBGC ( vnic, "TXNIC %s TXB ok %#llx hist %#llx:%#llx:%#llx:%#llx:"
		"%#llx:%#llx:%#llx:%#llx\n", vnic->name,
		readq ( lmac->regs + BGX_CMR_TX_STAT4 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT6 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT7 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT8 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT9 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT10 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT11 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT12 ),
		readq ( lmac->regs + BGX_CMR_TX_STAT13 ) );

	/* Read receive statistics */
	DBGC ( vnic, "TXNIC %s RXF ok %#llx pa %#llx nm %#llx ov %#llx er "
		"%#llx nc %#llx\n", vnic->name,
		readq ( lmac->regs + BGX_CMR_RX_STAT0 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT2 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT4 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT6 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT8 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT9 ) );
	DBGC ( vnic, "TXNIC %s RXB ok %#llx pa %#llx nm %#llx ov %#llx nc "
		"%#llx\n", vnic->name,
		readq ( lmac->regs + BGX_CMR_RX_STAT1 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT3 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT5 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT7 ),
		readq ( lmac->regs + BGX_CMR_RX_STAT10 ) );
}
/**
 * Update LMAC link state
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_update_link ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	struct net_device *netdev = vnic->netdev;
	uint64_t status1;

	/* Read status (clearing latching bits) */
	writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
	status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );

	/* Report link status */
	if ( status1 & BGX_SPU_STATUS1_RCV_LNK ) {
		netdev_link_up ( netdev );
	} else {
		netdev_link_down ( netdev );
	}
}

/**
 * Poll LMAC link state
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_poll_link ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	uint64_t intr;

	/* Get interrupt status */
	intr = readq ( lmac->regs + BGX_SPU_INT );
	if ( ! intr )
		return;
	DBGC ( vnic, "TXNIC %s INT %04llx%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		vnic->name, intr,
		( ( intr & BGX_SPU_INT_TRAINING_FAIL ) ? " TRAINING_FAIL" : "" ),
		( ( intr & BGX_SPU_INT_TRAINING_DONE ) ? " TRAINING_DONE" : "" ),
		( ( intr & BGX_SPU_INT_AN_COMPLETE ) ? " AN_COMPLETE" : "" ),
		( ( intr & BGX_SPU_INT_AN_LINK_GOOD ) ? " AN_LINK_GOOD" : "" ),
		( ( intr & BGX_SPU_INT_AN_PAGE_RX ) ? " AN_PAGE_RX" : "" ),
		( ( intr & BGX_SPU_INT_FEC_UNCORR ) ? " FEC_UNCORR" : "" ),
		( ( intr & BGX_SPU_INT_FEC_CORR ) ? " FEC_CORR" : "" ),
		( ( intr & BGX_SPU_INT_BIP_ERR ) ? " BIP_ERR" : "" ),
		( ( intr & BGX_SPU_INT_DBG_SYNC ) ? " DBG_SYNC" : "" ),
		( ( intr & BGX_SPU_INT_ALGNLOS ) ? " ALGNLOS" : "" ),
		( ( intr & BGX_SPU_INT_SYNLOS ) ? " SYNLOS" : "" ),
		( ( intr & BGX_SPU_INT_BITLCKLS ) ? " BITLCKLS" : "" ),
		( ( intr & BGX_SPU_INT_ERR_BLK ) ? " ERR_BLK" : "" ),
		( ( intr & BGX_SPU_INT_RX_LINK_DOWN ) ? " RX_LINK_DOWN" : "" ),
		( ( intr & BGX_SPU_INT_RX_LINK_UP ) ? " RX_LINK_UP" : "" ) );

	/* Clear interrupt status */
	writeq ( intr, ( lmac->regs + BGX_SPU_INT ) );

	/* Update link state */
	txnic_lmac_update_link ( lmac );
}

/**
 * Reset LMAC
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_reset ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );

	/* There is no reset available for the physical function
	 * aspects of a virtual NIC; we have to explicitly reload a
	 * sensible set of default values.
	 */
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_DROP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_BP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_SQ_CFG(0) ) );
}

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int txnic_lmac_open ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic = lmac->vnic;
	unsigned int vnic_idx = lmac->idx;
	unsigned int chan_idx = TXNIC_CHAN_IDX ( vnic_idx );
	unsigned int tl4_idx = TXNIC_TL4_IDX ( vnic_idx );
	unsigned int tl3_idx = TXNIC_TL3_IDX ( vnic_idx );
	unsigned int tl2_idx = TXNIC_TL2_IDX ( vnic_idx );
	void *lmregs = ( pf->regs + TXNIC_PF_LMAC ( vnic_idx ) );
	void *chregs = ( pf->regs + TXNIC_PF_CHAN ( chan_idx ) );
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( vnic_idx ) );
	size_t max_pkt_size;
	int rc;

	/* Configure channel/match parse indices */
	writeq ( ( TXNIC_PF_MPI_CFG_VNIC ( vnic_idx ) |
		   TXNIC_PF_MPI_CFG_RSSI_BASE ( vnic_idx ) ),
		( TXNIC_PF_MPI_CFG ( vnic_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_RSSI_RQ_RQ_QS ( vnic_idx ) ),
		( TXNIC_PF_RSSI_RQ ( vnic_idx ) + pf->regs ) );

	/* Configure LMAC */
	max_pkt_size = ( netdev->max_pkt_len + 4 /* possible VLAN */ );
	writeq ( ( TXNIC_PF_LMAC_CFG_ADJUST_DEFAULT |
		   TXNIC_PF_LMAC_CFG_MIN_PKT_SIZE ( ETH_ZLEN ) ),
		( TXNIC_PF_LMAC_CFG + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CFG2_MAX_PKT_SIZE ( max_pkt_size ) ),
		( TXNIC_PF_LMAC_CFG2 + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_ENABLE ),
		( TXNIC_PF_LMAC_CREDIT + lmregs ) );

	/* Configure channels */
	writeq ( ( TXNIC_PF_CHAN_TX_CFG_BP_ENA ),
		( TXNIC_PF_CHAN_TX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_CFG_CPI_BASE ( vnic_idx ) ),
		( TXNIC_PF_CHAN_RX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_BP_CFG_ENA |
		   TXNIC_PF_CHAN_RX_BP_CFG_BPID ( vnic_idx ) ),
		( TXNIC_PF_CHAN_RX_BP_CFG + chregs ) );

	/* Configure traffic limiters */
	writeq ( ( TXNIC_PF_TL2_CFG_RR_QUANTUM_DEFAULT ),
		( TXNIC_PF_TL2_CFG ( tl2_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CFG_RR_QUANTUM_DEFAULT ),
		( TXNIC_PF_TL3_CFG ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CHAN_CHAN ( chan_idx ) ),
		( TXNIC_PF_TL3_CHAN ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL4_CFG_SQ_QS ( vnic_idx ) |
		   TXNIC_PF_TL4_CFG_RR_QUANTUM_DEFAULT ),
		( TXNIC_PF_TL4_CFG ( tl4_idx ) + pf->regs ) );

	/* Configure send queue */
	writeq ( ( TXNIC_PF_QS_SQ_CFG_CQ_QS ( vnic_idx ) ),
		( TXNIC_PF_QS_SQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_SQ_CFG2_TL4 ( tl4_idx ) ),
		( TXNIC_PF_QS_SQ_CFG2(0) + qsregs ) );

	/* Configure receive queue */
	writeq ( ( TXNIC_PF_QS_RQ_CFG_CACHING_ALL |
		   TXNIC_PF_QS_RQ_CFG_CQ_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_CONT_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_STRT_QS ( vnic_idx ) ),
		( TXNIC_PF_QS_RQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_RQ_BP_CFG_RBDR_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_CQ_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_BPID ( vnic_idx ) ),
		( TXNIC_PF_QS_RQ_BP_CFG(0) + qsregs ) );

	/* Enable queue set */
	writeq ( ( TXNIC_PF_QS_CFG_ENA | TXNIC_PF_QS_CFG_VNIC ( vnic_idx ) ),
		( TXNIC_PF_QS_CFG + qsregs ) );

	/* Open virtual NIC */
	if ( ( rc = txnic_open ( vnic ) ) != 0 )
		goto err_open;

	/* Update link state */
	txnic_lmac_update_link ( lmac );

	return 0;

	txnic_close ( vnic );
 err_open:
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	return rc;
}

/**
 * Close network device
 *
 * @v netdev		Network device
 */
static void txnic_lmac_close ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic = lmac->vnic;
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );

	/* Close virtual NIC */
	txnic_close ( vnic );

	/* Disable queue set */
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
}

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int txnic_lmac_transmit ( struct net_device *netdev,
				 struct io_buffer *iobuf ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic *vnic = lmac->vnic;

	return txnic_send ( vnic, iobuf );
}

/**
 * Poll network device
 *
 * @v netdev		Network device
 */
static void txnic_lmac_poll ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic *vnic = lmac->vnic;

	/* Poll virtual NIC */
	txnic_poll ( vnic );

	/* Poll link state */
	txnic_lmac_poll_link ( lmac );
}

/** Network device operations */
static struct net_device_operations txnic_lmac_operations = {
	.open = txnic_lmac_open,
	.close = txnic_lmac_close,
	.transmit = txnic_lmac_transmit,
	.poll = txnic_lmac_poll,
};
/**
 * Probe logical MAC virtual NIC
 *
 * @v lmac		Logical MAC
 * @ret rc		Return status code
 */
static int txnic_lmac_probe ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic;
	struct net_device *netdev;
	unsigned long membase;
	int rc;

	/* Sanity check */
	assert ( lmac->vnic == NULL );

	/* Calculate register base address */
	membase = ( pf->vf_membase + ( lmac->idx * pf->vf_stride ) );

	/* Allocate and initialise network device */
	vnic = txnic_alloc ( &bgx->pci->dev, membase );
	if ( ! vnic ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev = vnic->netdev;
	netdev_init ( netdev, &txnic_lmac_operations );
	netdev->priv = lmac;
	lmac->vnic = vnic;

	/* Reset device */
	txnic_lmac_reset ( lmac );

	/* Set MAC address */
	memcpy ( netdev->hw_addr, lmac->mac.raw, ETH_ALEN );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register;
	vnic->name = netdev->name;
	DBGC ( TXNICCOL ( pf ), "TXNIC %d/%d/%d is %s (%s)\n", pf->node,
		bgx->idx, lmac->idx, vnic->name, eth_ntoa ( lmac->mac.raw ) );

	/* Update link state */
	txnic_lmac_update_link ( lmac );

	return 0;

	unregister_netdev ( netdev );
 err_register:
	txnic_lmac_reset ( lmac );
	txnic_free ( vnic );
	lmac->vnic = NULL;
 err_alloc:
	return rc;
}

/**
 * Remove logical MAC virtual NIC
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_remove ( struct txnic_lmac *lmac ) {
	uint64_t config;

	/* Sanity check */
	assert ( lmac->vnic != NULL );

	/* Disable packet receive and transmit */
	config = readq ( lmac->regs + BGX_CMR_CONFIG );
	config &= ~( BGX_CMR_CONFIG_DATA_PKT_TX_EN |
		     BGX_CMR_CONFIG_DATA_PKT_RX_EN );
	writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) );

	/* Unregister network device */
	unregister_netdev ( lmac->vnic->netdev );

	/* Reset device */
	txnic_lmac_reset ( lmac );

	/* Free virtual NIC */
	txnic_free ( lmac->vnic );
	lmac->vnic = NULL;
}

/**
 * Probe all LMACs on a BGX Ethernet interface
 *
 * @v pf		Physical function
 * @v bgx		BGX Ethernet interface
 * @ret rc		Return status code
 */
static int txnic_lmac_probe_all ( struct txnic_pf *pf, struct txnic_bgx *bgx ) {
	unsigned int bgx_idx;
	int lmac_idx;
	int count;
	int rc;

	/* Sanity checks */
	bgx_idx = bgx->idx;
	assert ( pf->node == bgx->node );
	assert ( pf->bgx[bgx_idx] == NULL );
	assert ( bgx->pf == NULL );

	/* Associate BGX with physical function */
	pf->bgx[bgx_idx] = bgx;
	bgx->pf = pf;

	/* Probe all LMACs */
	count = bgx->count;
	for ( lmac_idx = 0 ; lmac_idx < count ; lmac_idx++ ) {
		if ( ( rc = txnic_lmac_probe ( &bgx->lmac[lmac_idx] ) ) != 0 )
			goto err_probe;
	}

	return 0;

	lmac_idx = count;
 err_probe:
	for ( lmac_idx-- ; lmac_idx >= 0 ; lmac_idx-- )
		txnic_lmac_remove ( &bgx->lmac[lmac_idx] );
	pf->bgx[bgx_idx] = NULL;
	bgx->pf = NULL;
	return rc;
}

/**
 * Remove all LMACs on a BGX Ethernet interface
 *
 * @v pf		Physical function
 * @v bgx		BGX Ethernet interface
 */
static void txnic_lmac_remove_all ( struct txnic_pf *pf,
				    struct txnic_bgx *bgx ) {
	unsigned int lmac_idx;

	/* Sanity checks */
	assert ( pf->bgx[bgx->idx] == bgx );
	assert ( bgx->pf == pf );

	/* Remove all LMACs */
	for ( lmac_idx = 0 ; lmac_idx < bgx->count ; lmac_idx++ )
		txnic_lmac_remove ( &bgx->lmac[lmac_idx] );

	/* Disassociate BGX from physical function */
	pf->bgx[bgx->idx] = NULL;
	bgx->pf = NULL;
}
/******************************************************************************
 *
 * NIC physical function interface
 *
 ******************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int txnic_pf_probe ( struct pci_device *pci ) {
	struct txnic_pf *pf;
	struct txnic_bgx *bgx;
	unsigned long membase;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	pf = zalloc ( sizeof ( *pf ) );
	if ( ! pf ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	pf->pci = pci;
	pci_set_drvdata ( pci, pf );

	/* Get base addresses */
	membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 );
	pf->vf_membase = pciea_bar_start ( pci, PCIEA_BEI_VF_BAR_0 );
	pf->vf_stride = pciea_bar_size ( pci, PCIEA_BEI_VF_BAR_0 );

	/* Calculate node ID */
	pf->node = txnic_address_node ( membase );
	DBGC ( TXNICCOL ( pf ), "TXNIC %d/*/* PF %s at %#lx (VF %#lx+%#lx)\n",
		pf->node, pci->dev.name, membase, pf->vf_membase, pf->vf_stride);

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	pf->regs = ioremap ( membase, TXNIC_PF_BAR_SIZE );
	if ( ! pf->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Configure physical function */
	writeq ( TXNIC_PF_CFG_ENA, ( pf->regs + TXNIC_PF_CFG ) );
	writeq ( ( TXNIC_PF_BP_CFG_BP_POLL_ENA |
		   TXNIC_PF_BP_CFG_BP_POLL_DLY_DEFAULT ),
		( pf->regs + TXNIC_PF_BP_CFG ) );
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		writeq ( ( TXNIC_PF_INTF_SEND_CFG_BLOCK_BGX |
			   TXNIC_PF_INTF_SEND_CFG_BLOCK ( i ) ),
			( pf->regs + TXNIC_PF_INTF_SEND_CFG ( i ) ) );
		writeq ( ( TXNIC_PF_INTF_BP_CFG_BP_ENA |
			   TXNIC_PF_INTF_BP_CFG_BP_ID_BGX |
			   TXNIC_PF_INTF_BP_CFG_BP_ID ( i ) ),
			( pf->regs + TXNIC_PF_INTF_BP_CFG ( i ) ) );
	}
	writeq ( ( TXNIC_PF_PKIND_CFG_LENERR_EN |
		   TXNIC_PF_PKIND_CFG_MAXLEN_DISABLE |
		   TXNIC_PF_PKIND_CFG_MINLEN_DISABLE ),
		( pf->regs + TXNIC_PF_PKIND_CFG(0) ) );

	/* Add to list of physical functions */
	list_add_tail ( &pf->list, &txnic_pfs );

	/* Probe all LMACs, if applicable */
	list_for_each_entry ( bgx, &txnic_bgxs, list ) {
		if ( bgx->node != pf->node )
			continue;
		if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 )
			goto err_probe;
	}

	return 0;

 err_probe:
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		if ( pf->bgx[i] )
			txnic_lmac_remove_all ( pf, pf->bgx[i] );
	}
	list_del ( &pf->list );
	writeq ( 0, ( pf->regs + TXNIC_PF_CFG ) );
	iounmap ( pf->regs );
 err_ioremap:
	free ( pf );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void txnic_pf_remove ( struct pci_device *pci ) {
	struct txnic_pf *pf = pci_get_drvdata ( pci );
	unsigned int i;

	/* Remove all LMACs, if applicable */
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		if ( pf->bgx[i] )
			txnic_lmac_remove_all ( pf, pf->bgx[i] );
	}

	/* Remove from list of physical functions */
	list_del ( &pf->list );

	/* Unmap registers */
	iounmap ( pf->regs );

	/* Free physical function */
	free ( pf );
}

/** NIC physical function PCI device IDs */
static struct pci_device_id txnic_pf_ids[] = {
	PCI_ROM ( 0x177d, 0xa01e, "thunder-pf", "ThunderX NIC PF", 0 ),
};

/** NIC physical function PCI driver */
struct pci_driver txnic_pf_driver __pci_driver = {
	.ids = txnic_pf_ids,
	.id_count = ( sizeof ( txnic_pf_ids ) / sizeof ( txnic_pf_ids[0] ) ),
	.probe = txnic_pf_probe,
	.remove = txnic_pf_remove,
};
/******************************************************************************
 *
 * BGX interface
 *
 ******************************************************************************
 */

/** LMAC types */
static struct txnic_lmac_type txnic_lmac_types[] = {
	[TXNIC_LMAC_XAUI] = {
		.name = "XAUI",
		.count = 1,
		.lane_to_sds = 0xe4,
	},
	[TXNIC_LMAC_RXAUI] = {
		.name = "RXAUI",
		.count = 2,
		.lane_to_sds = 0x0e04,
	},
	[TXNIC_LMAC_10G_R] = {
		.name = "10GBASE-R",
		.count = 4,
		.lane_to_sds = 0x00000000,
	},
	[TXNIC_LMAC_40G_R] = {
		.name = "40GBASE-R",
		.count = 1,
		.lane_to_sds = 0xe4,
	},
};

/**
 * Detect BGX Ethernet interface LMAC type
 *
 * @v bgx		BGX Ethernet interface
 * @ret type		LMAC type, or negative error
 */
static int txnic_bgx_detect ( struct txnic_bgx *bgx ) {
	uint64_t config;
	uint64_t br_pmd_control;
	uint64_t rx_lmacs;
	unsigned int type;

	/* We assume that the early (pre-UEFI) firmware will have
	 * configured at least the LMAC 0 type and use of link
	 * training, and may have overridden the number of LMACs.
	 */

	/* Determine type from LMAC 0 */
	config = readq ( bgx->regs + BGX_CMR_CONFIG );
	type = BGX_CMR_CONFIG_LMAC_TYPE_GET ( config );
	if ( ( type >= ( sizeof ( txnic_lmac_types ) /
			 sizeof ( txnic_lmac_types[0] ) ) ) ||
	     ( txnic_lmac_types[type].count == 0 ) ) {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX unknown type %d\n",
			bgx->node, bgx->idx, type );
		return -ENOTTY;
	}
	bgx->type = &txnic_lmac_types[type];

	/* Check whether link training is required */
	br_pmd_control = readq ( bgx->regs + BGX_SPU_BR_PMD_CONTROL );
	bgx->training =
		( !! ( br_pmd_control & BGX_SPU_BR_PMD_CONTROL_TRAIN_EN ) );

	/* Determine number of LMACs */
	rx_lmacs = readq ( bgx->regs + BGX_CMR_RX_LMACS );
	bgx->count = BGX_CMR_RX_LMACS_LMACS_GET ( rx_lmacs );
	if ( ( bgx->count == TXNIC_NUM_LMAC ) &&
	     ( bgx->type->count != TXNIC_NUM_LMAC ) ) {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* assuming %d LMACs\n",
			bgx->node, bgx->idx, bgx->type->count );
		bgx->count = bgx->type->count;
	}

	return type;
}

/**
 * Initialise BGX Ethernet interface
 *
 * @v bgx		BGX Ethernet interface
 * @v type		LMAC type
 */
static void txnic_bgx_init ( struct txnic_bgx *bgx, unsigned int type ) {
	uint64_t global_config;
	uint32_t lane_to_sds;
	unsigned int i;

	/* Set number of LMACs */
	writeq ( BGX_CMR_RX_LMACS_LMACS_SET ( bgx->count ),
		( bgx->regs + BGX_CMR_RX_LMACS ) );
	writeq ( BGX_CMR_TX_LMACS_LMACS_SET ( bgx->count ),
		( bgx->regs + BGX_CMR_TX_LMACS ) );

	/* Set LMAC types and lane mappings, and disable all LMACs */
	lane_to_sds = bgx->type->lane_to_sds;
	for ( i = 0 ; i < bgx->count ; i++ ) {
		writeq ( ( BGX_CMR_CONFIG_LMAC_TYPE_SET ( type ) |
			   BGX_CMR_CONFIG_LANE_TO_SDS ( lane_to_sds ) ),
			( bgx->regs + BGX_LMAC ( i ) + BGX_CMR_CONFIG ) );
		lane_to_sds >>= 8;
	}

	/* Reset all MAC address filtering */
	for ( i = 0 ; i < TXNIC_NUM_DMAC ; i++ )
		writeq ( 0, ( bgx->regs + BGX_CMR_RX_DMAC_CAM ( i ) ) );

	/* Reset NCSI steering */
	for ( i = 0 ; i < TXNIC_NUM_STEERING ; i++ )
		writeq ( 0, ( bgx->regs + BGX_CMR_RX_STEERING ( i ) ) );

	/* Enable backpressure to all channels */
	writeq ( BGX_CMR_CHAN_MSK_AND_ALL ( bgx->count ),
		( bgx->regs + BGX_CMR_CHAN_MSK_AND ) );

	/* Strip FCS */
	global_config = readq ( bgx->regs + BGX_CMR_GLOBAL_CONFIG );
	global_config |= BGX_CMR_GLOBAL_CONFIG_FCS_STRIP;
	writeq ( global_config, ( bgx->regs + BGX_CMR_GLOBAL_CONFIG ) );
}

/**
 * Get MAC address
 *
 * @v lmac		Logical MAC
 */
static void txnic_bgx_mac ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	unsigned int lmac_idx = TXNIC_LMAC_IDX ( lmac->idx );
	uint64_t mac;
	EFI_STATUS efirc;
	int rc;

	/* Extract MAC from Board Configuration protocol, if available */
	if ( txcfg ) {
		if ( ( efirc = txcfg->GetLmacProp ( txcfg, bgx->node, bgx->idx,
						    lmac_idx, MAC_ADDRESS,
						    sizeof ( mac ),
						    &mac ) ) == 0 ) {
			lmac->mac.be64 = cpu_to_be64 ( mac );
		} else {
			rc = -EEFI ( efirc );
			DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d could not get "
				"MAC address: %s\n", bgx->node, bgx->idx,
				lmac->idx, strerror ( rc ) );
		}
	} else {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no board "
			"configuration protocol\n", bgx->node, bgx->idx,
			lmac->idx );
	}

	/* Use random MAC address if none available */
	if ( ! lmac->mac.be64 ) {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no MAC address\n",
			bgx->node, bgx->idx, lmac->idx );
		eth_random_addr ( lmac->mac.raw );
	}
}

/**
 * Initialise Super PHY Unit (SPU)
 *
 * @v lmac		Logical MAC
 */
static void txnic_bgx_spu_init ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;

	/* Reset PHY */
	writeq ( BGX_SPU_CONTROL1_RESET, ( lmac->regs + BGX_SPU_CONTROL1 ) );
	mdelay ( BGX_SPU_RESET_DELAY_MS );

	/* Power down PHY */
	writeq ( BGX_SPU_CONTROL1_LO_PWR, ( lmac->regs + BGX_SPU_CONTROL1 ) );

	/* Configure training, if applicable */
	if ( bgx->training ) {
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LP_CUP ) );
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_CUP ) );
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_REP ) );
		writeq ( BGX_SPU_BR_PMD_CONTROL_TRAIN_EN,
			( lmac->regs + BGX_SPU_BR_PMD_CONTROL ) );
	}

	/* Disable forward error correction */
	writeq ( 0, ( lmac->regs + BGX_SPU_FEC_CONTROL ) );

	/* Disable autonegotiation */
	writeq ( 0, ( lmac->regs + BGX_SPU_AN_CONTROL ) );

	/* Power up PHY */
	writeq ( 0, ( lmac->regs + BGX_SPU_CONTROL1 ) );
}

/**
 * Initialise LMAC
 *
 * @v bgx		BGX Ethernet interface
 * @v lmac_idx		LMAC index
 */
static void txnic_bgx_lmac_init ( struct txnic_bgx *bgx,
				  unsigned int lmac_idx ) {
	struct txnic_lmac *lmac = &bgx->lmac[lmac_idx];
	uint64_t config;

	/* Record associated BGX */
	lmac->bgx = bgx;

	/* Set register base address (already mapped) */
	lmac->regs = ( bgx->regs + BGX_LMAC ( lmac_idx ) );

	/* Calculate virtual NIC index */
	lmac->idx = TXNIC_VNIC_IDX ( bgx->idx, lmac_idx );

	/* Set MAC address */
	txnic_bgx_mac ( lmac );

	/* Initialise PHY */
	txnic_bgx_spu_init ( lmac );

	/* Accept all multicasts and broadcasts */
	writeq ( ( BGX_CMR_RX_DMAC_CTL_MCST_MODE_ACCEPT |
		   BGX_CMR_RX_DMAC_CTL_BCST_ACCEPT ),
		( lmac->regs + BGX_CMR_RX_DMAC_CTL ) );

	/* Enable LMAC */
	config = readq ( lmac->regs + BGX_CMR_CONFIG );
	config |= ( BGX_CMR_CONFIG_ENABLE |
		    BGX_CMR_CONFIG_DATA_PKT_RX_EN |
		    BGX_CMR_CONFIG_DATA_PKT_TX_EN );
	writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int txnic_bgx_probe ( struct pci_device *pci ) {
	struct txnic_bgx *bgx;
	struct txnic_pf *pf;
	unsigned long membase;
	unsigned int i;
	int type;
	int rc;

	/* Allocate and initialise structure */
	bgx = zalloc ( sizeof ( *bgx ) );
	if ( ! bgx ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	bgx->pci = pci;
	pci_set_drvdata ( pci, bgx );

	/* Get base address */
	membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 );

	/* Calculate node ID and index */
	bgx->node = txnic_address_node ( membase );
	bgx->idx = txnic_address_bgx ( membase );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	bgx->regs = ioremap ( membase, TXNIC_BGX_BAR_SIZE );
	if ( ! bgx->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Detect LMAC type */
	if ( ( type = txnic_bgx_detect ( bgx ) ) < 0 ) {
		rc = type;
		goto err_detect;
	}
	DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX %s at %#lx %dx %s%s\n",
		bgx->node, bgx->idx, pci->dev.name, membase, bgx->count,
		bgx->type->name, ( bgx->training ? "(training)" : "" ) );

	/* Initialise interface */
	txnic_bgx_init ( bgx, type );

	/* Initialise all LMACs */
	for ( i = 0 ; i < bgx->count ; i++ )
		txnic_bgx_lmac_init ( bgx, i );

	/* Add to list of BGX devices */
	list_add_tail ( &bgx->list, &txnic_bgxs );

	/* Probe all LMACs, if applicable */
	list_for_each_entry ( pf, &txnic_pfs, list ) {
		if ( pf->node != bgx->node )
			continue;
		if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 )
			goto err_probe;
	}

	return 0;

	if ( bgx->pf )
		txnic_lmac_remove_all ( bgx->pf, bgx );
	list_del ( &bgx->list );
 err_probe:
 err_detect:
	iounmap ( bgx->regs );
 err_ioremap:
	free ( bgx );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void txnic_bgx_remove ( struct pci_device *pci ) {
	struct txnic_bgx *bgx = pci_get_drvdata ( pci );

	/* Remove all LMACs, if applicable */
	if ( bgx->pf )
		txnic_lmac_remove_all ( bgx->pf, bgx );

	/* Remove from list of BGX devices */
	list_del ( &bgx->list );

	/* Unmap registers */
	iounmap ( bgx->regs );

	/* Free BGX device */
	free ( bgx );
}

/** BGX PCI device IDs */
static struct pci_device_id txnic_bgx_ids[] = {
	PCI_ROM ( 0x177d, 0xa026, "thunder-bgx", "ThunderX BGX", 0 ),
};

/** BGX PCI driver */
struct pci_driver txnic_bgx_driver __pci_driver = {
	.ids = txnic_bgx_ids,
	.id_count = ( sizeof ( txnic_bgx_ids ) / sizeof ( txnic_bgx_ids[0] ) ),
	.probe = txnic_bgx_probe,
	.remove = txnic_bgx_remove,
};