You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

intelxl.c 45KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626
  1. /*
  2. * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301, USA.
  18. *
  19. * You can also choose to distribute this program under the terms of
  20. * the Unmodified Binary Distribution Licence (as given in the file
  21. * COPYING.UBDL), provided that you have satisfied its requirements.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  24. #include <stdint.h>
  25. #include <string.h>
  26. #include <stdio.h>
  27. #include <unistd.h>
  28. #include <errno.h>
  29. #include <byteswap.h>
  30. #include <ipxe/netdevice.h>
  31. #include <ipxe/ethernet.h>
  32. #include <ipxe/if_ether.h>
  33. #include <ipxe/vlan.h>
  34. #include <ipxe/iobuf.h>
  35. #include <ipxe/malloc.h>
  36. #include <ipxe/pci.h>
  37. #include <ipxe/version.h>
  38. #include "intelxl.h"
  39. /** @file
  40. *
  41. * Intel 40 Gigabit Ethernet network card driver
  42. *
  43. */
  44. /******************************************************************************
  45. *
  46. * Device reset
  47. *
  48. ******************************************************************************
  49. */
  50. /**
  51. * Reset hardware
  52. *
  53. * @v intelxl Intel device
  54. * @ret rc Return status code
  55. */
  56. static int intelxl_reset ( struct intelxl_nic *intelxl ) {
  57. uint32_t pfgen_ctrl;
  58. /* Perform a global software reset */
  59. pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
  60. writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
  61. intelxl->regs + INTELXL_PFGEN_CTRL );
  62. mdelay ( INTELXL_RESET_DELAY_MS );
  63. return 0;
  64. }
  65. /******************************************************************************
  66. *
  67. * MAC address
  68. *
  69. ******************************************************************************
  70. */
  71. /**
  72. * Fetch initial MAC address and maximum frame size
  73. *
  74. * @v intelxl Intel device
  75. * @v netdev Network device
  76. * @ret rc Return status code
  77. */
  78. static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
  79. struct net_device *netdev ) {
  80. union intelxl_receive_address mac;
  81. uint32_t prtgl_sal;
  82. uint32_t prtgl_sah;
  83. size_t mfs;
  84. /* Read NVM-loaded address */
  85. prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
  86. prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
  87. mac.reg.low = cpu_to_le32 ( prtgl_sal );
  88. mac.reg.high = cpu_to_le32 ( prtgl_sah );
  89. /* Check that address is valid */
  90. if ( ! is_valid_ether_addr ( mac.raw ) ) {
  91. DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
  92. intelxl, eth_ntoa ( mac.raw ) );
  93. return -ENOENT;
  94. }
  95. /* Copy MAC address */
  96. DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
  97. intelxl, eth_ntoa ( mac.raw ) );
  98. memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
  99. /* Get maximum frame size */
  100. mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
  101. netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
  102. return 0;
  103. }
  104. /******************************************************************************
  105. *
  106. * Admin queue
  107. *
  108. ******************************************************************************
  109. */
/** Admin queue register offsets
 *
 * Both the command ("Tx") and event ("Rx") admin queues expose the
 * same register layout at different base offsets; this table records
 * the per-register offsets relative to that base.
 */
static const struct intelxl_admin_offsets intelxl_admin_offsets = {
	.bal = INTELXL_ADMIN_BAL,	/* Base address low */
	.bah = INTELXL_ADMIN_BAH,	/* Base address high */
	.len = INTELXL_ADMIN_LEN,	/* Length (and enable bit) */
	.head = INTELXL_ADMIN_HEAD,	/* Head pointer */
	.tail = INTELXL_ADMIN_TAIL,	/* Tail pointer */
};
/**
 * Create admin queue
 *
 * @v intelxl		Intel device
 * @v admin		Admin queue
 * @ret rc		Return status code
 */
static int intelxl_create_admin ( struct intelxl_nic *intelxl,
				  struct intelxl_admin *admin ) {
	size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
	size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
	const struct intelxl_admin_offsets *regs = admin->regs;
	void *admin_regs = ( intelxl->regs + admin->base );
	physaddr_t address;

	/* Allocate admin queue: one DMA-capable region holding the
	 * data buffers followed by the descriptor ring */
	admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
	if ( ! admin->buf )
		return -ENOMEM;
	admin->desc = ( ( ( void * ) admin->buf ) + buf_len );

	/* Initialise admin queue (descriptors only; buffers are
	 * initialised by the caller as needed) */
	memset ( admin->desc, 0, len );

	/* Reset head and tail registers */
	writel ( 0, admin_regs + regs->head );
	writel ( 0, admin_regs + regs->tail );

	/* Reset queue index */
	admin->index = 0;

	/* Program queue address, split across the low/high registers;
	 * the high register is explicitly zeroed on builds where a
	 * physical address fits in 32 bits */
	address = virt_to_bus ( admin->desc );
	writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
	if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
		writel ( ( ( ( uint64_t ) address ) >> 32 ),
			 admin_regs + regs->bah );
	} else {
		writel ( 0, admin_regs + regs->bah );
	}

	/* Program queue length and enable queue */
	writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
		   INTELXL_ADMIN_LEN_ENABLE ),
		 admin_regs + regs->len );
	DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
	       "[%08llx,%08llx)\n", intelxl,
	       ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
	       ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + len ),
	       ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
	       ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
					  buf_len ) ) );
	return 0;
}
  167. /**
  168. * Destroy admin queue
  169. *
  170. * @v intelxl Intel device
  171. * @v admin Admin queue
  172. */
  173. static void intelxl_destroy_admin ( struct intelxl_nic *intelxl,
  174. struct intelxl_admin *admin ) {
  175. size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
  176. size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
  177. const struct intelxl_admin_offsets *regs = admin->regs;
  178. void *admin_regs = ( intelxl->regs + admin->base );
  179. /* Disable queue */
  180. writel ( 0, admin_regs + regs->len );
  181. /* Free queue */
  182. free_dma ( admin->buf, ( buf_len + len ) );
  183. }
  184. /**
  185. * Get next admin command queue descriptor
  186. *
  187. * @v intelxl Intel device
  188. * @ret cmd Command descriptor
  189. */
  190. static struct intelxl_admin_descriptor *
  191. intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
  192. struct intelxl_admin *admin = &intelxl->command;
  193. struct intelxl_admin_descriptor *cmd;
  194. /* Get and initialise next descriptor */
  195. cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
  196. memset ( cmd, 0, sizeof ( *cmd ) );
  197. return cmd;
  198. }
  199. /**
  200. * Get next admin command queue data buffer
  201. *
  202. * @v intelxl Intel device
  203. * @ret buf Data buffer
  204. */
  205. static union intelxl_admin_buffer *
  206. intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
  207. struct intelxl_admin *admin = &intelxl->command;
  208. union intelxl_admin_buffer *buf;
  209. /* Get next data buffer */
  210. buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
  211. memset ( buf, 0, sizeof ( *buf ) );
  212. return buf;
  213. }
  214. /**
  215. * Initialise admin event queue descriptor
  216. *
  217. * @v intelxl Intel device
  218. * @v index Event queue index
  219. */
  220. static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
  221. unsigned int index ) {
  222. struct intelxl_admin *admin = &intelxl->event;
  223. struct intelxl_admin_descriptor *evt;
  224. union intelxl_admin_buffer *buf;
  225. uint64_t address;
  226. /* Initialise descriptor */
  227. evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
  228. buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
  229. address = virt_to_bus ( buf );
  230. evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
  231. evt->len = cpu_to_le16 ( sizeof ( *buf ) );
  232. evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
  233. evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
  234. }
  235. /**
  236. * Issue admin queue command
  237. *
  238. * @v intelxl Intel device
  239. * @ret rc Return status code
  240. */
  241. static int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
  242. struct intelxl_admin *admin = &intelxl->command;
  243. const struct intelxl_admin_offsets *regs = admin->regs;
  244. void *admin_regs = ( intelxl->regs + admin->base );
  245. struct intelxl_admin_descriptor *cmd;
  246. union intelxl_admin_buffer *buf;
  247. uint64_t address;
  248. uint32_t cookie;
  249. unsigned int index;
  250. unsigned int tail;
  251. unsigned int i;
  252. int rc;
  253. /* Get next queue entry */
  254. index = admin->index++;
  255. tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
  256. cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
  257. buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
  258. DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x:\n",
  259. intelxl, index, le16_to_cpu ( cmd->opcode ) );
  260. /* Sanity checks */
  261. assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
  262. assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
  263. assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
  264. assert ( cmd->ret == 0 );
  265. /* Populate data buffer address if applicable */
  266. if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
  267. address = virt_to_bus ( buf );
  268. cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
  269. cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
  270. }
  271. /* Populate cookie */
  272. cmd->cookie = cpu_to_le32 ( index );
  273. /* Record cookie */
  274. cookie = cmd->cookie;
  275. /* Post command descriptor */
  276. DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
  277. if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
  278. DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
  279. le16_to_cpu ( cmd->len ) );
  280. }
  281. wmb();
  282. writel ( tail, admin_regs + regs->tail );
  283. /* Wait for completion */
  284. for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
  285. /* If response is not complete, delay 1ms and retry */
  286. if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) {
  287. mdelay ( 1 );
  288. continue;
  289. }
  290. DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
  291. intelxl, index );
  292. DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
  293. sizeof ( *cmd ) );
  294. /* Check for cookie mismatch */
  295. if ( cmd->cookie != cookie ) {
  296. DBGC ( intelxl, "INTELXL %p admin command %#x bad "
  297. "cookie %#x\n", intelxl, index,
  298. le32_to_cpu ( cmd->cookie ) );
  299. rc = -EPROTO;
  300. goto err;
  301. }
  302. /* Check for errors */
  303. if ( cmd->ret != 0 ) {
  304. DBGC ( intelxl, "INTELXL %p admin command %#x error "
  305. "%d\n", intelxl, index,
  306. le16_to_cpu ( cmd->ret ) );
  307. rc = -EIO;
  308. goto err;
  309. }
  310. /* Success */
  311. return 0;
  312. }
  313. rc = -ETIMEDOUT;
  314. DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
  315. intelxl, index );
  316. err:
  317. DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
  318. return rc;
  319. }
  320. /**
  321. * Get firmware version
  322. *
  323. * @v intelxl Intel device
  324. * @ret rc Return status code
  325. */
  326. static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
  327. struct intelxl_admin_descriptor *cmd;
  328. struct intelxl_admin_version_params *version;
  329. unsigned int api;
  330. int rc;
  331. /* Populate descriptor */
  332. cmd = intelxl_admin_command_descriptor ( intelxl );
  333. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
  334. version = &cmd->params.version;
  335. /* Issue command */
  336. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  337. return rc;
  338. api = le16_to_cpu ( version->api.major );
  339. DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
  340. intelxl, le16_to_cpu ( version->firmware.major ),
  341. le16_to_cpu ( version->firmware.minor ),
  342. api, le16_to_cpu ( version->api.minor ) );
  343. /* Check for API compatibility */
  344. if ( api > INTELXL_ADMIN_API_MAJOR ) {
  345. DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
  346. intelxl, api );
  347. return -ENOTSUP;
  348. }
  349. return 0;
  350. }
  351. /**
  352. * Report driver version
  353. *
  354. * @v intelxl Intel device
  355. * @ret rc Return status code
  356. */
  357. static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
  358. struct intelxl_admin_descriptor *cmd;
  359. struct intelxl_admin_driver_params *driver;
  360. union intelxl_admin_buffer *buf;
  361. int rc;
  362. /* Populate descriptor */
  363. cmd = intelxl_admin_command_descriptor ( intelxl );
  364. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
  365. cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
  366. cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
  367. driver = &cmd->params.driver;
  368. driver->major = product_major_version;
  369. driver->minor = product_minor_version;
  370. buf = intelxl_admin_command_buffer ( intelxl );
  371. snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
  372. ( product_name[0] ? product_name : product_short_name ) );
  373. /* Issue command */
  374. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  375. return rc;
  376. return 0;
  377. }
  378. /**
  379. * Shutdown admin queues
  380. *
  381. * @v intelxl Intel device
  382. * @ret rc Return status code
  383. */
  384. static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
  385. struct intelxl_admin_descriptor *cmd;
  386. struct intelxl_admin_shutdown_params *shutdown;
  387. int rc;
  388. /* Populate descriptor */
  389. cmd = intelxl_admin_command_descriptor ( intelxl );
  390. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
  391. shutdown = &cmd->params.shutdown;
  392. shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
  393. /* Issue command */
  394. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  395. return rc;
  396. return 0;
  397. }
/**
 * Get switch configuration
 *
 * Walks the reported switching elements looking for a VSI (virtual
 * station interface), recording its SEID in intelxl->vsi.
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
	struct intelxl_admin_descriptor *cmd;
	struct intelxl_admin_switch_params *sw;
	union intelxl_admin_buffer *buf;
	int rc;

	/* Populate descriptor */
	cmd = intelxl_admin_command_descriptor ( intelxl );
	cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
	cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
	cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
	sw = &cmd->params.sw;
	buf = intelxl_admin_command_buffer ( intelxl );

	/* Get each configuration in turn.
	 *
	 * NOTE(review): cmd/sw/buf are captured once here, but
	 * intelxl_admin_command() advances the queue index on every
	 * call; confirm that repeat iterations (when sw->next is
	 * non-zero) still reference the live queue slot rather than a
	 * stale one.
	 */
	do {
		/* Issue command */
		if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
			return rc;

		/* Dump raw configuration */
		DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
			intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
		DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );

		/* Parse response: record the first VSI element seen */
		if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
			intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
			DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
			       "downlink %#04x conn %#02x\n", intelxl,
			       intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
			       le16_to_cpu ( buf->sw.cfg.downlink ),
			       buf->sw.cfg.connection );
		}

	} while ( sw->next );

	/* Check that we found a VSI */
	if ( ! intelxl->vsi ) {
		DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
		return -ENOENT;
	}

	return 0;
}
  442. /**
  443. * Get VSI parameters
  444. *
  445. * @v intelxl Intel device
  446. * @ret rc Return status code
  447. */
  448. static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
  449. struct intelxl_admin_descriptor *cmd;
  450. struct intelxl_admin_vsi_params *vsi;
  451. union intelxl_admin_buffer *buf;
  452. int rc;
  453. /* Populate descriptor */
  454. cmd = intelxl_admin_command_descriptor ( intelxl );
  455. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
  456. cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
  457. cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
  458. vsi = &cmd->params.vsi;
  459. vsi->vsi = cpu_to_le16 ( intelxl->vsi );
  460. buf = intelxl_admin_command_buffer ( intelxl );
  461. /* Issue command */
  462. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  463. return rc;
  464. /* Parse response */
  465. intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
  466. intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
  467. DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
  468. intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
  469. return 0;
  470. }
  471. /**
  472. * Set VSI promiscuous modes
  473. *
  474. * @v intelxl Intel device
  475. * @ret rc Return status code
  476. */
  477. static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
  478. struct intelxl_admin_descriptor *cmd;
  479. struct intelxl_admin_promisc_params *promisc;
  480. uint16_t flags;
  481. int rc;
  482. /* Populate descriptor */
  483. cmd = intelxl_admin_command_descriptor ( intelxl );
  484. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
  485. flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
  486. INTELXL_ADMIN_PROMISC_FL_MULTICAST |
  487. INTELXL_ADMIN_PROMISC_FL_BROADCAST |
  488. INTELXL_ADMIN_PROMISC_FL_VLAN );
  489. promisc = &cmd->params.promisc;
  490. promisc->flags = cpu_to_le16 ( flags );
  491. promisc->valid = cpu_to_le16 ( flags );
  492. promisc->vsi = cpu_to_le16 ( intelxl->vsi );
  493. /* Issue command */
  494. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  495. return rc;
  496. return 0;
  497. }
  498. /**
  499. * Restart autonegotiation
  500. *
  501. * @v intelxl Intel device
  502. * @ret rc Return status code
  503. */
  504. static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
  505. struct intelxl_admin_descriptor *cmd;
  506. struct intelxl_admin_autoneg_params *autoneg;
  507. int rc;
  508. /* Populate descriptor */
  509. cmd = intelxl_admin_command_descriptor ( intelxl );
  510. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
  511. autoneg = &cmd->params.autoneg;
  512. autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
  513. INTELXL_ADMIN_AUTONEG_FL_ENABLE );
  514. /* Issue command */
  515. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  516. return rc;
  517. return 0;
  518. }
  519. /**
  520. * Get link status
  521. *
  522. * @v netdev Network device
  523. * @ret rc Return status code
  524. */
  525. static int intelxl_admin_link ( struct net_device *netdev ) {
  526. struct intelxl_nic *intelxl = netdev->priv;
  527. struct intelxl_admin_descriptor *cmd;
  528. struct intelxl_admin_link_params *link;
  529. int rc;
  530. /* Populate descriptor */
  531. cmd = intelxl_admin_command_descriptor ( intelxl );
  532. cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
  533. link = &cmd->params.link;
  534. link->notify = INTELXL_ADMIN_LINK_NOTIFY;
  535. /* Issue command */
  536. if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
  537. return rc;
  538. DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
  539. intelxl, link->phy, link->speed, link->status );
  540. /* Update network device */
  541. if ( link->status & INTELXL_ADMIN_LINK_UP ) {
  542. netdev_link_up ( netdev );
  543. } else {
  544. netdev_link_down ( netdev );
  545. }
  546. return 0;
  547. }
  548. /**
  549. * Refill admin event queue
  550. *
  551. * @v intelxl Intel device
  552. */
  553. static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
  554. struct intelxl_admin *admin = &intelxl->event;
  555. const struct intelxl_admin_offsets *regs = admin->regs;
  556. void *admin_regs = ( intelxl->regs + admin->base );
  557. unsigned int tail;
  558. /* Update tail pointer */
  559. tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
  560. INTELXL_ADMIN_NUM_DESC );
  561. wmb();
  562. writel ( tail, admin_regs + regs->tail );
  563. }
  564. /**
  565. * Poll admin event queue
  566. *
  567. * @v netdev Network device
  568. */
  569. static void intelxl_poll_admin ( struct net_device *netdev ) {
  570. struct intelxl_nic *intelxl = netdev->priv;
  571. struct intelxl_admin *admin = &intelxl->event;
  572. struct intelxl_admin_descriptor *evt;
  573. union intelxl_admin_buffer *buf;
  574. /* Check for events */
  575. while ( 1 ) {
  576. /* Get next event descriptor and data buffer */
  577. evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
  578. buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
  579. /* Stop if descriptor is not yet completed */
  580. if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) )
  581. return;
  582. DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
  583. intelxl, admin->index );
  584. DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
  585. sizeof ( *evt ) );
  586. if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
  587. DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
  588. le16_to_cpu ( evt->len ) );
  589. }
  590. /* Handle event */
  591. switch ( evt->opcode ) {
  592. case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
  593. intelxl_admin_link ( netdev );
  594. break;
  595. default:
  596. DBGC ( intelxl, "INTELXL %p admin event %#x "
  597. "unrecognised opcode %#04x\n", intelxl,
  598. admin->index, le16_to_cpu ( evt->opcode ) );
  599. break;
  600. }
  601. /* Reset descriptor and refill queue */
  602. intelxl_admin_event_init ( intelxl, admin->index );
  603. admin->index++;
  604. intelxl_refill_admin ( intelxl );
  605. }
  606. }
/**
 * Open admin queues
 *
 * Creates both admin queues, posts all event descriptors, and
 * performs the initial firmware handshake (version exchange and
 * driver registration).
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
	unsigned int i;
	int rc;

	/* Create admin event queue */
	if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->event ) ) != 0 )
		goto err_create_event;

	/* Create admin command queue */
	if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->command ) ) != 0 )
		goto err_create_command;

	/* Initialise all admin event queue descriptors */
	for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
		intelxl_admin_event_init ( intelxl, i );

	/* Post all descriptors to event queue */
	intelxl_refill_admin ( intelxl );

	/* Get firmware version */
	if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
		goto err_version;

	/* Report driver version */
	if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
		goto err_driver;

	return 0;

	/* Unwind in reverse order of construction */
 err_driver:
 err_version:
	intelxl_destroy_admin ( intelxl, &intelxl->command );
 err_create_command:
	intelxl_destroy_admin ( intelxl, &intelxl->event );
 err_create_event:
	return rc;
}
  642. /**
  643. * Close admin queues
  644. *
  645. * @v intelxl Intel device
  646. */
  647. static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {
  648. /* Shut down admin queues */
  649. intelxl_admin_shutdown ( intelxl );
  650. /* Destroy admin command queue */
  651. intelxl_destroy_admin ( intelxl, &intelxl->command );
  652. /* Destroy admin event queue */
  653. intelxl_destroy_admin ( intelxl, &intelxl->event );
  654. }
  655. /******************************************************************************
  656. *
  657. * Descriptor rings
  658. *
  659. ******************************************************************************
  660. */
/**
 * Dump queue context (for debugging)
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context, one context line at a time */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start context operation (read, at the given
		 * sub-line index, for our absolute queue number) */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete.
		 *
		 * NOTE(review): a timeout here simply falls through
		 * and reads whatever is in the data registers; this
		 * is tolerable only because the function is a
		 * debug-only helper.
		 */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read context data from the data registers */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}
/**
 * Program queue context line
 *
 * Writes one line of queue context through the device's indirect
 * PFCM_LANCTX interface and waits for the hardware to signal
 * completion.
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data.  The raw words are stored little-endian
	 * and writel() expects a CPU-order value, hence le32_to_cpu().
	 */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start context write operation on the absolute queue number */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait up to INTELXL_CTX_MAX_WAIT_MS for completion */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}
  751. /**
  752. * Program queue context
  753. *
  754. * @v intelxl Intel device
  755. * @v line Queue context lines
  756. * @v len Size of context
  757. * @v op Context operation
  758. * @ret rc Return status code
  759. */
  760. static int intelxl_context ( struct intelxl_nic *intelxl,
  761. struct intelxl_context_line *line,
  762. size_t len, uint32_t op ) {
  763. unsigned int index;
  764. int rc;
  765. DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
  766. intelxl, op, len );
  767. DBGC2_HDA ( intelxl, 0, line, len );
  768. /* Program one line at a time */
  769. for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
  770. if ( ( rc = intelxl_context_line ( intelxl, line++, index,
  771. op ) ) != 0 )
  772. return rc;
  773. }
  774. return 0;
  775. }
  776. /**
  777. * Program transmit queue context
  778. *
  779. * @v intelxl Intel device
  780. * @v address Descriptor ring base address
  781. * @ret rc Return status code
  782. */
  783. static int intelxl_context_tx ( struct intelxl_nic *intelxl,
  784. physaddr_t address ) {
  785. union {
  786. struct intelxl_context_tx tx;
  787. struct intelxl_context_line line;
  788. } ctx;
  789. int rc;
  790. /* Initialise context */
  791. memset ( &ctx, 0, sizeof ( ctx ) );
  792. ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
  793. ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
  794. ctx.tx.count =
  795. cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
  796. ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
  797. /* Program context */
  798. if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
  799. INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
  800. return rc;
  801. return 0;
  802. }
  803. /**
  804. * Program receive queue context
  805. *
  806. * @v intelxl Intel device
  807. * @v address Descriptor ring base address
  808. * @ret rc Return status code
  809. */
  810. static int intelxl_context_rx ( struct intelxl_nic *intelxl,
  811. physaddr_t address ) {
  812. union {
  813. struct intelxl_context_rx rx;
  814. struct intelxl_context_line line;
  815. } ctx;
  816. uint64_t base_count;
  817. int rc;
  818. /* Initialise context */
  819. memset ( &ctx, 0, sizeof ( ctx ) );
  820. base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
  821. ctx.rx.base_count = cpu_to_le64 ( base_count );
  822. ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
  823. ctx.rx.flags = INTELXL_CTX_RX_FL_CRCSTRIP;
  824. ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
  825. /* Program context */
  826. if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
  827. INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
  828. return rc;
  829. return 0;
  830. }
/**
 * Enable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	uint32_t qxx_ena;

	/* Request ring enable, then give the hardware a fixed delay to
	 * act on the request before checking the status bit.
	 */
	writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
	udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
	qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
	if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
		DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
		       "%#08x\n", intelxl, ring->reg, qxx_ena );
		return -EIO;
	}

	return 0;
}
/**
 * Disable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
				  struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	uint32_t qxx_ena;
	unsigned int i;

	/* Request ring disable by clearing the enable register */
	writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );

	/* Poll (up to INTELXL_QUEUE_DISABLE_MAX_WAIT_MS) for the
	 * status bit to clear, indicating the ring is disabled.
	 */
	for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if ring is disabled */
		qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
		if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
	       "%#08x\n", intelxl, ring->reg, qxx_ena );
	return -ETIMEDOUT;
}
/**
 * Create descriptor ring
 *
 * Allocates the DMA descriptor ring, programs the queue context via
 * the ring's context method, and enables the ring.
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring (DMA-coherent, INTELXL_ALIGN-aligned) */
	ring->desc = malloc_dma ( ring->len, INTELXL_ALIGN );
	if ( ! ring->desc ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Initialise descriptor ring */
	memset ( ring->desc, 0, ring->len );

	/* Reset tail pointer */
	writel ( 0, ( ring_regs + INTELXL_QXX_TAIL ) );

	/* Program queue context (intelxl_context_tx or
	 * intelxl_context_rx, as configured for this ring).
	 */
	address = virt_to_bus ( ring->desc );
	if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
		goto err_context;

	/* Enable ring */
	if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
		goto err_enable;

	/* Reset producer and consumer counters */
	ring->prod = 0;
	ring->cons = 0;

	DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
	       intelxl, ring->reg, ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + ring->len ) );
	return 0;

	/* Error unwind, mirroring construction order.  The disable call
	 * below is unreachable at present (no error exit follows the
	 * enable) and is kept for symmetry with future error paths.
	 */
	intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
	free_dma ( ring->desc, ring->len );
 err_alloc:
	return rc;
}
  923. /**
  924. * Destroy descriptor ring
  925. *
  926. * @v intelxl Intel device
  927. * @v ring Descriptor ring
  928. */
  929. static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
  930. struct intelxl_ring *ring ) {
  931. int rc;
  932. /* Disable ring */
  933. if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
  934. /* Leak memory; there's nothing else we can do */
  935. return;
  936. }
  937. /* Free descriptor ring */
  938. free_dma ( ring->desc, ring->len );
  939. ring->desc = NULL;
  940. }
/**
 * Refill receive descriptor ring
 *
 * Allocates I/O buffers and posts receive descriptors until the ring
 * holds INTELXL_RX_FILL outstanding buffers (or allocation fails).
 *
 * @v intelxl		Intel device
 */
static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
	struct intelxl_rx_data_descriptor *rx;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	unsigned int rx_tail;
	physaddr_t address;
	unsigned int refilled = 0;

	/* Refill ring up to the configured fill level */
	while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {

		/* Allocate I/O buffer sized for the maximum frame */
		iobuf = alloc_iob ( intelxl->mfs );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next receive descriptor */
		rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
		rx = &intelxl->rx.desc[rx_idx].rx;

		/* Populate receive descriptor */
		address = virt_to_bus ( iobuf->data );
		rx->address = cpu_to_le64 ( address );
		rx->flags = 0;

		/* Record I/O buffer so it can be retrieved on completion */
		assert ( intelxl->rx_iobuf[rx_idx] == NULL );
		intelxl->rx_iobuf[rx_idx] = iobuf;

		DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
			rx_idx, ( ( unsigned long long ) address ),
			( ( unsigned long long ) address + intelxl->mfs ) );
		refilled++;
	}

	/* Push descriptors to card, if applicable.  The write barrier
	 * ensures descriptor contents are visible before the tail
	 * pointer update hands them to the hardware.
	 */
	if ( refilled ) {
		wmb();
		rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
		writel ( rx_tail,
			 ( intelxl->regs + intelxl->rx.reg + INTELXL_QXX_TAIL));
	}
}
  984. /******************************************************************************
  985. *
  986. * Network device interface
  987. *
  988. ******************************************************************************
  989. */
/**
 * Open network device
 *
 * Programs the port MAC address and maximum frame size, binds the
 * transmit queue to this PF, creates both descriptor rings, and
 * kicks off autonegotiation.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size, rounded up to INTELXL_ALIGN */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable (using the absolute queue
	 * number, i.e. PF base queue plus relative queue index).
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Error unwind, mirroring intelxl_close().  The pre-disable
	 * sequence below is unreachable at present (no error exit
	 * follows TX ring creation) and is kept for symmetry.
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}
/**
 * Close network device
 *
 * Pre-disables the transmit queue, tears down both descriptor rings,
 * and releases any receive buffers still posted to the hardware.
 *
 * @v netdev		Network device
 */
static void intelxl_close ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	unsigned int queue;
	unsigned int i;

	/* Dump contexts (for debugging) */
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
			       sizeof ( struct intelxl_context_tx ) );
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
			       sizeof ( struct intelxl_context_rx ) );

	/* Pre-disable transmit queue, then wait the mandated delay
	 * before the ring itself is disabled.
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );

	/* Destroy transmit descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->tx );

	/* Destroy receive descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->rx );

	/* Discard any unused receive buffers */
	for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
		if ( intelxl->rx_iobuf[i] )
			free_iob ( intelxl->rx_iobuf[i] );
		intelxl->rx_iobuf[i] = NULL;
	}
}
/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int intelxl_transmit ( struct net_device *netdev,
			      struct io_buffer *iobuf ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_tx_data_descriptor *tx;
	unsigned int tx_idx;
	unsigned int tx_tail;
	physaddr_t address;
	size_t len;

	/* Get next transmit descriptor; refuse if the ring already
	 * holds INTELXL_TX_FILL outstanding packets.
	 */
	if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
		DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
		       intelxl );
		return -ENOBUFS;
	}
	tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
	tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
	tx = &intelxl->tx.desc[tx_idx].tx;

	/* Populate transmit descriptor */
	address = virt_to_bus ( iobuf->data );
	len = iob_len ( iobuf );
	tx->address = cpu_to_le64 ( address );
	tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
	tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
				  INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
	/* Ensure descriptor is written before the tail update below
	 * makes it visible to the hardware.
	 */
	wmb();

	/* Notify card that there are packets ready to transmit */
	writel ( tx_tail,
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_TAIL ) );

	DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
		( ( unsigned long long ) address ),
		( ( unsigned long long ) address + len ) );
	return 0;
}
/**
 * Poll for completed packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_tx ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_tx_writeback_descriptor *tx_wb;
	unsigned int tx_idx;

	/* Check for completed packets, in order of submission */
	while ( intelxl->tx.cons != intelxl->tx.prod ) {

		/* Get next transmit descriptor (writeback view) */
		tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
		tx_wb = &intelxl->tx.desc[tx_idx].tx_wb;

		/* Stop if descriptor is still in use (the hardware has
		 * not yet set the descriptor-done flag).
		 */
		if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
			return;
		DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
			intelxl, tx_idx );

		/* Complete TX descriptor */
		netdev_tx_complete_next ( netdev );
		intelxl->tx.cons++;
	}
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_rx ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_rx_writeback_descriptor *rx_wb;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	unsigned int tag;
	size_t len;

	/* Check for received packets */
	while ( intelxl->rx.cons != intelxl->rx.prod ) {

		/* Get next receive descriptor (writeback view) */
		rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
		rx_wb = &intelxl->rx.desc[rx_idx].rx_wb;

		/* Stop if descriptor is still in use (descriptor-done
		 * flag not yet written back by the hardware).
		 */
		if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
			return;

		/* Populate I/O buffer with the received length, and
		 * release our reference to the buffer slot.
		 */
		iobuf = intelxl->rx_iobuf[rx_idx];
		intelxl->rx_iobuf[rx_idx] = NULL;
		len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
		iob_put ( iobuf, len );

		/* Find VLAN device, if applicable */
		if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
			tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
		} else {
			tag = 0;
		}

		/* Hand off to network stack, reporting an error if the
		 * receive-error flag is set.
		 */
		if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
			DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
			       "flags %08x)\n", intelxl, rx_idx, len,
			       le32_to_cpu ( rx_wb->flags ) );
			vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
		} else {
			DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
				"%zd)\n", intelxl, rx_idx, len );
			vlan_netdev_rx ( netdev, tag, iobuf );
		}
		intelxl->rx.cons++;
	}
}
/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;

	/* Acknowledge interrupts (clear pending bits and re-enable),
	 * if applicable.
	 */
	if ( netdev_irq_enabled ( netdev ) ) {
		writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
			   INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
			 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
	}

	/* Poll for completed packets */
	intelxl_poll_tx ( netdev );

	/* Poll for received packets */
	intelxl_poll_rx ( netdev );

	/* Poll for admin events */
	intelxl_poll_admin ( netdev );

	/* Refill RX ring (after consuming completions above) */
	intelxl_refill_rx ( intelxl );
}
  1210. /**
  1211. * Enable or disable interrupts
  1212. *
  1213. * @v netdev Network device
  1214. * @v enable Interrupts should be enabled
  1215. */
  1216. static void intelxl_irq ( struct net_device *netdev, int enable ) {
  1217. struct intelxl_nic *intelxl = netdev->priv;
  1218. if ( enable ) {
  1219. writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
  1220. intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
  1221. } else {
  1222. writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
  1223. }
  1224. }
/** Network device operations
 *
 * Standard iPXE network device method table for this driver.
 */
static struct net_device_operations intelxl_operations = {
	.open		= intelxl_open,
	.close		= intelxl_close,
	.transmit	= intelxl_transmit,
	.poll		= intelxl_poll,
	.irq		= intelxl_irq,
};
  1233. /******************************************************************************
  1234. *
  1235. * PCI interface
  1236. *
  1237. ******************************************************************************
  1238. */
/**
 * Probe PCI device
 *
 * Allocates and initialises the network device, maps registers,
 * resets the NIC, brings up the admin queues, discovers switch/VSI
 * configuration, configures interrupt causes, and registers the
 * network device.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device (with intelxl_nic as the
	 * private data area).
	 */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
			     &intelxl_admin_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
			     &intelxl_admin_offsets );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses (known only after the
	 * queue number has been established above).
	 */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );

	/* Configure interrupt causes */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Error unwind, mirroring construction order.  The
	 * unregister_netdev() call is unreachable at present (no error
	 * exit follows registration) and is kept for symmetry.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
	intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
/**
 * Remove PCI device
 *
 * Tears down everything set up by intelxl_probe(), in reverse order.
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct intelxl_nic *intelxl = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Close admin queues */
	intelxl_close_admin ( intelxl );

	/* Reset the NIC */
	intelxl_reset ( intelxl );

	/* Free network device */
	iounmap ( intelxl->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
/** PCI device IDs
 *
 * Physical-function IDs for the X710/XL710/XXV710/X722 families.
 */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};
/** PCI driver
 *
 * Registered via the __pci_driver linker table entry.
 */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe = intelxl_probe,
	.remove = intelxl_remove,
};