
phantom.c 56KB

/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <gpxe/pci.h>
#include <gpxe/malloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/if_ether.h>
#include <gpxe/ethernet.h>
#include <gpxe/spi.h>
#include "phantom.h"

/**
 * @file
 *
 * NetXen Phantom NICs
 *
 */

/** Maximum time to wait for SPI lock */
#define PHN_SPI_LOCK_TIMEOUT_MS 100

/** Maximum time to wait for SPI command to be issued */
#define PHN_SPI_CMD_TIMEOUT_MS 100

/** Maximum time to wait for command PEG to initialise
 *
 * BUGxxxx
 *
 * The command PEG will currently report initialisation complete only
 * when at least one PHY has detected a link (so that the global PHY
 * clock can be set to 10G/1G as appropriate).  This can take a very,
 * very long time.
 *
 * A future firmware revision should decouple PHY initialisation from
 * firmware initialisation, at which point the command PEG will report
 * initialisation complete much earlier, and this timeout can be
 * reduced.
 */
#define PHN_CMDPEG_INIT_TIMEOUT_SEC 50

/** Maximum time to wait for receive PEG to initialise */
#define PHN_RCVPEG_INIT_TIMEOUT_SEC 2

/** Maximum time to wait for firmware to accept a command */
#define PHN_ISSUE_CMD_TIMEOUT_MS 2000

/** Maximum time to wait for test memory */
#define PHN_TEST_MEM_TIMEOUT_MS 100

/** Link state poll frequency
 *
 * The link state will be checked once in every N calls to poll().
 */
#define PHN_LINK_POLL_FREQUENCY 4096

/** Number of RX descriptors */
#define PHN_NUM_RDS 32

/** RX maximum fill level.  Must be strictly less than PHN_NUM_RDS. */
#define PHN_RDS_MAX_FILL 16

/** RX buffer size */
#define PHN_RX_BUFSIZE ( 32 /* max LL padding added by card */ + \
			 ETH_FRAME_LEN )

/** Number of RX status descriptors */
#define PHN_NUM_SDS 32

/** Number of TX descriptors */
#define PHN_NUM_CDS 8

/** A Phantom descriptor ring set */
struct phantom_descriptor_rings {
	/** RX descriptors */
	struct phantom_rds rds[PHN_NUM_RDS];
	/** RX status descriptors */
	struct phantom_sds sds[PHN_NUM_SDS];
	/** TX descriptors */
	union phantom_cds cds[PHN_NUM_CDS];
	/** TX consumer index */
	volatile uint32_t cmd_cons;
};
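
/* Note: the TX ("CDS") ring carries both ethernet transmit
 * descriptors and control requests such as MAC address updates.  The
 * firmware reports consumption by DMAing its consumer index into
 * cmd_cons above (see phantom_create_tx_ctx() and phantom_poll()), so
 * not every completed CDS entry has an associated I/O buffer.
 */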

/** A Phantom NIC port */
struct phantom_nic_port {
	/** Phantom NIC containing this port */
	struct phantom_nic *phantom;
	/** Port number */
	unsigned int port;

	/** RX context ID */
	uint16_t rx_context_id;
	/** RX descriptor producer CRB offset */
	unsigned long rds_producer_crb;
	/** RX status descriptor consumer CRB offset */
	unsigned long sds_consumer_crb;
	/** RX producer index */
	unsigned int rds_producer_idx;
	/** RX consumer index */
	unsigned int rds_consumer_idx;
	/** RX status consumer index */
	unsigned int sds_consumer_idx;
	/** RX I/O buffers */
	struct io_buffer *rds_iobuf[PHN_RDS_MAX_FILL];

	/** TX context ID */
	uint16_t tx_context_id;
	/** TX descriptor producer CRB offset */
	unsigned long cds_producer_crb;
	/** TX producer index */
	unsigned int cds_producer_idx;
	/** TX consumer index */
	unsigned int cds_consumer_idx;
	/** TX I/O buffers */
	struct io_buffer *cds_iobuf[PHN_NUM_CDS];

	/** Link state poll timer */
	unsigned long link_poll_timer;

	/** Descriptor rings */
	struct phantom_descriptor_rings *desc;
};

/** RX context creation request and response buffers */
struct phantom_create_rx_ctx_rqrsp {
	struct {
		struct nx_hostrq_rx_ctx_s rx_ctx;
		struct nx_hostrq_rds_ring_s rds;
		struct nx_hostrq_sds_ring_s sds;
	} __unm_dma_aligned hostrq;
	struct {
		struct nx_cardrsp_rx_ctx_s rx_ctx;
		struct nx_cardrsp_rds_ring_s rds;
		struct nx_cardrsp_sds_ring_s sds;
	} __unm_dma_aligned cardrsp;
};

/** TX context creation request and response buffers */
struct phantom_create_tx_ctx_rqrsp {
	struct {
		struct nx_hostrq_tx_ctx_s tx_ctx;
	} __unm_dma_aligned hostrq;
	struct {
		struct nx_cardrsp_tx_ctx_s tx_ctx;
	} __unm_dma_aligned cardrsp;
};

/** A Phantom DMA buffer area */
union phantom_dma_buffer {
	/** Dummy area required for (read-only) self-tests */
	uint8_t dummy_dma[UNM_DUMMY_DMA_SIZE];
	/** RX context creation request and response buffers */
	struct phantom_create_rx_ctx_rqrsp create_rx_ctx;
	/** TX context creation request and response buffers */
	struct phantom_create_tx_ctx_rqrsp create_tx_ctx;
};

/** A Phantom NIC */
struct phantom_nic {
	/** BAR 0 */
	void *bar0;
	/** Current CRB window */
	unsigned long crb_window;
	/** CRB window access method */
	unsigned long ( *crb_access ) ( struct phantom_nic *phantom,
					unsigned long reg );
	/** Number of ports */
	int num_ports;
	/** Per-port network devices */
	struct net_device *netdev[UNM_FLASH_NUM_PORTS];
	/** DMA buffers */
	union phantom_dma_buffer *dma_buf;
	/** Flash memory SPI bus */
	struct spi_bus spi_bus;
	/** Flash memory SPI device */
	struct spi_device flash;
	/** Last known link state */
	uint32_t link_state;
};

/***************************************************************************
 *
 * CRB register access
 *
 */

/**
 * Prepare for access to CRB register via 128MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_128m ( struct phantom_nic *phantom,
					       unsigned long reg ) {
	static const uint32_t reg_window[] = {
		[UNM_CRB_BLK_PCIE] = 0x0000000,
		[UNM_CRB_BLK_CAM] = 0x2000000,
		[UNM_CRB_BLK_ROMUSB] = 0x2000000,
		[UNM_CRB_BLK_TEST] = 0x0000000,
	};
	static const uint32_t reg_bases[] = {
		[UNM_CRB_BLK_PCIE] = 0x6100000,
		[UNM_CRB_BLK_CAM] = 0x6200000,
		[UNM_CRB_BLK_ROMUSB] = 0x7300000,
		[UNM_CRB_BLK_TEST] = 0x6200000,
	};
	unsigned int block = UNM_CRB_BLK ( reg );
	unsigned long offset = UNM_CRB_OFFSET ( reg );
	uint32_t window = reg_window[block];
	uint32_t verify_window;

	if ( phantom->crb_window != window ) {
		/* Write to the CRB window register */
		writel ( window, phantom->bar0 + UNM_128M_CRB_WINDOW );

		/* Ensure that the write has reached the card */
		verify_window = readl ( phantom->bar0 + UNM_128M_CRB_WINDOW );
		assert ( verify_window == window );

		/* Record new window */
		phantom->crb_window = window;
	}

	return ( reg_bases[block] + offset );
}

/**
 * Prepare for access to CRB register via 32MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_32m ( struct phantom_nic *phantom,
					      unsigned long reg ) {
	static const uint32_t reg_window[] = {
		[UNM_CRB_BLK_PCIE] = 0x0000000,
		[UNM_CRB_BLK_CAM] = 0x2000000,
		[UNM_CRB_BLK_ROMUSB] = 0x2000000,
		[UNM_CRB_BLK_TEST] = 0x0000000,
	};
	static const uint32_t reg_bases[] = {
		[UNM_CRB_BLK_PCIE] = 0x0100000,
		[UNM_CRB_BLK_CAM] = 0x0200000,
		[UNM_CRB_BLK_ROMUSB] = 0x1300000,
		[UNM_CRB_BLK_TEST] = 0x0200000,
	};
	unsigned int block = UNM_CRB_BLK ( reg );
	unsigned long offset = UNM_CRB_OFFSET ( reg );
	uint32_t window = reg_window[block];
	uint32_t verify_window;

	if ( phantom->crb_window != window ) {
		/* Write to the CRB window register */
		writel ( window, phantom->bar0 + UNM_32M_CRB_WINDOW );

		/* Ensure that the write has reached the card */
		verify_window = readl ( phantom->bar0 + UNM_32M_CRB_WINDOW );
		assert ( verify_window == window );

		/* Record new window */
		phantom->crb_window = window;
	}

	return ( reg_bases[block] + offset );
}

/**
 * Prepare for access to CRB register via 2MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_2m ( struct phantom_nic *phantom,
					     unsigned long reg ) {
	static const uint32_t reg_window_hi[] = {
		[UNM_CRB_BLK_PCIE] = 0x77300000,
		[UNM_CRB_BLK_CAM] = 0x41600000,
		[UNM_CRB_BLK_ROMUSB] = 0x42100000,
		[UNM_CRB_BLK_TEST] = 0x29500000,
	};
	unsigned int block = UNM_CRB_BLK ( reg );
	unsigned long offset = UNM_CRB_OFFSET ( reg );
	uint32_t window = ( reg_window_hi[block] | ( offset & 0x000f0000 ) );
	uint32_t verify_window;

	if ( phantom->crb_window != window ) {
		/* Write to the CRB window register */
		writel ( window, phantom->bar0 + UNM_2M_CRB_WINDOW );

		/* Ensure that the write has reached the card */
		verify_window = readl ( phantom->bar0 + UNM_2M_CRB_WINDOW );
		assert ( verify_window == window );

		/* Record new window */
		phantom->crb_window = window;
	}

	return ( 0x1e0000 + ( offset & 0xffff ) );
}

/**
 * Read from Phantom CRB register
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret value Register value
 */
static uint32_t phantom_readl ( struct phantom_nic *phantom,
				unsigned long reg ) {
	unsigned long offset;

	offset = phantom->crb_access ( phantom, reg );
	return readl ( phantom->bar0 + offset );
}

/**
 * Write to Phantom CRB register
 *
 * @v phantom Phantom NIC
 * @v value Register value
 * @v reg Register offset within abstract address space
 */
static void phantom_writel ( struct phantom_nic *phantom, uint32_t value,
			     unsigned long reg ) {
	unsigned long offset;

	offset = phantom->crb_access ( phantom, reg );
	writel ( value, phantom->bar0 + offset );
}

/**
 * Write to Phantom CRB HI/LO register pair
 *
 * @v phantom Phantom NIC
 * @v value Register value
 * @v lo_offset LO register offset within CRB
 * @v hi_offset HI register offset within CRB
 */
static inline void phantom_write_hilo ( struct phantom_nic *phantom,
					uint64_t value,
					unsigned long lo_offset,
					unsigned long hi_offset ) {
	uint32_t lo = ( value & 0xffffffffUL );
	uint32_t hi = ( value >> 32 );

	phantom_writel ( phantom, lo, lo_offset );
	phantom_writel ( phantom, hi, hi_offset );
}

/***************************************************************************
 *
 * Firmware message buffer access (for debug)
 *
 */

/**
 * Read from Phantom test memory
 *
 * @v phantom Phantom NIC
 * @v offset Offset within test memory
 * @v buf 8-byte buffer to fill
 * @ret rc Return status code
 */
static int phantom_read_test_mem ( struct phantom_nic *phantom,
				   uint64_t offset, uint32_t buf[2] ) {
	unsigned int retries;
	uint32_t test_control;

	phantom_write_hilo ( phantom, offset, UNM_TEST_ADDR_LO,
			     UNM_TEST_ADDR_HI );
	phantom_writel ( phantom, UNM_TEST_CONTROL_ENABLE, UNM_TEST_CONTROL );
	phantom_writel ( phantom,
			 ( UNM_TEST_CONTROL_ENABLE | UNM_TEST_CONTROL_START ),
			 UNM_TEST_CONTROL );

	for ( retries = 0 ; retries < PHN_TEST_MEM_TIMEOUT_MS ; retries++ ) {
		test_control = phantom_readl ( phantom, UNM_TEST_CONTROL );
		if ( ( test_control & UNM_TEST_CONTROL_BUSY ) == 0 ) {
			buf[0] = phantom_readl ( phantom, UNM_TEST_RDDATA_LO );
			buf[1] = phantom_readl ( phantom, UNM_TEST_RDDATA_HI );
			return 0;
		}
		mdelay ( 1 );
	}

	DBGC ( phantom, "Phantom %p timed out waiting for test memory\n",
	       phantom );
	return -ETIMEDOUT;
}

/**
 * Dump Phantom firmware dmesg log
 *
 * @v phantom Phantom NIC
 * @v log Log number
 */
static void phantom_dmesg ( struct phantom_nic *phantom, unsigned int log ) {
	uint32_t head;
	uint32_t tail;
	uint32_t len;
	uint32_t sig;
	uint32_t offset;
	union {
		uint8_t bytes[8];
		uint32_t dwords[2];
	} buf;
	unsigned int i;
	int rc;

	/* Optimise out for non-debug builds */
	if ( ! DBG_LOG )
		return;

	head = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_HEAD ( log ) );
	len = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_LEN ( log ) );
	tail = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_TAIL ( log ) );
	sig = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_SIG ( log ) );
	DBGC ( phantom, "Phantom %p firmware dmesg buffer %d (%08lx-%08lx)\n",
	       phantom, log, head, tail );
	assert ( ( head & 0x07 ) == 0 );
	if ( sig != UNM_CAM_RAM_DMESG_SIG_MAGIC ) {
		DBGC ( phantom, "Warning: bad signature %08lx (want %08lx)\n",
		       sig, UNM_CAM_RAM_DMESG_SIG_MAGIC );
	}

	for ( offset = head ; offset < tail ; offset += 8 ) {
		if ( ( rc = phantom_read_test_mem ( phantom, offset,
						    buf.dwords ) ) != 0 ) {
			DBGC ( phantom, "Phantom %p could not read from test "
			       "memory: %s\n", phantom, strerror ( rc ) );
			break;
		}
		for ( i = 0 ; ( ( i < sizeof ( buf ) ) &&
				( offset + i ) < tail ) ; i++ ) {
			DBG ( "%c", buf.bytes[i] );
		}
	}
	DBG ( "\n" );
}

/**
 * Dump Phantom firmware dmesg logs
 *
 * @v phantom Phantom NIC
 */
static void __attribute__ (( unused ))
phantom_dmesg_all ( struct phantom_nic *phantom ) {
	unsigned int i;

	for ( i = 0 ; i < UNM_CAM_RAM_NUM_DMESG_BUFFERS ; i++ )
		phantom_dmesg ( phantom, i );
}

/***************************************************************************
 *
 * SPI bus access (for flash memory)
 *
 */

/**
 * Acquire Phantom SPI lock
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_spi_lock ( struct phantom_nic *phantom ) {
	unsigned int retries;
	uint32_t pcie_sem2_lock;

	for ( retries = 0 ; retries < PHN_SPI_LOCK_TIMEOUT_MS ; retries++ ) {
		pcie_sem2_lock = phantom_readl ( phantom, UNM_PCIE_SEM2_LOCK );
		if ( pcie_sem2_lock != 0 )
			return 0;
		mdelay ( 1 );
	}

	DBGC ( phantom, "Phantom %p timed out waiting for SPI lock\n",
	       phantom );
	return -ETIMEDOUT;
}

/**
 * Wait for Phantom SPI command to complete
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_spi_wait ( struct phantom_nic *phantom ) {
	unsigned int retries;
	uint32_t glb_status;

	for ( retries = 0 ; retries < PHN_SPI_CMD_TIMEOUT_MS ; retries++ ) {
		glb_status = phantom_readl ( phantom, UNM_ROMUSB_GLB_STATUS );
		if ( glb_status & UNM_ROMUSB_GLB_STATUS_ROM_DONE )
			return 0;
		mdelay ( 1 );
	}

	DBGC ( phantom, "Phantom %p timed out waiting for SPI command\n",
	       phantom );
	return -ETIMEDOUT;
}

/**
 * Release Phantom SPI lock
 *
 * @v phantom Phantom NIC
 */
static void phantom_spi_unlock ( struct phantom_nic *phantom ) {
	phantom_readl ( phantom, UNM_PCIE_SEM2_UNLOCK );
}

/**
 * Read/write data via Phantom SPI bus
 *
 * @v bus SPI bus
 * @v device SPI device
 * @v command Command
 * @v address Address to read/write (<0 for no address)
 * @v data_out TX data buffer (or NULL)
 * @v data_in RX data buffer (or NULL)
 * @v len Length of data buffer(s)
 * @ret rc Return status code
 */
static int phantom_spi_rw ( struct spi_bus *bus,
			    struct spi_device *device,
			    unsigned int command, int address,
			    const void *data_out, void *data_in,
			    size_t len ) {
	struct phantom_nic *phantom =
		container_of ( bus, struct phantom_nic, spi_bus );
	uint32_t data;
	int rc;

	DBGCP ( phantom, "Phantom %p SPI command %x at %x+%zx\n",
		phantom, command, address, len );
	if ( data_out )
		DBGCP_HDA ( phantom, address, data_out, len );

	/* We support only exactly 4-byte reads */
	if ( len != UNM_SPI_BLKSIZE ) {
		DBGC ( phantom, "Phantom %p invalid SPI length %zx\n",
		       phantom, len );
		return -EINVAL;
	}

	/* Acquire SPI lock */
	if ( ( rc = phantom_spi_lock ( phantom ) ) != 0 )
		goto err_lock;

	/* Issue SPI command as per the PRM */
	if ( data_out ) {
		memcpy ( &data, data_out, sizeof ( data ) );
		phantom_writel ( phantom, data, UNM_ROMUSB_ROM_WDATA );
	}
	phantom_writel ( phantom, address, UNM_ROMUSB_ROM_ADDRESS );
	phantom_writel ( phantom, ( device->address_len / 8 ),
			 UNM_ROMUSB_ROM_ABYTE_CNT );
	udelay ( 100 ); /* according to PRM */
	phantom_writel ( phantom, 0, UNM_ROMUSB_ROM_DUMMY_BYTE_CNT );
	phantom_writel ( phantom, command, UNM_ROMUSB_ROM_INSTR_OPCODE );

	/* Wait for SPI command to complete */
	if ( ( rc = phantom_spi_wait ( phantom ) ) != 0 )
		goto err_wait;

	/* Reset address byte count and dummy byte count, because the
	 * PRM asks us to.
	 */
	phantom_writel ( phantom, 0, UNM_ROMUSB_ROM_ABYTE_CNT );
	udelay ( 100 ); /* according to PRM */
	phantom_writel ( phantom, 0, UNM_ROMUSB_ROM_DUMMY_BYTE_CNT );

	/* Read data, if applicable */
	if ( data_in ) {
		data = phantom_readl ( phantom, UNM_ROMUSB_ROM_RDATA );
		memcpy ( data_in, &data, sizeof ( data ) );
		DBGCP_HDA ( phantom, address, data_in, len );
	}

 err_wait:
	phantom_spi_unlock ( phantom );
 err_lock:
	return rc;
}

/***************************************************************************
 *
 * Firmware interface
 *
 */

/**
 * Wait for firmware to accept command
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_wait_for_cmd ( struct phantom_nic *phantom ) {
	unsigned int retries;
	uint32_t cdrp;

	for ( retries = 0 ; retries < PHN_ISSUE_CMD_TIMEOUT_MS ; retries++ ) {
		mdelay ( 1 );
		cdrp = phantom_readl ( phantom, UNM_NIC_REG_NX_CDRP );
		if ( NX_CDRP_IS_RSP ( cdrp ) ) {
			switch ( NX_CDRP_FORM_RSP ( cdrp ) ) {
			case NX_CDRP_RSP_OK:
				return 0;
			case NX_CDRP_RSP_FAIL:
				return -EIO;
			case NX_CDRP_RSP_TIMEOUT:
				return -ETIMEDOUT;
			default:
				return -EPROTO;
			}
		}
	}

	DBGC ( phantom, "Phantom %p timed out waiting for firmware to accept "
	       "command\n", phantom );
	return -ETIMEDOUT;
}

/**
 * Issue command to firmware
 *
 * @v phantom_port Phantom NIC port
 * @v command Firmware command
 * @v arg1 Argument 1
 * @v arg2 Argument 2
 * @v arg3 Argument 3
 * @ret rc Return status code
 */
static int phantom_issue_cmd ( struct phantom_nic_port *phantom_port,
			       uint32_t command, uint32_t arg1, uint32_t arg2,
			       uint32_t arg3 ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	uint32_t signature;
	int rc;

	/* Issue command */
	signature = NX_CDRP_SIGNATURE_MAKE ( phantom_port->port,
					     NXHAL_VERSION );
	DBGC2 ( phantom, "Phantom %p port %d issuing command %08lx (%08lx, "
		"%08lx, %08lx)\n", phantom, phantom_port->port,
		command, arg1, arg2, arg3 );
	phantom_writel ( phantom, signature, UNM_NIC_REG_NX_SIGN );
	phantom_writel ( phantom, arg1, UNM_NIC_REG_NX_ARG1 );
	phantom_writel ( phantom, arg2, UNM_NIC_REG_NX_ARG2 );
	phantom_writel ( phantom, arg3, UNM_NIC_REG_NX_ARG3 );
	phantom_writel ( phantom, NX_CDRP_FORM_CMD ( command ),
			 UNM_NIC_REG_NX_CDRP );

	/* Wait for command to be accepted */
	if ( ( rc = phantom_wait_for_cmd ( phantom ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not issue command: %s\n",
		       phantom, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Issue buffer-format command to firmware
 *
 * @v phantom_port Phantom NIC port
 * @v command Firmware command
 * @v buffer Buffer to pass to firmware
 * @v len Length of buffer
 * @ret rc Return status code
 */
static int phantom_issue_buf_cmd ( struct phantom_nic_port *phantom_port,
				   uint32_t command, void *buffer,
				   size_t len ) {
	uint64_t physaddr;

	physaddr = virt_to_bus ( buffer );
	return phantom_issue_cmd ( phantom_port, command, ( physaddr >> 32 ),
				   ( physaddr & 0xffffffffUL ), len );
}

/**
 * Create Phantom RX context
 *
 * @v phantom_port Phantom NIC port
 * @ret rc Return status code
 */
static int phantom_create_rx_ctx ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	struct phantom_create_rx_ctx_rqrsp *buf;
	int rc;

	/* Prepare request */
	buf = &phantom->dma_buf->create_rx_ctx;
	memset ( buf, 0, sizeof ( *buf ) );
	buf->hostrq.rx_ctx.host_rsp_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &buf->cardrsp ) );
	buf->hostrq.rx_ctx.capabilities[0] =
		cpu_to_le32 ( NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN );
	buf->hostrq.rx_ctx.host_int_crb_mode =
		cpu_to_le32 ( NX_HOST_INT_CRB_MODE_SHARED );
	buf->hostrq.rx_ctx.host_rds_crb_mode =
		cpu_to_le32 ( NX_HOST_RDS_CRB_MODE_UNIQUE );
	buf->hostrq.rx_ctx.rds_ring_offset = cpu_to_le32 ( 0 );
	buf->hostrq.rx_ctx.sds_ring_offset =
		cpu_to_le32 ( sizeof ( buf->hostrq.rds ) );
	buf->hostrq.rx_ctx.num_rds_rings = cpu_to_le16 ( 1 );
	buf->hostrq.rx_ctx.num_sds_rings = cpu_to_le16 ( 1 );
	buf->hostrq.rds.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom_port->desc->rds ) );
	buf->hostrq.rds.buff_size = cpu_to_le64 ( PHN_RX_BUFSIZE );
	buf->hostrq.rds.ring_size = cpu_to_le32 ( PHN_NUM_RDS );
	buf->hostrq.rds.ring_kind = cpu_to_le32 ( NX_RDS_RING_TYPE_NORMAL );
	buf->hostrq.sds.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom_port->desc->sds ) );
	buf->hostrq.sds.ring_size = cpu_to_le32 ( PHN_NUM_SDS );

	DBGC ( phantom, "Phantom %p port %d creating RX context\n",
	       phantom, phantom_port->port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
		    &buf->hostrq, sizeof ( buf->hostrq ) );

	/* Issue request */
	if ( ( rc = phantom_issue_buf_cmd ( phantom_port,
					    NX_CDRP_CMD_CREATE_RX_CTX,
					    &buf->hostrq,
					    sizeof ( buf->hostrq ) ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p port %d could not create RX "
		       "context: %s\n",
		       phantom, phantom_port->port, strerror ( rc ) );
		DBGC ( phantom, "Request:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
			   &buf->hostrq, sizeof ( buf->hostrq ) );
		DBGC ( phantom, "Response:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
			   &buf->cardrsp, sizeof ( buf->cardrsp ) );
		return rc;
	}

	/* Retrieve context parameters */
	phantom_port->rx_context_id =
		le16_to_cpu ( buf->cardrsp.rx_ctx.context_id );
	phantom_port->rds_producer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu ( buf->cardrsp.rds.host_producer_crb ) );
	phantom_port->sds_consumer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu ( buf->cardrsp.sds.host_consumer_crb ) );

	DBGC ( phantom, "Phantom %p port %d created RX context (id %04x, "
	       "port phys %02x virt %02x)\n", phantom, phantom_port->port,
	       phantom_port->rx_context_id, buf->cardrsp.rx_ctx.phys_port,
	       buf->cardrsp.rx_ctx.virt_port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
		    &buf->cardrsp, sizeof ( buf->cardrsp ) );
	DBGC ( phantom, "Phantom %p port %d RDS producer CRB is %08lx\n",
	       phantom, phantom_port->port, phantom_port->rds_producer_crb );
	DBGC ( phantom, "Phantom %p port %d SDS consumer CRB is %08lx\n",
	       phantom, phantom_port->port, phantom_port->sds_consumer_crb );

	return 0;
}

/**
 * Destroy Phantom RX context
 *
 * @v phantom_port Phantom NIC port
 */
static void phantom_destroy_rx_ctx ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	int rc;

	DBGC ( phantom, "Phantom %p port %d destroying RX context (id %04x)\n",
	       phantom, phantom_port->port, phantom_port->rx_context_id );

	/* Issue request */
	if ( ( rc = phantom_issue_cmd ( phantom_port,
					NX_CDRP_CMD_DESTROY_RX_CTX,
					phantom_port->rx_context_id,
					NX_DESTROY_CTX_RESET, 0 ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p port %d could not destroy RX "
		       "context: %s\n",
		       phantom, phantom_port->port, strerror ( rc ) );
		/* We're probably screwed */
		return;
	}

	/* Clear context parameters */
	phantom_port->rx_context_id = 0;
	phantom_port->rds_producer_crb = 0;
	phantom_port->sds_consumer_crb = 0;

	/* Reset software counters */
	phantom_port->rds_producer_idx = 0;
	phantom_port->rds_consumer_idx = 0;
	phantom_port->sds_consumer_idx = 0;
}

/**
 * Create Phantom TX context
 *
 * @v phantom_port Phantom NIC port
 * @ret rc Return status code
 */
static int phantom_create_tx_ctx ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	struct phantom_create_tx_ctx_rqrsp *buf;
	int rc;

	/* Prepare request */
	buf = &phantom->dma_buf->create_tx_ctx;
	memset ( buf, 0, sizeof ( *buf ) );
	buf->hostrq.tx_ctx.host_rsp_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &buf->cardrsp ) );
	buf->hostrq.tx_ctx.cmd_cons_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &phantom_port->desc->cmd_cons ) );
	buf->hostrq.tx_ctx.dummy_dma_addr =
		cpu_to_le64 ( virt_to_bus ( phantom->dma_buf->dummy_dma ) );
	buf->hostrq.tx_ctx.capabilities[0] =
		cpu_to_le32 ( NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN );
	buf->hostrq.tx_ctx.host_int_crb_mode =
		cpu_to_le32 ( NX_HOST_INT_CRB_MODE_SHARED );
	buf->hostrq.tx_ctx.cds_ring.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom_port->desc->cds ) );
	buf->hostrq.tx_ctx.cds_ring.ring_size = cpu_to_le32 ( PHN_NUM_CDS );

	DBGC ( phantom, "Phantom %p port %d creating TX context\n",
	       phantom, phantom_port->port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
		    &buf->hostrq, sizeof ( buf->hostrq ) );

	/* Issue request */
	if ( ( rc = phantom_issue_buf_cmd ( phantom_port,
					    NX_CDRP_CMD_CREATE_TX_CTX,
					    &buf->hostrq,
					    sizeof ( buf->hostrq ) ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p port %d could not create TX "
		       "context: %s\n",
		       phantom, phantom_port->port, strerror ( rc ) );
		DBGC ( phantom, "Request:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
			   &buf->hostrq, sizeof ( buf->hostrq ) );
		DBGC ( phantom, "Response:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
			   &buf->cardrsp, sizeof ( buf->cardrsp ) );
		return rc;
	}

	/* Retrieve context parameters */
	phantom_port->tx_context_id =
		le16_to_cpu ( buf->cardrsp.tx_ctx.context_id );
	phantom_port->cds_producer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu(buf->cardrsp.tx_ctx.cds_ring.host_producer_crb));

	DBGC ( phantom, "Phantom %p port %d created TX context (id %04x, "
	       "port phys %02x virt %02x)\n", phantom, phantom_port->port,
	       phantom_port->tx_context_id, buf->cardrsp.tx_ctx.phys_port,
	       buf->cardrsp.tx_ctx.virt_port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
		    &buf->cardrsp, sizeof ( buf->cardrsp ) );
	DBGC ( phantom, "Phantom %p port %d CDS producer CRB is %08lx\n",
	       phantom, phantom_port->port, phantom_port->cds_producer_crb );

	return 0;
}

/**
 * Destroy Phantom TX context
 *
 * @v phantom_port Phantom NIC port
 */
static void phantom_destroy_tx_ctx ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	int rc;

	DBGC ( phantom, "Phantom %p port %d destroying TX context (id %04x)\n",
	       phantom, phantom_port->port, phantom_port->tx_context_id );

	/* Issue request */
	if ( ( rc = phantom_issue_cmd ( phantom_port,
					NX_CDRP_CMD_DESTROY_TX_CTX,
					phantom_port->tx_context_id,
					NX_DESTROY_CTX_RESET, 0 ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p port %d could not destroy TX "
		       "context: %s\n",
		       phantom, phantom_port->port, strerror ( rc ) );
		/* We're probably screwed */
		return;
	}

	/* Clear context parameters */
	phantom_port->tx_context_id = 0;
	phantom_port->cds_producer_crb = 0;

	/* Reset software counters */
	phantom_port->cds_producer_idx = 0;
	phantom_port->cds_consumer_idx = 0;
}

/***************************************************************************
 *
 * Descriptor ring management
 *
 */

/**
 * Allocate Phantom RX descriptor
 *
 * @v phantom_port Phantom NIC port
 * @ret index RX descriptor index, or negative error
 */
static int phantom_alloc_rds ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	unsigned int rds_producer_idx;
	unsigned int next_rds_producer_idx;

	/* Check for space in the ring.  RX descriptors are consumed
	 * out of order, but they are *read* by the hardware in strict
	 * order.  We maintain a pessimistic consumer index, which is
	 * guaranteed never to be an overestimate of the number of
	 * descriptors read by the hardware.
	 */
	rds_producer_idx = phantom_port->rds_producer_idx;
	next_rds_producer_idx = ( ( rds_producer_idx + 1 ) % PHN_NUM_RDS );
	if ( next_rds_producer_idx == phantom_port->rds_consumer_idx ) {
		DBGC ( phantom, "Phantom %p port %d RDS ring full (index %d "
		       "not consumed)\n", phantom, phantom_port->port,
		       next_rds_producer_idx );
		return -ENOBUFS;
	}

	return rds_producer_idx;
}

/**
 * Post Phantom RX descriptor
 *
 * @v phantom_port Phantom NIC port
 * @v rds RX descriptor
 */
static void phantom_post_rds ( struct phantom_nic_port *phantom_port,
			       struct phantom_rds *rds ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	unsigned int rds_producer_idx;
	unsigned int next_rds_producer_idx;
	struct phantom_rds *entry;

	/* Copy descriptor to ring */
	rds_producer_idx = phantom_port->rds_producer_idx;
	entry = &phantom_port->desc->rds[rds_producer_idx];
	memcpy ( entry, rds, sizeof ( *entry ) );
	DBGC2 ( phantom, "Phantom %p port %d posting RDS %ld (slot %d):\n",
		phantom, phantom_port->port, NX_GET ( rds, handle ),
		rds_producer_idx );
	DBGC2_HDA ( phantom, virt_to_bus ( entry ), entry, sizeof ( *entry ) );

	/* Update producer index */
	next_rds_producer_idx = ( ( rds_producer_idx + 1 ) % PHN_NUM_RDS );
	phantom_port->rds_producer_idx = next_rds_producer_idx;
	wmb();
	phantom_writel ( phantom, phantom_port->rds_producer_idx,
			 phantom_port->rds_producer_crb );
}

/**
 * Allocate Phantom TX descriptor
 *
 * @v phantom_port Phantom NIC port
 * @ret index TX descriptor index, or negative error
 */
static int phantom_alloc_cds ( struct phantom_nic_port *phantom_port ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	unsigned int cds_producer_idx;
	unsigned int next_cds_producer_idx;

	/* Check for space in the ring.  TX descriptors are consumed
	 * in strict order, so we just check for a collision against
	 * the consumer index.
	 */
	cds_producer_idx = phantom_port->cds_producer_idx;
	next_cds_producer_idx = ( ( cds_producer_idx + 1 ) % PHN_NUM_CDS );
	if ( next_cds_producer_idx == phantom_port->cds_consumer_idx ) {
		DBGC ( phantom, "Phantom %p port %d CDS ring full (index %d "
		       "not consumed)\n", phantom, phantom_port->port,
		       next_cds_producer_idx );
		return -ENOBUFS;
	}

	return cds_producer_idx;
}

/**
 * Post Phantom TX descriptor
 *
 * @v phantom_port Phantom NIC port
 * @v cds TX descriptor
 */
static void phantom_post_cds ( struct phantom_nic_port *phantom_port,
			       union phantom_cds *cds ) {
	struct phantom_nic *phantom = phantom_port->phantom;
	unsigned int cds_producer_idx;
	unsigned int next_cds_producer_idx;
	union phantom_cds *entry;

	/* Copy descriptor to ring */
	cds_producer_idx = phantom_port->cds_producer_idx;
	entry = &phantom_port->desc->cds[cds_producer_idx];
	memcpy ( entry, cds, sizeof ( *entry ) );
	DBGC2 ( phantom, "Phantom %p port %d posting CDS %d:\n",
		phantom, phantom_port->port, cds_producer_idx );
	DBGC2_HDA ( phantom, virt_to_bus ( entry ), entry, sizeof ( *entry ) );

	/* Update producer index */
	next_cds_producer_idx = ( ( cds_producer_idx + 1 ) % PHN_NUM_CDS );
	phantom_port->cds_producer_idx = next_cds_producer_idx;
	wmb();
	phantom_writel ( phantom, phantom_port->cds_producer_idx,
			 phantom_port->cds_producer_crb );
}
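
/* Note: both rings use the usual producer/consumer scheme.  The host
 * owns the producer index, publishes it by writing the CRB offset
 * returned at context creation (rds_producer_crb / cds_producer_crb),
 * and treats "next producer == consumer" as ring-full, so one slot is
 * always left unused.  wmb() orders the descriptor copy before the
 * producer-index write reaches the card.
 */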

/***************************************************************************
 *
 * MAC address management
 *
 */

/**
 * Add/remove MAC address
 *
 * @v phantom_port Phantom NIC port
 * @v ll_addr MAC address to add or remove
 * @v opcode MAC request opcode
 * @ret rc Return status code
 */
static int phantom_update_macaddr ( struct phantom_nic_port *phantom_port,
				    const uint8_t *ll_addr,
				    unsigned int opcode ) {
	union phantom_cds cds;
	int index;

	/* Get descriptor ring entry */
	index = phantom_alloc_cds ( phantom_port );
	if ( index < 0 )
		return index;

	/* Fill descriptor ring entry */
	memset ( &cds, 0, sizeof ( cds ) );
	NX_FILL_1 ( &cds, 0,
		    nic_request.common.opcode, UNM_NIC_REQUEST );
	NX_FILL_2 ( &cds, 1,
		    nic_request.header.opcode, UNM_MAC_EVENT,
		    nic_request.header.context_id, phantom_port->port );
	NX_FILL_7 ( &cds, 2,
		    nic_request.body.mac_request.opcode, opcode,
		    nic_request.body.mac_request.mac_addr_0, ll_addr[0],
		    nic_request.body.mac_request.mac_addr_1, ll_addr[1],
		    nic_request.body.mac_request.mac_addr_2, ll_addr[2],
		    nic_request.body.mac_request.mac_addr_3, ll_addr[3],
		    nic_request.body.mac_request.mac_addr_4, ll_addr[4],
		    nic_request.body.mac_request.mac_addr_5, ll_addr[5] );

	/* Post descriptor */
	phantom_post_cds ( phantom_port, &cds );

	return 0;
}

/**
 * Add MAC address
 *
 * @v phantom_port Phantom NIC port
 * @v ll_addr MAC address to add
 * @ret rc Return status code
 */
static inline int phantom_add_macaddr ( struct phantom_nic_port *phantom_port,
					const uint8_t *ll_addr ) {
	struct phantom_nic *phantom = phantom_port->phantom;

	DBGC ( phantom, "Phantom %p port %d adding MAC address %s\n",
	       phantom, phantom_port->port, eth_ntoa ( ll_addr ) );

	return phantom_update_macaddr ( phantom_port, ll_addr, UNM_MAC_ADD );
}

/**
 * Remove MAC address
 *
 * @v phantom_port Phantom NIC port
 * @v ll_addr MAC address to remove
 * @ret rc Return status code
 */
static inline int phantom_del_macaddr ( struct phantom_nic_port *phantom_port,
					const uint8_t *ll_addr ) {
	struct phantom_nic *phantom = phantom_port->phantom;

	DBGC ( phantom, "Phantom %p port %d removing MAC address %s\n",
	       phantom, phantom_port->port, eth_ntoa ( ll_addr ) );

	return phantom_update_macaddr ( phantom_port, ll_addr, UNM_MAC_DEL );
}

/***************************************************************************
 *
 * Link state detection
 *
 */

/**
 * Poll link state
 *
 * @v phantom Phantom NIC
 */
static void phantom_poll_link_state ( struct phantom_nic *phantom ) {
	struct net_device *netdev;
	struct phantom_nic_port *phantom_port;
	uint32_t xg_state_p3;
	unsigned int link;
	int i;

	/* Read link state */
	xg_state_p3 = phantom_readl ( phantom, UNM_NIC_REG_XG_STATE_P3 );

	/* If there is no change, do nothing */
	if ( phantom->link_state == xg_state_p3 )
		return;

	/* Record new link state */
	DBGC ( phantom, "Phantom %p new link state %08lx (was %08lx)\n",
	       phantom, xg_state_p3, phantom->link_state );
	phantom->link_state = xg_state_p3;

	/* Indicate per-port link state to gPXE */
	for ( i = 0 ; i < phantom->num_ports ; i++ ) {
		netdev = phantom->netdev[i];
		phantom_port = netdev_priv ( netdev );
		link = UNM_NIC_REG_XG_STATE_P3_LINK ( phantom_port->port,
						      phantom->link_state );
		switch ( link ) {
		case UNM_NIC_REG_XG_STATE_P3_LINK_UP:
			DBGC ( phantom, "Phantom %p port %d link is up\n",
			       phantom, phantom_port->port );
			netdev_link_up ( netdev );
			break;
		case UNM_NIC_REG_XG_STATE_P3_LINK_DOWN:
			DBGC ( phantom, "Phantom %p port %d link is down\n",
			       phantom, phantom_port->port );
			netdev_link_down ( netdev );
			break;
		default:
			DBGC ( phantom, "Phantom %p port %d bad link state "
			       "%d\n", phantom, phantom_port->port, link );
			break;
		}
	}
}

/***************************************************************************
 *
 * Main driver body
 *
 */

/**
 * Refill RX descriptor ring
 *
 * @v netdev Net device
 */
static void phantom_refill_rx_ring ( struct net_device *netdev ) {
	struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
	struct io_buffer *iobuf;
	struct phantom_rds rds;
	unsigned int handle;
	int index;

	for ( handle = 0 ; handle < PHN_RDS_MAX_FILL ; handle++ ) {

		/* Skip this index if the descriptor has not yet been
		 * consumed.
		 */
		if ( phantom_port->rds_iobuf[handle] != NULL )
			continue;

		/* Allocate descriptor ring entry */
		index = phantom_alloc_rds ( phantom_port );
		assert ( PHN_RDS_MAX_FILL < PHN_NUM_RDS );
		assert ( index >= 0 ); /* Guaranteed by MAX_FILL < NUM_RDS */

		/* Try to allocate an I/O buffer */
		iobuf = alloc_iob ( PHN_RX_BUFSIZE );
		if ( ! iobuf ) {
			/* Failure is non-fatal; we will retry later */
			netdev_rx_err ( netdev, NULL, -ENOMEM );
			break;
		}

		/* Fill descriptor ring entry */
		memset ( &rds, 0, sizeof ( rds ) );
		NX_FILL_2 ( &rds, 0,
			    handle, handle,
			    length, iob_len ( iobuf ) );
		NX_FILL_1 ( &rds, 1,
			    dma_addr, virt_to_bus ( iobuf->data ) );

		/* Record I/O buffer */
		assert ( phantom_port->rds_iobuf[handle] == NULL );
		phantom_port->rds_iobuf[handle] = iobuf;

		/* Post descriptor */
		phantom_post_rds ( phantom_port, &rds );
	}
}

/**
 * Open NIC
 *
 * @v netdev Net device
 * @ret rc Return status code
 */
static int phantom_open ( struct net_device *netdev ) {
	struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
	int rc;

	/* Allocate and zero descriptor rings */
	phantom_port->desc = malloc_dma ( sizeof ( *(phantom_port->desc) ),
					  UNM_DMA_BUFFER_ALIGN );
	if ( ! phantom_port->desc ) {
		rc = -ENOMEM;
		goto err_alloc_desc;
	}
	memset ( phantom_port->desc, 0, sizeof ( *(phantom_port->desc) ) );

	/* Create RX context */
	if ( ( rc = phantom_create_rx_ctx ( phantom_port ) ) != 0 )
		goto err_create_rx_ctx;

	/* Create TX context */
	if ( ( rc = phantom_create_tx_ctx ( phantom_port ) ) != 0 )
		goto err_create_tx_ctx;

	/* Fill the RX descriptor ring */
	phantom_refill_rx_ring ( netdev );

	/* Add MAC addresses
	 *
	 * BUG5583
	 *
	 * We would like to be able to enable receiving all multicast
	 * packets (or, failing that, promiscuous mode), but the
	 * firmware doesn't currently support this.
	 */
	if ( ( rc = phantom_add_macaddr ( phantom_port,
				netdev->ll_protocol->ll_broadcast ) ) != 0 )
		goto err_add_macaddr_broadcast;
	if ( ( rc = phantom_add_macaddr ( phantom_port,
					  netdev->ll_addr ) ) != 0 )
		goto err_add_macaddr_unicast;

	return 0;

	phantom_del_macaddr ( phantom_port, netdev->ll_addr );
 err_add_macaddr_unicast:
	phantom_del_macaddr ( phantom_port,
			      netdev->ll_protocol->ll_broadcast );
 err_add_macaddr_broadcast:
	phantom_destroy_tx_ctx ( phantom_port );
 err_create_tx_ctx:
	phantom_destroy_rx_ctx ( phantom_port );
 err_create_rx_ctx:
	free_dma ( phantom_port->desc, sizeof ( *(phantom_port->desc) ) );
	phantom_port->desc = NULL;
 err_alloc_desc:
	return rc;
}

/**
 * Close NIC
 *
 * @v netdev Net device
 */
static void phantom_close ( struct net_device *netdev ) {
	struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
	struct io_buffer *iobuf;
	unsigned int i;

	/* BUG5671
	 *
	 * When the last TX context is destroyed, the firmware will
	 * pause the Egress Packet Generator (EPG).  The corresponding
	 * code that is supposed to unpause the EPG when the first TX
	 * context is created is #if 0'd out (with a comment saying
	 * FIXME).  The net result of this is that if you close and
	 * then reopen the interface, you will no longer be able to
	 * transmit packets.
	 */

	/* Shut down the port */
	phantom_del_macaddr ( phantom_port, netdev->ll_addr );
	phantom_del_macaddr ( phantom_port,
			      netdev->ll_protocol->ll_broadcast );
	phantom_destroy_tx_ctx ( phantom_port );
	phantom_destroy_rx_ctx ( phantom_port );
	free_dma ( phantom_port->desc, sizeof ( *(phantom_port->desc) ) );
	phantom_port->desc = NULL;

	/* Flush any uncompleted descriptors */
	for ( i = 0 ; i < PHN_RDS_MAX_FILL ; i++ ) {
		iobuf = phantom_port->rds_iobuf[i];
		if ( iobuf ) {
			free_iob ( iobuf );
			phantom_port->rds_iobuf[i] = NULL;
		}
	}
	for ( i = 0 ; i < PHN_NUM_CDS ; i++ ) {
		iobuf = phantom_port->cds_iobuf[i];
		if ( iobuf ) {
			netdev_tx_complete_err ( netdev, iobuf, -ECANCELED );
			phantom_port->cds_iobuf[i] = NULL;
		}
	}
}

/**
 * Transmit packet
 *
 * @v netdev Network device
 * @v iobuf I/O buffer
 * @ret rc Return status code
 */
static int phantom_transmit ( struct net_device *netdev,
			      struct io_buffer *iobuf ) {
	struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
	union phantom_cds cds;
	int index;

	/* Get descriptor ring entry */
	index = phantom_alloc_cds ( phantom_port );
	if ( index < 0 )
		return index;

	/* Fill descriptor ring entry */
	memset ( &cds, 0, sizeof ( cds ) );
	NX_FILL_3 ( &cds, 0,
		    tx.opcode, UNM_TX_ETHER_PKT,
		    tx.num_buffers, 1,
		    tx.length, iob_len ( iobuf ) );
	NX_FILL_2 ( &cds, 2,
		    tx.port, phantom_port->port,
		    tx.context_id, phantom_port->port );
	NX_FILL_1 ( &cds, 4,
		    tx.buffer1_dma_addr, virt_to_bus ( iobuf->data ) );
	NX_FILL_1 ( &cds, 5,
		    tx.buffer1_length, iob_len ( iobuf ) );

	/* Record I/O buffer */
	assert ( phantom_port->cds_iobuf[index] == NULL );
	phantom_port->cds_iobuf[index] = iobuf;

	/* Post descriptor */
	phantom_post_cds ( phantom_port, &cds );

	return 0;
}

/**
 * Poll for received packets
 *
 * @v netdev Network device
 */
static void phantom_poll ( struct net_device *netdev ) {
	struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
	struct phantom_nic *phantom = phantom_port->phantom;
	struct io_buffer *iobuf;
	unsigned int cds_consumer_idx;
	unsigned int raw_new_cds_consumer_idx;
	unsigned int new_cds_consumer_idx;
	unsigned int rds_consumer_idx;
	unsigned int sds_consumer_idx;
	struct phantom_sds *sds;
	unsigned int sds_handle;
	unsigned int sds_opcode;

	/* Check for TX completions */
	cds_consumer_idx = phantom_port->cds_consumer_idx;
	raw_new_cds_consumer_idx = phantom_port->desc->cmd_cons;
	new_cds_consumer_idx = le32_to_cpu ( raw_new_cds_consumer_idx );
	while ( cds_consumer_idx != new_cds_consumer_idx ) {
		DBGC2 ( phantom, "Phantom %p port %d CDS %d complete\n",
			phantom, phantom_port->port, cds_consumer_idx );
		/* Completions may be for commands other than TX, so
		 * there may not always be an associated I/O buffer.
		 */
		if ( ( iobuf = phantom_port->cds_iobuf[cds_consumer_idx] ) ) {
			netdev_tx_complete ( netdev, iobuf );
			phantom_port->cds_iobuf[cds_consumer_idx] = NULL;
		}
		cds_consumer_idx = ( ( cds_consumer_idx + 1 ) % PHN_NUM_CDS );
		phantom_port->cds_consumer_idx = cds_consumer_idx;
	}

	/* Check for received packets */
	rds_consumer_idx = phantom_port->rds_consumer_idx;
	sds_consumer_idx = phantom_port->sds_consumer_idx;
	while ( 1 ) {
		sds = &phantom_port->desc->sds[sds_consumer_idx];
		if ( NX_GET ( sds, owner ) == 0 )
			break;

		DBGC2 ( phantom, "Phantom %p port %d SDS %d status:\n",
			phantom, phantom_port->port, sds_consumer_idx );
		DBGC2_HDA ( phantom, virt_to_bus ( sds ), sds, sizeof (*sds) );

		/* Check received opcode */
		sds_opcode = NX_GET ( sds, opcode );
		switch ( sds_opcode ) {
		case UNM_RXPKT_DESC:
		case UNM_SYN_OFFLOAD:
			/* Process received packet */
			sds_handle = NX_GET ( sds, handle );
			iobuf = phantom_port->rds_iobuf[sds_handle];
			assert ( iobuf != NULL );
			iob_put ( iobuf, NX_GET ( sds, total_length ) );
			iob_pull ( iobuf, NX_GET ( sds, pkt_offset ) );
			DBGC2 ( phantom, "Phantom %p port %d RDS %d "
				"complete\n",
				phantom, phantom_port->port, sds_handle );
			netdev_rx ( netdev, iobuf );
			phantom_port->rds_iobuf[sds_handle] = NULL;
			break;
		default:
			DBGC ( phantom, "Phantom %p port %d unexpected SDS "
			       "opcode %02x\n",
			       phantom, phantom_port->port, sds_opcode );
			DBGC_HDA ( phantom, virt_to_bus ( sds ),
				   sds, sizeof ( *sds ) );
			break;
		}

		/* Update RDS consumer counter.  This is a lower bound
		 * for the number of descriptors that have been read
		 * by the hardware, since the hardware must have read
		 * at least one descriptor for each completion that we
		 * receive.
		 */
		rds_consumer_idx = ( ( rds_consumer_idx + 1 ) % PHN_NUM_RDS );
		phantom_port->rds_consumer_idx = rds_consumer_idx;

		/* Clear status descriptor */
		memset ( sds, 0, sizeof ( *sds ) );

		/* Update SDS consumer index */
		sds_consumer_idx = ( ( sds_consumer_idx + 1 ) % PHN_NUM_SDS );
		phantom_port->sds_consumer_idx = sds_consumer_idx;
		wmb();
		phantom_writel ( phantom, phantom_port->sds_consumer_idx,
				 phantom_port->sds_consumer_crb );
	}

	/* Refill the RX descriptor ring */
	phantom_refill_rx_ring ( netdev );

	/* Occasionally poll the link state */
	if ( phantom_port->link_poll_timer-- == 0 ) {
		phantom_poll_link_state ( phantom );
		/* Reset the link poll timer */
		phantom_port->link_poll_timer = PHN_LINK_POLL_FREQUENCY;
	}
}

/**
 * Enable/disable interrupts
 *
 * @v netdev            Network device
 * @v enable            Interrupts should be enabled
 */
static void phantom_irq ( struct net_device *netdev, int enable ) {
        struct phantom_nic_port *phantom_port = netdev_priv ( netdev );
        struct phantom_nic *phantom = phantom_port->phantom;
        static const unsigned long sw_int_mask_reg[UNM_FLASH_NUM_PORTS] = {
                UNM_NIC_REG_SW_INT_MASK_0,
                UNM_NIC_REG_SW_INT_MASK_1,
                UNM_NIC_REG_SW_INT_MASK_2,
                UNM_NIC_REG_SW_INT_MASK_3
        };
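
        /* Each port has its own software interrupt mask register;
         * the driver writes 1 to enable the port's interrupt and 0
         * to mask it.
         */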
        phantom_writel ( phantom,
                         ( enable ? 1 : 0 ),
                         sw_int_mask_reg[phantom_port->port] );
}

/** Phantom net device operations */
static struct net_device_operations phantom_operations = {
        .open     = phantom_open,
        .close    = phantom_close,
        .transmit = phantom_transmit,
        .poll     = phantom_poll,
        .irq      = phantom_irq,
};

/**
 * Map Phantom CRB window
 *
 * @v phantom           Phantom NIC
 * @v pci               PCI device
 * @ret rc              Return status code
 */
static int phantom_map_crb ( struct phantom_nic *phantom,
                             struct pci_device *pci ) {
        unsigned long bar0_start;
        unsigned long bar0_size;

        /* BAR0 may be a 2MB, 32MB or 128MB BAR; for the larger BARs
         * the CRB window occupies the last 32MB.
         */
        bar0_start = pci_bar_start ( pci, PCI_BASE_ADDRESS_0 );
        bar0_size = pci_bar_size ( pci, PCI_BASE_ADDRESS_0 );
        DBGC ( phantom, "Phantom %p BAR0 is %08lx+%lx\n",
               phantom, bar0_start, bar0_size );

        switch ( bar0_size ) {
        case ( 128 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 128MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_128m;
                break;
        case ( 32 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 32MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_32m;
                break;
        case ( 2 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 2MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_2m;
                break;
        default:
                DBGC ( phantom, "Phantom %p has bad BAR size\n", phantom );
                return -EINVAL;
        }

        phantom->bar0 = ioremap ( bar0_start, bar0_size );
        if ( ! phantom->bar0 ) {
                DBGC ( phantom, "Phantom %p could not map BAR0\n", phantom );
                return -EIO;
        }

        /* Mark current CRB window as invalid, so that the first
         * read/write will set the current window.
         */
        phantom->crb_window = -1UL;

        return 0;
}

/**
 * Read Phantom flash contents
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 */
static int phantom_read_flash ( struct phantom_nic *phantom ) {
        struct unm_board_info board_info;
        int rc;

        /* Initialise flash access */
        phantom->spi_bus.rw = phantom_spi_rw;
        phantom->flash.bus = &phantom->spi_bus;
        init_m25p32 ( &phantom->flash );
        /* Phantom doesn't support greater than 4-byte block sizes */
        phantom->flash.nvs.block_size = UNM_SPI_BLKSIZE;

        /* Read and verify board information */
        if ( ( rc = nvs_read ( &phantom->flash.nvs, UNM_BRDCFG_START,
                               &board_info, sizeof ( board_info ) ) ) != 0 ) {
                DBGC ( phantom, "Phantom %p could not read board info: %s\n",
                       phantom, strerror ( rc ) );
                return rc;
        }
        if ( board_info.magic != UNM_BDINFO_MAGIC ) {
                DBGC ( phantom, "Phantom %p has bad board info magic %lx\n",
                       phantom, board_info.magic );
                DBGC_HD ( phantom, &board_info, sizeof ( board_info ) );
                return -EINVAL;
        }
        if ( board_info.header_version != UNM_BDINFO_VERSION ) {
                DBGC ( phantom, "Phantom %p has bad board info version %lx\n",
                       phantom, board_info.header_version );
                DBGC_HD ( phantom, &board_info, sizeof ( board_info ) );
                return -EINVAL;
        }

        /* Identify board type and number of ports */
        switch ( board_info.board_type ) {
        case UNM_BRDTYPE_P3_4_GB:
                phantom->num_ports = 4;
                break;
        case UNM_BRDTYPE_P3_HMEZ:
        case UNM_BRDTYPE_P3_IMEZ:
        case UNM_BRDTYPE_P3_10G_CX4:
        case UNM_BRDTYPE_P3_10G_CX4_LP:
        case UNM_BRDTYPE_P3_10G_SFP_PLUS:
        case UNM_BRDTYPE_P3_XG_LOM:
                phantom->num_ports = 2;
                break;
        case UNM_BRDTYPE_P3_10000_BASE_T:
        case UNM_BRDTYPE_P3_10G_XFP:
                phantom->num_ports = 1;
                break;
        default:
                DBGC ( phantom, "Phantom %p unrecognised board type %#lx; "
                       "assuming single-port\n",
                       phantom, board_info.board_type );
                phantom->num_ports = 1;
                break;
        }
        DBGC ( phantom, "Phantom %p board type is %#lx (%d ports)\n",
               phantom, board_info.board_type, phantom->num_ports );

        return 0;
}

/**
 * Initialise the Phantom command PEG
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 */
static int phantom_init_cmdpeg ( struct phantom_nic *phantom ) {
        uint32_t cold_boot;
        uint32_t sw_reset;
        physaddr_t dummy_dma_phys;
        unsigned int retries;
        uint32_t cmdpeg_state;
        uint32_t last_cmdpeg_state = 0;
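
        /* Bring-up handshake: check that the hardware reset
         * completed on a cold boot, set the port modes, hand the
         * dummy DMA buffer to the firmware, then wait for the
         * command PEG to report that it has initialised.
         */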

        /* If this was a cold boot, check that the hardware came up ok */
        cold_boot = phantom_readl ( phantom, UNM_CAM_RAM_COLD_BOOT );
        if ( cold_boot == UNM_CAM_RAM_COLD_BOOT_MAGIC ) {
                DBGC ( phantom, "Phantom %p coming up from cold boot\n",
                       phantom );
                sw_reset = phantom_readl ( phantom, UNM_ROMUSB_GLB_SW_RESET );
                if ( sw_reset != UNM_ROMUSB_GLB_SW_RESET_MAGIC ) {
                        DBGC ( phantom, "Phantom %p reset failed: %08lx\n",
                               phantom, sw_reset );
                        return -EIO;
                }
        } else {
                DBGC ( phantom, "Phantom %p coming up from warm boot "
                       "(%08lx)\n", phantom, cold_boot );
        }
        /* Clear cold-boot flag */
        phantom_writel ( phantom, 0, UNM_CAM_RAM_COLD_BOOT );

        /* Set port modes */
        phantom_writel ( phantom, UNM_CAM_RAM_PORT_MODE_AUTO_NEG,
                         UNM_CAM_RAM_PORT_MODE );
        phantom_writel ( phantom, UNM_CAM_RAM_PORT_MODE_AUTO_NEG_1G,
                         UNM_CAM_RAM_WOL_PORT_MODE );

        /* Pass dummy DMA area to card */
        dummy_dma_phys = virt_to_bus ( phantom->dma_buf->dummy_dma );
        DBGC ( phantom, "Phantom %p dummy DMA at %08lx\n",
               phantom, dummy_dma_phys );
        phantom_write_hilo ( phantom, dummy_dma_phys,
                             UNM_NIC_REG_DUMMY_BUF_ADDR_LO,
                             UNM_NIC_REG_DUMMY_BUF_ADDR_HI );
        phantom_writel ( phantom, UNM_NIC_REG_DUMMY_BUF_INIT,
                         UNM_NIC_REG_DUMMY_BUF );

        /* Tell the hardware that tuning is complete */
        phantom_writel ( phantom, 1, UNM_ROMUSB_GLB_PEGTUNE_DONE );

        /* Wait for command PEG to finish initialising */
        DBGC ( phantom, "Phantom %p initialising command PEG (will take up "
               "to %d seconds)...\n", phantom, PHN_CMDPEG_INIT_TIMEOUT_SEC );
        for ( retries = 0; retries < PHN_CMDPEG_INIT_TIMEOUT_SEC; retries++ ) {
                cmdpeg_state = phantom_readl ( phantom,
                                               UNM_NIC_REG_CMDPEG_STATE );
                if ( cmdpeg_state != last_cmdpeg_state ) {
                        DBGC ( phantom, "Phantom %p command PEG state is "
                               "%08lx after %d seconds...\n",
                               phantom, cmdpeg_state, retries );
                        last_cmdpeg_state = cmdpeg_state;
                }
                if ( cmdpeg_state == UNM_NIC_REG_CMDPEG_STATE_INITIALIZED ) {
                        /* Acknowledge the PEG initialisation */
                        phantom_writel ( phantom,
                                      UNM_NIC_REG_CMDPEG_STATE_INITIALIZE_ACK,
                                      UNM_NIC_REG_CMDPEG_STATE );
                        return 0;
                }
                mdelay ( 1000 );
        }

        DBGC ( phantom, "Phantom %p timed out waiting for command PEG to "
               "initialise (status %08lx)\n", phantom, cmdpeg_state );
        return -ETIMEDOUT;
}

/**
 * Read Phantom MAC address
 *
 * @v phantom_port      Phantom NIC port
 * @v ll_addr           Buffer to fill with MAC address
 */
static void phantom_get_macaddr ( struct phantom_nic_port *phantom_port,
                                  uint8_t *ll_addr ) {
        struct phantom_nic *phantom = phantom_port->phantom;
        union {
                uint8_t mac_addr[2][ETH_ALEN];
                uint32_t dwords[3];
        } u;
        unsigned long offset;
        int i;

        /* Read the three dwords that include this MAC address and one other */
        offset = ( UNM_CAM_RAM_MAC_ADDRS +
                   ( 12 * ( phantom_port->port / 2 ) ) );
        for ( i = 0 ; i < 3 ; i++, offset += 4 ) {
                u.dwords[i] = phantom_readl ( phantom, offset );
        }

        /* Copy out the relevant MAC address; it is stored in reverse
         * byte order.
         */
        for ( i = 0 ; i < ETH_ALEN ; i++ ) {
                ll_addr[ ETH_ALEN - i - 1 ] =
                        u.mac_addr[ phantom_port->port & 1 ][i];
        }
        DBGC ( phantom, "Phantom %p port %d MAC address is %s\n",
               phantom, phantom_port->port, eth_ntoa ( ll_addr ) );
}

/**
 * Initialise Phantom receive PEG
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 */
static int phantom_init_rcvpeg ( struct phantom_nic *phantom ) {
        unsigned int retries;
        uint32_t rcvpeg_state;
        uint32_t last_rcvpeg_state = 0;

        DBGC ( phantom, "Phantom %p initialising receive PEG (will take up "
               "to %d seconds)...\n", phantom, PHN_RCVPEG_INIT_TIMEOUT_SEC );
        for ( retries = 0; retries < PHN_RCVPEG_INIT_TIMEOUT_SEC; retries++ ) {
                rcvpeg_state = phantom_readl ( phantom,
                                               UNM_NIC_REG_RCVPEG_STATE );
                if ( rcvpeg_state != last_rcvpeg_state ) {
                        DBGC ( phantom, "Phantom %p receive PEG state is "
                               "%08lx after %d seconds...\n",
                               phantom, rcvpeg_state, retries );
                        last_rcvpeg_state = rcvpeg_state;
                }
                if ( rcvpeg_state == UNM_NIC_REG_RCVPEG_STATE_INITIALIZED )
                        return 0;
                mdelay ( 1000 );
        }

        DBGC ( phantom, "Phantom %p timed out waiting for receive PEG to "
               "initialise (status %08lx)\n", phantom, rcvpeg_state );
        return -ETIMEDOUT;
}

/**
 * Probe PCI device
 *
 * @v pci               PCI device
 * @v id                PCI ID
 * @ret rc              Return status code
 */
static int phantom_probe ( struct pci_device *pci,
                           const struct pci_device_id *id __unused ) {
        struct phantom_nic *phantom;
        struct net_device *netdev;
        struct phantom_nic_port *phantom_port;
        int i;
        int rc;

        /* Phantom NICs expose multiple PCI functions, used for
         * virtualisation.  Ignore everything except function 0.
         */
        if ( PCI_FUNC ( pci->devfn ) != 0 )
                return -ENODEV;

        /* Allocate Phantom device */
        phantom = zalloc ( sizeof ( *phantom ) );
        if ( ! phantom ) {
                rc = -ENOMEM;
                goto err_alloc_phantom;
        }
        pci_set_drvdata ( pci, phantom );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map CRB */
        if ( ( rc = phantom_map_crb ( phantom, pci ) ) != 0 )
                goto err_map_crb;

        /* Read flash information */
        if ( ( rc = phantom_read_flash ( phantom ) ) != 0 )
                goto err_read_flash;

        /* Allocate net devices for each port */
        for ( i = 0 ; i < phantom->num_ports ; i++ ) {
                netdev = alloc_etherdev ( sizeof ( *phantom_port ) );
                if ( ! netdev ) {
                        rc = -ENOMEM;
                        goto err_alloc_etherdev;
                }
                phantom->netdev[i] = netdev;
                netdev_init ( netdev, &phantom_operations );
                phantom_port = netdev_priv ( netdev );
                netdev->dev = &pci->dev;
                phantom_port->phantom = phantom;
                phantom_port->port = i;
        }

        /* Allocate dummy DMA buffer and perform initial hardware handshake */
        phantom->dma_buf = malloc_dma ( sizeof ( *(phantom->dma_buf) ),
                                        UNM_DMA_BUFFER_ALIGN );
        if ( ! phantom->dma_buf ) {
                rc = -ENOMEM;
                goto err_dma_buf;
        }
        if ( ( rc = phantom_init_cmdpeg ( phantom ) ) != 0 )
                goto err_init_cmdpeg;

        /* Read MAC addresses */
        for ( i = 0 ; i < phantom->num_ports ; i++ ) {
                phantom_port = netdev_priv ( phantom->netdev[i] );
                phantom_get_macaddr ( phantom_port,
                                      phantom->netdev[i]->ll_addr );
        }

        /* Initialise the receive firmware */
        if ( ( rc = phantom_init_rcvpeg ( phantom ) ) != 0 )
                goto err_init_rcvpeg;

        /* Register network devices */
        for ( i = 0 ; i < phantom->num_ports ; i++ ) {
                if ( ( rc = register_netdev ( phantom->netdev[i] ) ) != 0 ) {
                        DBGC ( phantom, "Phantom %p could not register port "
                               "%d: %s\n", phantom, i, strerror ( rc ) );
                        goto err_register_netdev;
                }
        }

        return 0;
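
        /* Error unwind: undo each completed step in reverse order */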
        i = ( phantom->num_ports - 1 );
 err_register_netdev:
        for ( ; i >= 0 ; i-- )
                unregister_netdev ( phantom->netdev[i] );
 err_init_rcvpeg:
 err_init_cmdpeg:
        free_dma ( phantom->dma_buf, sizeof ( *(phantom->dma_buf) ) );
        phantom->dma_buf = NULL;
 err_dma_buf:
        i = ( phantom->num_ports - 1 );
 err_alloc_etherdev:
        for ( ; i >= 0 ; i-- ) {
                netdev_nullify ( phantom->netdev[i] );
                netdev_put ( phantom->netdev[i] );
        }
 err_read_flash:
 err_map_crb:
        free ( phantom );
 err_alloc_phantom:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci               PCI device
 */
static void phantom_remove ( struct pci_device *pci ) {
        struct phantom_nic *phantom = pci_get_drvdata ( pci );
        int i;

        for ( i = ( phantom->num_ports - 1 ) ; i >= 0 ; i-- )
                unregister_netdev ( phantom->netdev[i] );
        free_dma ( phantom->dma_buf, sizeof ( *(phantom->dma_buf) ) );
        phantom->dma_buf = NULL;
        for ( i = ( phantom->num_ports - 1 ) ; i >= 0 ; i-- ) {
                netdev_nullify ( phantom->netdev[i] );
                netdev_put ( phantom->netdev[i] );
        }
        free ( phantom );
}

/** Phantom PCI IDs */
static struct pci_device_id phantom_nics[] = {
        PCI_ROM ( 0x4040, 0x0100, "nx", "NX" ),
};

/** Phantom PCI driver */
struct pci_driver phantom_driver __pci_driver = {
        .ids = phantom_nics,
        .id_count = ( sizeof ( phantom_nics ) / sizeof ( phantom_nics[0] ) ),
        .probe = phantom_probe,
        .remove = phantom_remove,
};