phantom.c
/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 * Copyright (C) 2008 NetXen, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/spi.h>
#include <ipxe/settings.h>
#include "phantom.h"

/**
 * @file
 *
 * NetXen Phantom NICs
 *
 */

/** Maximum number of ports */
#define PHN_MAX_NUM_PORTS 8

/** Maximum time to wait for command PEG to initialise
 *
 * BUGxxxx
 *
 * The command PEG will currently report initialisation complete only
 * when at least one PHY has detected a link (so that the global PHY
 * clock can be set to 10G/1G as appropriate). This can take a very,
 * very long time.
 *
 * A future firmware revision should decouple PHY initialisation from
 * firmware initialisation, at which point the command PEG will report
 * initialisation complete much earlier, and this timeout can be
 * reduced.
 */
#define PHN_CMDPEG_INIT_TIMEOUT_SEC 50

/** Maximum time to wait for receive PEG to initialise */
#define PHN_RCVPEG_INIT_TIMEOUT_SEC 2

/** Maximum time to wait for firmware to accept a command */
#define PHN_ISSUE_CMD_TIMEOUT_MS 2000

/** Maximum time to wait for test memory */
#define PHN_TEST_MEM_TIMEOUT_MS 100

/** Maximum time to wait for CLP command to be issued */
#define PHN_CLP_CMD_TIMEOUT_MS 500

/** Link state poll frequency
 *
 * The link state will be checked once in every N calls to poll().
 */
#define PHN_LINK_POLL_FREQUENCY 4096

/** Number of RX descriptors */
#define PHN_NUM_RDS 32

/** RX maximum fill level. Must be strictly less than PHN_NUM_RDS. */
#define PHN_RDS_MAX_FILL 16

/** RX buffer size */
#define PHN_RX_BUFSIZE ( 32 /* max LL padding added by card */ + \
			 ETH_FRAME_LEN )

/** Number of RX status descriptors */
#define PHN_NUM_SDS 32

/** Number of TX descriptors */
#define PHN_NUM_CDS 8

/** A Phantom descriptor ring set */
struct phantom_descriptor_rings {
	/** RX descriptors */
	struct phantom_rds rds[PHN_NUM_RDS];
	/** RX status descriptors */
	struct phantom_sds sds[PHN_NUM_SDS];
	/** TX descriptors */
	union phantom_cds cds[PHN_NUM_CDS];
	/** TX consumer index */
	volatile uint32_t cmd_cons;
};

/** RX context creation request and response buffers */
struct phantom_create_rx_ctx_rqrsp {
	struct {
		struct nx_hostrq_rx_ctx_s rx_ctx;
		struct nx_hostrq_rds_ring_s rds;
		struct nx_hostrq_sds_ring_s sds;
	} __unm_dma_aligned hostrq;
	struct {
		struct nx_cardrsp_rx_ctx_s rx_ctx;
		struct nx_cardrsp_rds_ring_s rds;
		struct nx_cardrsp_sds_ring_s sds;
	} __unm_dma_aligned cardrsp;
};

/** TX context creation request and response buffers */
struct phantom_create_tx_ctx_rqrsp {
	struct {
		struct nx_hostrq_tx_ctx_s tx_ctx;
	} __unm_dma_aligned hostrq;
	struct {
		struct nx_cardrsp_tx_ctx_s tx_ctx;
	} __unm_dma_aligned cardrsp;
};

/** A Phantom NIC */
struct phantom_nic {
	/** BAR 0 */
	void *bar0;
	/** Current CRB window */
	unsigned long crb_window;
	/** CRB window access method */
	unsigned long ( *crb_access ) ( struct phantom_nic *phantom,
					unsigned long reg );
	/** Port number */
	unsigned int port;

	/** RX context ID */
	uint16_t rx_context_id;
	/** RX descriptor producer CRB offset */
	unsigned long rds_producer_crb;
	/** RX status descriptor consumer CRB offset */
	unsigned long sds_consumer_crb;
	/** RX interrupt mask CRB offset */
	unsigned long sds_irq_mask_crb;
	/** RX interrupts enabled */
	unsigned int sds_irq_enabled;
	/** RX producer index */
	unsigned int rds_producer_idx;
	/** RX consumer index */
	unsigned int rds_consumer_idx;
	/** RX status consumer index */
	unsigned int sds_consumer_idx;
	/** RX I/O buffers */
	struct io_buffer *rds_iobuf[PHN_RDS_MAX_FILL];

	/** TX context ID */
	uint16_t tx_context_id;
	/** TX descriptor producer CRB offset */
	unsigned long cds_producer_crb;
	/** TX producer index */
	unsigned int cds_producer_idx;
	/** TX consumer index */
	unsigned int cds_consumer_idx;
	/** TX I/O buffers */
	struct io_buffer *cds_iobuf[PHN_NUM_CDS];

	/** Descriptor rings */
	struct phantom_descriptor_rings *desc;
	/** Last known link state */
	uint32_t link_state;
	/** Link state poll timer */
	unsigned long link_poll_timer;
	/** Non-volatile settings */
	struct settings settings;
};

/** Interrupt mask registers */
static const unsigned long phantom_irq_mask_reg[PHN_MAX_NUM_PORTS] = {
	UNM_PCIE_IRQ_MASK_F0,
	UNM_PCIE_IRQ_MASK_F1,
	UNM_PCIE_IRQ_MASK_F2,
	UNM_PCIE_IRQ_MASK_F3,
	UNM_PCIE_IRQ_MASK_F4,
	UNM_PCIE_IRQ_MASK_F5,
	UNM_PCIE_IRQ_MASK_F6,
	UNM_PCIE_IRQ_MASK_F7,
};

/** Interrupt status registers */
static const unsigned long phantom_irq_status_reg[PHN_MAX_NUM_PORTS] = {
	UNM_PCIE_IRQ_STATUS_F0,
	UNM_PCIE_IRQ_STATUS_F1,
	UNM_PCIE_IRQ_STATUS_F2,
	UNM_PCIE_IRQ_STATUS_F3,
	UNM_PCIE_IRQ_STATUS_F4,
	UNM_PCIE_IRQ_STATUS_F5,
	UNM_PCIE_IRQ_STATUS_F6,
	UNM_PCIE_IRQ_STATUS_F7,
};

/***************************************************************************
 *
 * CRB register access
 *
 */

/**
 * Prepare for access to CRB register via 128MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_128m ( struct phantom_nic *phantom,
					       unsigned long reg ) {
	unsigned long offset = ( 0x6000000 + ( reg & 0x1ffffff ) );
	uint32_t window = ( reg & 0x2000000 );
	uint32_t verify_window;

	if ( phantom->crb_window != window ) {

		/* Write to the CRB window register */
		writel ( window, phantom->bar0 + UNM_128M_CRB_WINDOW );

		/* Ensure that the write has reached the card */
		verify_window = readl ( phantom->bar0 + UNM_128M_CRB_WINDOW );
		assert ( verify_window == window );

		/* Record new window */
		phantom->crb_window = window;
	}

	return offset;
}

/**
 * Prepare for access to CRB register via 32MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_32m ( struct phantom_nic *phantom,
					      unsigned long reg ) {
	unsigned long offset = ( reg & 0x1ffffff );
	uint32_t window = ( reg & 0x2000000 );
	uint32_t verify_window;

	if ( phantom->crb_window != window ) {

		/* Write to the CRB window register */
		writel ( window, phantom->bar0 + UNM_32M_CRB_WINDOW );

		/* Ensure that the write has reached the card */
		verify_window = readl ( phantom->bar0 + UNM_32M_CRB_WINDOW );
		assert ( verify_window == window );

		/* Record new window */
		phantom->crb_window = window;
	}

	return offset;
}

/**
 * Prepare for access to CRB register via 2MB BAR
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret offset Register offset within PCI BAR0
 */
static unsigned long phantom_crb_access_2m ( struct phantom_nic *phantom,
					     unsigned long reg ) {
	static const struct {
		uint8_t block;
		uint16_t window_hi;
	} reg_window_hi[] = {
		{ UNM_CRB_BLK_PCIE, 0x773 },
		{ UNM_CRB_BLK_CAM, 0x416 },
		{ UNM_CRB_BLK_ROMUSB, 0x421 },
		{ UNM_CRB_BLK_TEST, 0x295 },
		{ UNM_CRB_BLK_PEG_0, 0x340 },
		{ UNM_CRB_BLK_PEG_1, 0x341 },
		{ UNM_CRB_BLK_PEG_2, 0x342 },
		{ UNM_CRB_BLK_PEG_3, 0x343 },
		{ UNM_CRB_BLK_PEG_4, 0x34b },
	};
	unsigned int block = UNM_CRB_BLK ( reg );
	unsigned long offset = UNM_CRB_OFFSET ( reg );
	uint32_t window;
	uint32_t verify_window;
	unsigned int i;
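
	/* Select the 64kB window for this register's block, then
	 * return the register's offset within the fixed window
	 * region at BAR offset 0x1e0000.
	 */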
	for ( i = 0 ; i < ( sizeof ( reg_window_hi ) /
			    sizeof ( reg_window_hi[0] ) ) ; i++ ) {

		if ( reg_window_hi[i].block != block )
			continue;

		window = ( ( reg_window_hi[i].window_hi << 20 ) |
			   ( offset & 0x000f0000 ) );

		if ( phantom->crb_window != window ) {

			/* Write to the CRB window register */
			writel ( window, phantom->bar0 + UNM_2M_CRB_WINDOW );

			/* Ensure that the write has reached the card */
			verify_window = readl ( phantom->bar0 +
						UNM_2M_CRB_WINDOW );
			assert ( verify_window == window );

			/* Record new window */
			phantom->crb_window = window;
		}

		return ( 0x1e0000 + ( offset & 0xffff ) );
	}

	assert ( 0 );
	return 0;
}

/**
 * Read from Phantom CRB register
 *
 * @v phantom Phantom NIC
 * @v reg Register offset within abstract address space
 * @ret value Register value
 */
static uint32_t phantom_readl ( struct phantom_nic *phantom,
				unsigned long reg ) {
	unsigned long offset;

	offset = phantom->crb_access ( phantom, reg );
	return readl ( phantom->bar0 + offset );
}

/**
 * Write to Phantom CRB register
 *
 * @v phantom Phantom NIC
 * @v value Register value
 * @v reg Register offset within abstract address space
 */
static void phantom_writel ( struct phantom_nic *phantom, uint32_t value,
			     unsigned long reg ) {
	unsigned long offset;

	offset = phantom->crb_access ( phantom, reg );
	writel ( value, phantom->bar0 + offset );
}

/**
 * Write to Phantom CRB HI/LO register pair
 *
 * @v phantom Phantom NIC
 * @v value Register value
 * @v lo_offset LO register offset within CRB
 * @v hi_offset HI register offset within CRB
 */
static inline void phantom_write_hilo ( struct phantom_nic *phantom,
					uint64_t value,
					unsigned long lo_offset,
					unsigned long hi_offset ) {
	uint32_t lo = ( value & 0xffffffffUL );
	uint32_t hi = ( value >> 32 );

	phantom_writel ( phantom, lo, lo_offset );
	phantom_writel ( phantom, hi, hi_offset );
}

/***************************************************************************
 *
 * Firmware message buffer access (for debug)
 *
 */

/**
 * Read from Phantom test memory
 *
 * @v phantom Phantom NIC
 * @v offset Offset within test memory
 * @v buf 8-byte buffer to fill
 * @ret rc Return status code
 */
static int phantom_read_test_mem_block ( struct phantom_nic *phantom,
					 unsigned long offset,
					 uint32_t buf[2] ) {
	unsigned int retries;
	uint32_t test_control;

	phantom_write_hilo ( phantom, offset, UNM_TEST_ADDR_LO,
			     UNM_TEST_ADDR_HI );
	phantom_writel ( phantom, UNM_TEST_CONTROL_ENABLE, UNM_TEST_CONTROL );
	phantom_writel ( phantom,
			 ( UNM_TEST_CONTROL_ENABLE | UNM_TEST_CONTROL_START ),
			 UNM_TEST_CONTROL );

	for ( retries = 0 ; retries < PHN_TEST_MEM_TIMEOUT_MS ; retries++ ) {
		test_control = phantom_readl ( phantom, UNM_TEST_CONTROL );
		if ( ( test_control & UNM_TEST_CONTROL_BUSY ) == 0 ) {
			buf[0] = phantom_readl ( phantom, UNM_TEST_RDDATA_LO );
			buf[1] = phantom_readl ( phantom, UNM_TEST_RDDATA_HI );
			return 0;
		}
		mdelay ( 1 );
	}

	DBGC ( phantom, "Phantom %p timed out waiting for test memory\n",
	       phantom );
	return -ETIMEDOUT;
}

/**
 * Read single byte from Phantom test memory
 *
 * @v phantom Phantom NIC
 * @v offset Offset within test memory
 * @ret byte Byte read, or negative error
 */
static int phantom_read_test_mem ( struct phantom_nic *phantom,
				   unsigned long offset ) {
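	/* Reads are cached: the most recently fetched 8-byte block is
	 * kept in a static buffer, so that consecutive byte reads
	 * from the same block need only one test-memory access.
	 */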
	static union {
		uint8_t bytes[8];
		uint32_t dwords[2];
	} cache;
	static unsigned long cache_offset = -1UL;
	unsigned long sub_offset;
	int rc;

	sub_offset = ( offset & ( sizeof ( cache ) - 1 ) );
	offset = ( offset & ~( sizeof ( cache ) - 1 ) );

	if ( cache_offset != offset ) {
		if ( ( rc = phantom_read_test_mem_block ( phantom, offset,
						     cache.dwords ) ) != 0 )
			return rc;
		cache_offset = offset;
	}

	return cache.bytes[sub_offset];
}

/**
 * Dump Phantom firmware dmesg log
 *
 * @v phantom Phantom NIC
 * @v log Log number
 * @v max_lines Maximum number of lines to show, or -1 to show all
 * @ret rc Return status code
 */
static int phantom_dmesg ( struct phantom_nic *phantom, unsigned int log,
			   unsigned int max_lines ) {
	uint32_t head;
	uint32_t tail;
	uint32_t sig;
	uint32_t offset;
	int byte;

	/* Optimise out for non-debug builds */
	if ( ! DBG_LOG )
		return 0;

	/* Locate log */
	head = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_HEAD ( log ) );
	tail = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_TAIL ( log ) );
	sig = phantom_readl ( phantom, UNM_CAM_RAM_DMESG_SIG ( log ) );
	DBGC ( phantom, "Phantom %p firmware dmesg buffer %d (%08x-%08x)\n",
	       phantom, log, head, tail );
	assert ( ( head & 0x07 ) == 0 );
	if ( sig != UNM_CAM_RAM_DMESG_SIG_MAGIC ) {
		DBGC ( phantom, "Warning: bad signature %08x (want %08lx)\n",
		       sig, UNM_CAM_RAM_DMESG_SIG_MAGIC );
	}

	/* Locate start of last (max_lines) lines */
	for ( offset = tail ; offset > head ; offset-- ) {
		if ( ( byte = phantom_read_test_mem ( phantom,
						      ( offset - 1 ) ) ) < 0 )
			return byte;
		if ( ( byte == '\n' ) && ( max_lines-- == 0 ) )
			break;
	}

	/* Print lines */
	for ( ; offset < tail ; offset++ ) {
		if ( ( byte = phantom_read_test_mem ( phantom, offset ) ) < 0 )
			return byte;
		DBG ( "%c", byte );
	}
	DBG ( "\n" );
	return 0;
}

/**
 * Dump Phantom firmware dmesg logs
 *
 * @v phantom Phantom NIC
 * @v max_lines Maximum number of lines to show, or -1 to show all
 */
static void __attribute__ (( unused ))
phantom_dmesg_all ( struct phantom_nic *phantom, unsigned int max_lines ) {
	unsigned int i;

	for ( i = 0 ; i < UNM_CAM_RAM_NUM_DMESG_BUFFERS ; i++ )
		phantom_dmesg ( phantom, i, max_lines );
}

/***************************************************************************
 *
 * Firmware interface
 *
 */

/**
 * Wait for firmware to accept command
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_wait_for_cmd ( struct phantom_nic *phantom ) {
	unsigned int retries;
	uint32_t cdrp;

	for ( retries = 0 ; retries < PHN_ISSUE_CMD_TIMEOUT_MS ; retries++ ) {
		mdelay ( 1 );
		cdrp = phantom_readl ( phantom, UNM_NIC_REG_NX_CDRP );
		if ( NX_CDRP_IS_RSP ( cdrp ) ) {
			switch ( NX_CDRP_FORM_RSP ( cdrp ) ) {
			case NX_CDRP_RSP_OK:
				return 0;
			case NX_CDRP_RSP_FAIL:
				return -EIO;
			case NX_CDRP_RSP_TIMEOUT:
				return -ETIMEDOUT;
			default:
				return -EPROTO;
			}
		}
	}

	DBGC ( phantom, "Phantom %p timed out waiting for firmware to accept "
	       "command\n", phantom );
	return -ETIMEDOUT;
}

/**
 * Issue command to firmware
 *
 * @v phantom Phantom NIC
 * @v command Firmware command
 * @v arg1 Argument 1
 * @v arg2 Argument 2
 * @v arg3 Argument 3
 * @ret rc Return status code
 */
static int phantom_issue_cmd ( struct phantom_nic *phantom,
			       uint32_t command, uint32_t arg1, uint32_t arg2,
			       uint32_t arg3 ) {
	uint32_t signature;
	int rc;

	/* Issue command */
	signature = NX_CDRP_SIGNATURE_MAKE ( phantom->port,
					     NXHAL_VERSION );
	DBGC2 ( phantom, "Phantom %p issuing command %08x (%08x, %08x, "
		"%08x)\n", phantom, command, arg1, arg2, arg3 );
	phantom_writel ( phantom, signature, UNM_NIC_REG_NX_SIGN );
	phantom_writel ( phantom, arg1, UNM_NIC_REG_NX_ARG1 );
	phantom_writel ( phantom, arg2, UNM_NIC_REG_NX_ARG2 );
	phantom_writel ( phantom, arg3, UNM_NIC_REG_NX_ARG3 );
	phantom_writel ( phantom, NX_CDRP_FORM_CMD ( command ),
			 UNM_NIC_REG_NX_CDRP );

	/* Wait for command to be accepted */
	if ( ( rc = phantom_wait_for_cmd ( phantom ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not issue command: %s\n",
		       phantom, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Issue buffer-format command to firmware
 *
 * @v phantom Phantom NIC
 * @v command Firmware command
 * @v buffer Buffer to pass to firmware
 * @v len Length of buffer
 * @ret rc Return status code
 */
static int phantom_issue_buf_cmd ( struct phantom_nic *phantom,
				   uint32_t command, void *buffer,
				   size_t len ) {
	uint64_t physaddr;

	physaddr = virt_to_bus ( buffer );
	return phantom_issue_cmd ( phantom, command, ( physaddr >> 32 ),
				   ( physaddr & 0xffffffffUL ), len );
}

/**
 * Create Phantom RX context
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) {
	struct phantom_create_rx_ctx_rqrsp *buf;
	int rc;

	/* Allocate context creation buffer */
	buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
	if ( ! buf ) {
		rc = -ENOMEM;
		goto out;
	}
	memset ( buf, 0, sizeof ( *buf ) );

	/* Prepare request */
	buf->hostrq.rx_ctx.host_rsp_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &buf->cardrsp ) );
	buf->hostrq.rx_ctx.capabilities[0] =
		cpu_to_le32 ( NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN );
	buf->hostrq.rx_ctx.host_int_crb_mode =
		cpu_to_le32 ( NX_HOST_INT_CRB_MODE_SHARED );
	buf->hostrq.rx_ctx.host_rds_crb_mode =
		cpu_to_le32 ( NX_HOST_RDS_CRB_MODE_UNIQUE );
	buf->hostrq.rx_ctx.rds_ring_offset = cpu_to_le32 ( 0 );
	buf->hostrq.rx_ctx.sds_ring_offset =
		cpu_to_le32 ( sizeof ( buf->hostrq.rds ) );
	buf->hostrq.rx_ctx.num_rds_rings = cpu_to_le16 ( 1 );
	buf->hostrq.rx_ctx.num_sds_rings = cpu_to_le16 ( 1 );
	buf->hostrq.rds.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom->desc->rds ) );
	buf->hostrq.rds.buff_size = cpu_to_le64 ( PHN_RX_BUFSIZE );
	buf->hostrq.rds.ring_size = cpu_to_le32 ( PHN_NUM_RDS );
	buf->hostrq.rds.ring_kind = cpu_to_le32 ( NX_RDS_RING_TYPE_NORMAL );
	buf->hostrq.sds.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom->desc->sds ) );
	buf->hostrq.sds.ring_size = cpu_to_le32 ( PHN_NUM_SDS );
	DBGC ( phantom, "Phantom %p creating RX context\n", phantom );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
		    &buf->hostrq, sizeof ( buf->hostrq ) );

	/* Issue request */
	if ( ( rc = phantom_issue_buf_cmd ( phantom,
					    NX_CDRP_CMD_CREATE_RX_CTX,
					    &buf->hostrq,
					    sizeof ( buf->hostrq ) ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not create RX context: "
		       "%s\n", phantom, strerror ( rc ) );
		DBGC ( phantom, "Request:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
			   &buf->hostrq, sizeof ( buf->hostrq ) );
		DBGC ( phantom, "Response:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
			   &buf->cardrsp, sizeof ( buf->cardrsp ) );
		goto out;
	}

	/* Retrieve context parameters */
	phantom->rx_context_id =
		le16_to_cpu ( buf->cardrsp.rx_ctx.context_id );
	phantom->rds_producer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu ( buf->cardrsp.rds.host_producer_crb ) );
	phantom->sds_consumer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu ( buf->cardrsp.sds.host_consumer_crb ) );
	phantom->sds_irq_mask_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu ( buf->cardrsp.sds.interrupt_crb ) );
	DBGC ( phantom, "Phantom %p created RX context (id %04x, port phys "
	       "%02x virt %02x)\n", phantom, phantom->rx_context_id,
	       buf->cardrsp.rx_ctx.phys_port, buf->cardrsp.rx_ctx.virt_port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
		    &buf->cardrsp, sizeof ( buf->cardrsp ) );
	DBGC ( phantom, "Phantom %p RDS producer CRB is %08lx\n",
	       phantom, phantom->rds_producer_crb );
	DBGC ( phantom, "Phantom %p SDS consumer CRB is %08lx\n",
	       phantom, phantom->sds_consumer_crb );
	DBGC ( phantom, "Phantom %p SDS interrupt mask CRB is %08lx\n",
	       phantom, phantom->sds_irq_mask_crb );

 out:
	free_dma ( buf, sizeof ( *buf ) );
	return rc;
}

/**
 * Destroy Phantom RX context
 *
 * @v phantom Phantom NIC
 */
static void phantom_destroy_rx_ctx ( struct phantom_nic *phantom ) {
	int rc;

	DBGC ( phantom, "Phantom %p destroying RX context (id %04x)\n",
	       phantom, phantom->rx_context_id );

	/* Issue request */
	if ( ( rc = phantom_issue_cmd ( phantom,
					NX_CDRP_CMD_DESTROY_RX_CTX,
					phantom->rx_context_id,
					NX_DESTROY_CTX_RESET, 0 ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not destroy RX context: "
		       "%s\n", phantom, strerror ( rc ) );
		/* We're probably screwed */
		return;
	}

	/* Clear context parameters */
	phantom->rx_context_id = 0;
	phantom->rds_producer_crb = 0;
	phantom->sds_consumer_crb = 0;

	/* Reset software counters */
	phantom->rds_producer_idx = 0;
	phantom->rds_consumer_idx = 0;
	phantom->sds_consumer_idx = 0;
}

/**
 * Create Phantom TX context
 *
 * @v phantom Phantom NIC
 * @ret rc Return status code
 */
static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) {
	struct phantom_create_tx_ctx_rqrsp *buf;
	int rc;

	/* Allocate context creation buffer */
	buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
	if ( ! buf ) {
		rc = -ENOMEM;
		goto out;
	}
	memset ( buf, 0, sizeof ( *buf ) );

	/* Prepare request */
	buf->hostrq.tx_ctx.host_rsp_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &buf->cardrsp ) );
	buf->hostrq.tx_ctx.cmd_cons_dma_addr =
		cpu_to_le64 ( virt_to_bus ( &phantom->desc->cmd_cons ) );
	buf->hostrq.tx_ctx.capabilities[0] =
		cpu_to_le32 ( NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN );
	buf->hostrq.tx_ctx.host_int_crb_mode =
		cpu_to_le32 ( NX_HOST_INT_CRB_MODE_SHARED );
	buf->hostrq.tx_ctx.cds_ring.host_phys_addr =
		cpu_to_le64 ( virt_to_bus ( phantom->desc->cds ) );
	buf->hostrq.tx_ctx.cds_ring.ring_size = cpu_to_le32 ( PHN_NUM_CDS );
	DBGC ( phantom, "Phantom %p creating TX context\n", phantom );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
		    &buf->hostrq, sizeof ( buf->hostrq ) );

	/* Issue request */
	if ( ( rc = phantom_issue_buf_cmd ( phantom,
					    NX_CDRP_CMD_CREATE_TX_CTX,
					    &buf->hostrq,
					    sizeof ( buf->hostrq ) ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not create TX context: "
		       "%s\n", phantom, strerror ( rc ) );
		DBGC ( phantom, "Request:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->hostrq ),
			   &buf->hostrq, sizeof ( buf->hostrq ) );
		DBGC ( phantom, "Response:\n" );
		DBGC_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
			   &buf->cardrsp, sizeof ( buf->cardrsp ) );
		goto out;
	}

	/* Retrieve context parameters */
	phantom->tx_context_id =
		le16_to_cpu ( buf->cardrsp.tx_ctx.context_id );
	phantom->cds_producer_crb =
		( UNM_CAM_RAM +
		  le32_to_cpu(buf->cardrsp.tx_ctx.cds_ring.host_producer_crb));
	DBGC ( phantom, "Phantom %p created TX context (id %04x, port phys "
	       "%02x virt %02x)\n", phantom, phantom->tx_context_id,
	       buf->cardrsp.tx_ctx.phys_port, buf->cardrsp.tx_ctx.virt_port );
	DBGC2_HDA ( phantom, virt_to_bus ( &buf->cardrsp ),
		    &buf->cardrsp, sizeof ( buf->cardrsp ) );
	DBGC ( phantom, "Phantom %p CDS producer CRB is %08lx\n",
	       phantom, phantom->cds_producer_crb );

 out:
	free_dma ( buf, sizeof ( *buf ) );
	return rc;
}

/**
 * Destroy Phantom TX context
 *
 * @v phantom Phantom NIC
 */
static void phantom_destroy_tx_ctx ( struct phantom_nic *phantom ) {
	int rc;

	DBGC ( phantom, "Phantom %p destroying TX context (id %04x)\n",
	       phantom, phantom->tx_context_id );

	/* Issue request */
	if ( ( rc = phantom_issue_cmd ( phantom,
					NX_CDRP_CMD_DESTROY_TX_CTX,
					phantom->tx_context_id,
					NX_DESTROY_CTX_RESET, 0 ) ) != 0 ) {
		DBGC ( phantom, "Phantom %p could not destroy TX context: "
		       "%s\n", phantom, strerror ( rc ) );
		/* We're probably screwed */
		return;
	}

	/* Clear context parameters */
	phantom->tx_context_id = 0;
	phantom->cds_producer_crb = 0;

	/* Reset software counters */
	phantom->cds_producer_idx = 0;
	phantom->cds_consumer_idx = 0;
}

/***************************************************************************
 *
 * Descriptor ring management
 *
 */

/**
 * Allocate Phantom RX descriptor
 *
 * @v phantom Phantom NIC
 * @ret index RX descriptor index, or negative error
 */
static int phantom_alloc_rds ( struct phantom_nic *phantom ) {
	unsigned int rds_producer_idx;
	unsigned int next_rds_producer_idx;

	/* Check for space in the ring. RX descriptors are consumed
	 * out of order, but they are *read* by the hardware in strict
	 * order. We maintain a pessimistic consumer index, which is
	 * guaranteed never to be an overestimate of the number of
	 * descriptors read by the hardware.
	 */
	rds_producer_idx = phantom->rds_producer_idx;
	next_rds_producer_idx = ( ( rds_producer_idx + 1 ) % PHN_NUM_RDS );
	if ( next_rds_producer_idx == phantom->rds_consumer_idx ) {
		DBGC ( phantom, "Phantom %p RDS ring full (index %d not "
		       "consumed)\n", phantom, next_rds_producer_idx );
		return -ENOBUFS;
	}

	return rds_producer_idx;
}

/**
 * Post Phantom RX descriptor
 *
 * @v phantom Phantom NIC
 * @v rds RX descriptor
 */
static void phantom_post_rds ( struct phantom_nic *phantom,
			       struct phantom_rds *rds ) {
	unsigned int rds_producer_idx;
	unsigned int next_rds_producer_idx;
	struct phantom_rds *entry;

	/* Copy descriptor to ring */
	rds_producer_idx = phantom->rds_producer_idx;
	entry = &phantom->desc->rds[rds_producer_idx];
	memcpy ( entry, rds, sizeof ( *entry ) );
	DBGC2 ( phantom, "Phantom %p posting RDS %ld (slot %d):\n",
		phantom, NX_GET ( rds, handle ), rds_producer_idx );
	DBGC2_HDA ( phantom, virt_to_bus ( entry ), entry, sizeof ( *entry ) );

	/* Update producer index */
	next_rds_producer_idx = ( ( rds_producer_idx + 1 ) % PHN_NUM_RDS );
	phantom->rds_producer_idx = next_rds_producer_idx;
	wmb();
	phantom_writel ( phantom, phantom->rds_producer_idx,
			 phantom->rds_producer_crb );
}

/**
 * Allocate Phantom TX descriptor
 *
 * @v phantom Phantom NIC
 * @ret index TX descriptor index, or negative error
 */
static int phantom_alloc_cds ( struct phantom_nic *phantom ) {
	unsigned int cds_producer_idx;
	unsigned int next_cds_producer_idx;

	/* Check for space in the ring. TX descriptors are consumed
	 * in strict order, so we just check for a collision against
	 * the consumer index.
	 */
	cds_producer_idx = phantom->cds_producer_idx;
	next_cds_producer_idx = ( ( cds_producer_idx + 1 ) % PHN_NUM_CDS );
	if ( next_cds_producer_idx == phantom->cds_consumer_idx ) {
		DBGC ( phantom, "Phantom %p CDS ring full (index %d not "
		       "consumed)\n", phantom, next_cds_producer_idx );
		return -ENOBUFS;
	}

	return cds_producer_idx;
}

/**
 * Post Phantom TX descriptor
 *
 * @v phantom Phantom NIC
 * @v cds TX descriptor
 */
static void phantom_post_cds ( struct phantom_nic *phantom,
			       union phantom_cds *cds ) {
	unsigned int cds_producer_idx;
	unsigned int next_cds_producer_idx;
	union phantom_cds *entry;

	/* Copy descriptor to ring */
	cds_producer_idx = phantom->cds_producer_idx;
	entry = &phantom->desc->cds[cds_producer_idx];
	memcpy ( entry, cds, sizeof ( *entry ) );
	DBGC2 ( phantom, "Phantom %p posting CDS %d:\n",
		phantom, cds_producer_idx );
	DBGC2_HDA ( phantom, virt_to_bus ( entry ), entry, sizeof ( *entry ) );

	/* Update producer index */
	next_cds_producer_idx = ( ( cds_producer_idx + 1 ) % PHN_NUM_CDS );
	phantom->cds_producer_idx = next_cds_producer_idx;
	wmb();
	phantom_writel ( phantom, phantom->cds_producer_idx,
			 phantom->cds_producer_crb );
}

/***************************************************************************
 *
 * MAC address management
 *
 */

/**
 * Add/remove MAC address
 *
 * @v phantom Phantom NIC
 * @v ll_addr MAC address to add or remove
 * @v opcode MAC request opcode
 * @ret rc Return status code
 */
static int phantom_update_macaddr ( struct phantom_nic *phantom,
				    const uint8_t *ll_addr,
				    unsigned int opcode ) {
	union phantom_cds cds;
	int index;

	/* Get descriptor ring entry */
	index = phantom_alloc_cds ( phantom );
	if ( index < 0 )
		return index;

	/* Fill descriptor ring entry */
	memset ( &cds, 0, sizeof ( cds ) );
	NX_FILL_1 ( &cds, 0,
		    nic_request.common.opcode, UNM_NIC_REQUEST );
	NX_FILL_2 ( &cds, 1,
		    nic_request.header.opcode, UNM_MAC_EVENT,
		    nic_request.header.context_id, phantom->port );
	NX_FILL_7 ( &cds, 2,
		    nic_request.body.mac_request.opcode, opcode,
		    nic_request.body.mac_request.mac_addr_0, ll_addr[0],
		    nic_request.body.mac_request.mac_addr_1, ll_addr[1],
		    nic_request.body.mac_request.mac_addr_2, ll_addr[2],
		    nic_request.body.mac_request.mac_addr_3, ll_addr[3],
		    nic_request.body.mac_request.mac_addr_4, ll_addr[4],
		    nic_request.body.mac_request.mac_addr_5, ll_addr[5] );

	/* Post descriptor */
	phantom_post_cds ( phantom, &cds );

	return 0;
}

/**
 * Add MAC address
 *
 * @v phantom Phantom NIC
 * @v ll_addr MAC address to add
 * @ret rc Return status code
 */
static inline int phantom_add_macaddr ( struct phantom_nic *phantom,
					const uint8_t *ll_addr ) {

	DBGC ( phantom, "Phantom %p adding MAC address %s\n",
	       phantom, eth_ntoa ( ll_addr ) );

	return phantom_update_macaddr ( phantom, ll_addr, UNM_MAC_ADD );
}

/**
 * Remove MAC address
 *
 * @v phantom Phantom NIC
 * @v ll_addr MAC address to remove
 * @ret rc Return status code
 */
static inline int phantom_del_macaddr ( struct phantom_nic *phantom,
					const uint8_t *ll_addr ) {

	DBGC ( phantom, "Phantom %p removing MAC address %s\n",
	       phantom, eth_ntoa ( ll_addr ) );

	return phantom_update_macaddr ( phantom, ll_addr, UNM_MAC_DEL );
}

/***************************************************************************
 *
 * Link state detection
 *
 */

/**
 * Poll link state
 *
 * @v netdev Network device
 */
static void phantom_poll_link_state ( struct net_device *netdev ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	uint32_t xg_state_p3;
	unsigned int link;

	/* Read link state */
	xg_state_p3 = phantom_readl ( phantom, UNM_NIC_REG_XG_STATE_P3 );

	/* If there is no change, do nothing */
	if ( phantom->link_state == xg_state_p3 )
		return;

	/* Record new link state */
	DBGC ( phantom, "Phantom %p new link state %08x (was %08x)\n",
	       phantom, xg_state_p3, phantom->link_state );
	phantom->link_state = xg_state_p3;

	/* Indicate link state to iPXE */
	link = UNM_NIC_REG_XG_STATE_P3_LINK ( phantom->port,
					      phantom->link_state );
	switch ( link ) {
	case UNM_NIC_REG_XG_STATE_P3_LINK_UP:
		DBGC ( phantom, "Phantom %p link is up\n", phantom );
		netdev_link_up ( netdev );
		break;
	case UNM_NIC_REG_XG_STATE_P3_LINK_DOWN:
		DBGC ( phantom, "Phantom %p link is down\n", phantom );
		netdev_link_down ( netdev );
		break;
	default:
		DBGC ( phantom, "Phantom %p bad link state %d\n",
		       phantom, link );
		break;
	}
}

/***************************************************************************
 *
 * Main driver body
 *
 */

/**
 * Refill descriptor ring
 *
 * @v netdev Net device
 */
static void phantom_refill_rx_ring ( struct net_device *netdev ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	struct io_buffer *iobuf;
	struct phantom_rds rds;
	unsigned int handle;
	int index;
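
	/* The buffer slot number doubles as the descriptor handle,
	 * which the hardware echoes back in the corresponding RX
	 * status descriptor.
	 */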
	for ( handle = 0 ; handle < PHN_RDS_MAX_FILL ; handle++ ) {

		/* Skip this index if the descriptor has not yet been
		 * consumed.
		 */
		if ( phantom->rds_iobuf[handle] != NULL )
			continue;

		/* Allocate descriptor ring entry */
		index = phantom_alloc_rds ( phantom );
		assert ( PHN_RDS_MAX_FILL < PHN_NUM_RDS );
		assert ( index >= 0 ); /* Guaranteed by MAX_FILL < NUM_RDS */

		/* Try to allocate an I/O buffer */
		iobuf = alloc_iob ( PHN_RX_BUFSIZE );
		if ( ! iobuf ) {
			/* Failure is non-fatal; we will retry later */
			netdev_rx_err ( netdev, NULL, -ENOMEM );
			break;
		}

		/* Fill descriptor ring entry */
		memset ( &rds, 0, sizeof ( rds ) );
		NX_FILL_2 ( &rds, 0,
			    handle, handle,
			    length, iob_len ( iobuf ) );
		NX_FILL_1 ( &rds, 1,
			    dma_addr, virt_to_bus ( iobuf->data ) );

		/* Record I/O buffer */
		assert ( phantom->rds_iobuf[handle] == NULL );
		phantom->rds_iobuf[handle] = iobuf;

		/* Post descriptor */
		phantom_post_rds ( phantom, &rds );
	}
}

/**
 * Open NIC
 *
 * @v netdev Net device
 * @ret rc Return status code
 */
static int phantom_open ( struct net_device *netdev ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	int rc;

	/* Allocate and zero descriptor rings */
	phantom->desc = malloc_dma ( sizeof ( *(phantom->desc) ),
				     UNM_DMA_BUFFER_ALIGN );
	if ( ! phantom->desc ) {
		rc = -ENOMEM;
		goto err_alloc_desc;
	}
	memset ( phantom->desc, 0, sizeof ( *(phantom->desc) ) );

	/* Create RX context */
	if ( ( rc = phantom_create_rx_ctx ( phantom ) ) != 0 )
		goto err_create_rx_ctx;

	/* Create TX context */
	if ( ( rc = phantom_create_tx_ctx ( phantom ) ) != 0 )
		goto err_create_tx_ctx;

	/* Fill the RX descriptor ring */
	phantom_refill_rx_ring ( netdev );

	/* Add MAC addresses
	 *
	 * BUG5583
	 *
	 * We would like to be able to enable receiving all multicast
	 * packets (or, failing that, promiscuous mode), but the
	 * firmware doesn't currently support this.
	 */
	if ( ( rc = phantom_add_macaddr ( phantom,
					  netdev->ll_broadcast ) ) != 0 )
		goto err_add_macaddr_broadcast;
	if ( ( rc = phantom_add_macaddr ( phantom,
					  netdev->ll_addr ) ) != 0 )
		goto err_add_macaddr_unicast;

	return 0;

	phantom_del_macaddr ( phantom, netdev->ll_addr );
 err_add_macaddr_unicast:
	phantom_del_macaddr ( phantom, netdev->ll_broadcast );
 err_add_macaddr_broadcast:
	phantom_destroy_tx_ctx ( phantom );
 err_create_tx_ctx:
	phantom_destroy_rx_ctx ( phantom );
 err_create_rx_ctx:
	free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
	phantom->desc = NULL;
 err_alloc_desc:
	return rc;
}

/**
 * Close NIC
 *
 * @v netdev Net device
 */
static void phantom_close ( struct net_device *netdev ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	struct io_buffer *iobuf;
	unsigned int i;

	/* Shut down the port */
	phantom_del_macaddr ( phantom, netdev->ll_addr );
	phantom_del_macaddr ( phantom, netdev->ll_broadcast );
	phantom_destroy_tx_ctx ( phantom );
	phantom_destroy_rx_ctx ( phantom );
	free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
	phantom->desc = NULL;

	/* Flush any uncompleted descriptors */
	for ( i = 0 ; i < PHN_RDS_MAX_FILL ; i++ ) {
		iobuf = phantom->rds_iobuf[i];
		if ( iobuf ) {
			free_iob ( iobuf );
			phantom->rds_iobuf[i] = NULL;
		}
	}
	for ( i = 0 ; i < PHN_NUM_CDS ; i++ ) {
		iobuf = phantom->cds_iobuf[i];
		if ( iobuf ) {
			netdev_tx_complete_err ( netdev, iobuf, -ECANCELED );
			phantom->cds_iobuf[i] = NULL;
		}
	}
}

/**
 * Transmit packet
 *
 * @v netdev Network device
 * @v iobuf I/O buffer
 * @ret rc Return status code
 */
static int phantom_transmit ( struct net_device *netdev,
			      struct io_buffer *iobuf ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	union phantom_cds cds;
	int index;

	/* Get descriptor ring entry */
	index = phantom_alloc_cds ( phantom );
	if ( index < 0 )
		return index;

	/* Fill descriptor ring entry */
	memset ( &cds, 0, sizeof ( cds ) );
	NX_FILL_3 ( &cds, 0,
		    tx.opcode, UNM_TX_ETHER_PKT,
		    tx.num_buffers, 1,
		    tx.length, iob_len ( iobuf ) );
	NX_FILL_2 ( &cds, 2,
		    tx.port, phantom->port,
		    tx.context_id, phantom->port );
	NX_FILL_1 ( &cds, 4,
		    tx.buffer1_dma_addr, virt_to_bus ( iobuf->data ) );
	NX_FILL_1 ( &cds, 5,
		    tx.buffer1_length, iob_len ( iobuf ) );

	/* Record I/O buffer */
	assert ( phantom->cds_iobuf[index] == NULL );
	phantom->cds_iobuf[index] = iobuf;

	/* Post descriptor */
	phantom_post_cds ( phantom, &cds );

	return 0;
}

/**
 * Poll for received packets
 *
 * @v netdev Network device
 */
static void phantom_poll ( struct net_device *netdev ) {
	struct phantom_nic *phantom = netdev_priv ( netdev );
	struct io_buffer *iobuf;
	unsigned int irq_vector;
	unsigned int irq_state;
	unsigned int cds_consumer_idx;
	unsigned int raw_new_cds_consumer_idx;
	unsigned int new_cds_consumer_idx;
	unsigned int rds_consumer_idx;
	unsigned int sds_consumer_idx;
	struct phantom_sds *sds;
	unsigned int sds_handle;
	unsigned int sds_opcode;

	/* Occasionally poll the link state */
	if ( phantom->link_poll_timer-- == 0 ) {
		phantom_poll_link_state ( netdev );
		/* Reset the link poll timer */
		phantom->link_poll_timer = PHN_LINK_POLL_FREQUENCY;
	}

	/* Check for interrupts */
	if ( phantom->sds_irq_enabled ) {

		/* Do nothing unless an interrupt is asserted */
		irq_vector = phantom_readl ( phantom, UNM_PCIE_IRQ_VECTOR );
		if ( ! ( irq_vector & UNM_PCIE_IRQ_VECTOR_BIT( phantom->port )))
			return;

		/* Do nothing unless interrupt state machine has stabilised */
		irq_state = phantom_readl ( phantom, UNM_PCIE_IRQ_STATE );
		if ( ! UNM_PCIE_IRQ_STATE_TRIGGERED ( irq_state ) )
			return;

		/* Acknowledge interrupt */
		phantom_writel ( phantom, UNM_PCIE_IRQ_STATUS_MAGIC,
				 phantom_irq_status_reg[phantom->port] );
		phantom_readl ( phantom, UNM_PCIE_IRQ_VECTOR );
	}

	/* Check for TX completions */
	cds_consumer_idx = phantom->cds_consumer_idx;
	raw_new_cds_consumer_idx = phantom->desc->cmd_cons;
	new_cds_consumer_idx = le32_to_cpu ( raw_new_cds_consumer_idx );
	while ( cds_consumer_idx != new_cds_consumer_idx ) {
		DBGC2 ( phantom, "Phantom %p CDS %d complete\n",
			phantom, cds_consumer_idx );
		/* Completions may be for commands other than TX, so
		 * there may not always be an associated I/O buffer.
		 */
		if ( ( iobuf = phantom->cds_iobuf[cds_consumer_idx] ) ) {
			netdev_tx_complete ( netdev, iobuf );
			phantom->cds_iobuf[cds_consumer_idx] = NULL;
		}
		cds_consumer_idx = ( ( cds_consumer_idx + 1 ) % PHN_NUM_CDS );
		phantom->cds_consumer_idx = cds_consumer_idx;
	}

	/* Check for received packets */
	rds_consumer_idx = phantom->rds_consumer_idx;
	sds_consumer_idx = phantom->sds_consumer_idx;
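	/* Process status descriptors until we reach one that the
	 * hardware has not yet written (its owner field is still
	 * zero; we clear each descriptor below after processing it).
	 */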
  1179. while ( 1 ) {
  1180. sds = &phantom->desc->sds[sds_consumer_idx];
  1181. if ( NX_GET ( sds, owner ) == 0 )
  1182. break;
  1183. DBGC2 ( phantom, "Phantom %p SDS %d status:\n",
  1184. phantom, sds_consumer_idx );
  1185. DBGC2_HDA ( phantom, virt_to_bus ( sds ), sds, sizeof (*sds) );
  1186. /* Check received opcode */
  1187. sds_opcode = NX_GET ( sds, opcode );
  1188. if ( ( sds_opcode == UNM_RXPKT_DESC ) ||
  1189. ( sds_opcode == UNM_SYN_OFFLOAD ) ) {
  1190. /* Sanity check: ensure that all of the SDS
  1191. * descriptor has been written.
  1192. */
  1193. if ( NX_GET ( sds, total_length ) == 0 ) {
  1194. DBGC ( phantom, "Phantom %p SDS %d "
  1195. "incomplete; deferring\n",
  1196. phantom, sds_consumer_idx );
  1197. /* Leave for next poll() */
  1198. break;
  1199. }
  1200. /* Process received packet */
  1201. sds_handle = NX_GET ( sds, handle );
  1202. iobuf = phantom->rds_iobuf[sds_handle];
  1203. assert ( iobuf != NULL );
  1204. iob_put ( iobuf, NX_GET ( sds, total_length ) );
  1205. iob_pull ( iobuf, NX_GET ( sds, pkt_offset ) );
  1206. DBGC2 ( phantom, "Phantom %p RDS %d complete\n",
  1207. phantom, sds_handle );
  1208. netdev_rx ( netdev, iobuf );
  1209. phantom->rds_iobuf[sds_handle] = NULL;
  1210. /* Update RDS consumer counter. This is a
  1211. * lower bound for the number of descriptors
  1212. * that have been read by the hardware, since
  1213. * the hardware must have read at least one
  1214. * descriptor for each completion that we
  1215. * receive.
  1216. */
  1217. rds_consumer_idx =
  1218. ( ( rds_consumer_idx + 1 ) % PHN_NUM_RDS );
  1219. phantom->rds_consumer_idx = rds_consumer_idx;
  1220. } else {
  1221. DBGC ( phantom, "Phantom %p unexpected SDS opcode "
  1222. "%02x\n", phantom, sds_opcode );
  1223. DBGC_HDA ( phantom, virt_to_bus ( sds ),
  1224. sds, sizeof ( *sds ) );
  1225. }
  1226. /* Clear status descriptor */
  1227. memset ( sds, 0, sizeof ( *sds ) );
  1228. /* Update SDS consumer index */
  1229. sds_consumer_idx = ( ( sds_consumer_idx + 1 ) % PHN_NUM_SDS );
  1230. phantom->sds_consumer_idx = sds_consumer_idx;
  1231. wmb();
  1232. phantom_writel ( phantom, phantom->sds_consumer_idx,
  1233. phantom->sds_consumer_crb );
  1234. }
  1235. /* Refill the RX descriptor ring */
  1236. phantom_refill_rx_ring ( netdev );
  1237. }
  1238. /**
  1239. * Enable/disable interrupts
  1240. *
  1241. * @v netdev Network device
  1242. * @v enable Interrupts should be enabled
  1243. */
  1244. static void phantom_irq ( struct net_device *netdev, int enable ) {
  1245. struct phantom_nic *phantom = netdev_priv ( netdev );
  1246. phantom_writel ( phantom, ( enable ? 1 : 0 ),
  1247. phantom->sds_irq_mask_crb );
  1248. phantom_writel ( phantom, UNM_PCIE_IRQ_MASK_MAGIC,
  1249. phantom_irq_mask_reg[phantom->port] );
  1250. phantom->sds_irq_enabled = enable;
  1251. }
  1252. /** Phantom net device operations */
  1253. static struct net_device_operations phantom_operations = {
  1254. .open = phantom_open,
  1255. .close = phantom_close,
  1256. .transmit = phantom_transmit,
  1257. .poll = phantom_poll,
  1258. .irq = phantom_irq,
  1259. };
  1260. /***************************************************************************
  1261. *
  1262. * CLP settings
  1263. *
  1264. */
  1265. /** Phantom CLP settings scope */
  1266. static const struct settings_scope phantom_settings_scope;
  1267. /** Phantom CLP data
  1268. *
  1269. */
  1270. union phantom_clp_data {
  1271. /** Data bytes
  1272. *
  1273. * This field is right-aligned; if only N bytes are present
  1274. * then bytes[0]..bytes[7-N] should be zero, and the data
  1275. * should be in bytes[7-N+1] to bytes[7];
  1276. */
  1277. uint8_t bytes[8];
  1278. /** Dwords for the CLP interface */
  1279. struct {
  1280. /** High dword, in network byte order */
  1281. uint32_t hi;
  1282. /** Low dword, in network byte order */
  1283. uint32_t lo;
  1284. } dwords;
  1285. };
  1286. #define PHN_CLP_BLKSIZE ( sizeof ( union phantom_clp_data ) )
  1287. /**
  1288. * Wait for Phantom CLP command to complete
  1289. *
  1290. * @v phantom Phantom NIC
  1291. * @ret rc Return status code
  1292. */
  1293. static int phantom_clp_wait ( struct phantom_nic *phantom ) {
  1294. unsigned int retries;
  1295. uint32_t status;
  1296. for ( retries = 0 ; retries < PHN_CLP_CMD_TIMEOUT_MS ; retries++ ) {
  1297. status = phantom_readl ( phantom, UNM_CAM_RAM_CLP_STATUS );
  1298. if ( status & UNM_CAM_RAM_CLP_STATUS_DONE )
  1299. return 0;
  1300. mdelay ( 1 );
  1301. }
  1302. DBGC ( phantom, "Phantom %p timed out waiting for CLP command\n",
  1303. phantom );
  1304. return -ETIMEDOUT;
  1305. }

/**
 * Issue Phantom CLP command
 *
 * @v phantom           Phantom NIC
 * @v port              Virtual port number
 * @v opcode            Opcode
 * @v data_in           Data in, or NULL
 * @v data_out          Data out, or NULL
 * @v offset            Offset within data
 * @v len               Data buffer length
 * @ret len             Total transfer length (for reads), or negative error
 */
static int phantom_clp_cmd ( struct phantom_nic *phantom, unsigned int port,
                             unsigned int opcode, const void *data_in,
                             void *data_out, size_t offset, size_t len ) {
        union phantom_clp_data data;
        unsigned int index = ( offset / sizeof ( data ) );
        unsigned int last = 0;
        size_t in_frag_len;
        uint8_t *in_frag;
        uint32_t command;
        uint32_t status;
        size_t read_len;
        unsigned int error;
        size_t out_frag_len;
        uint8_t *out_frag;
        int rc;

        /* Sanity checks */
        assert ( ( offset % sizeof ( data ) ) == 0 );
        if ( len > 255 ) {
                DBGC ( phantom, "Phantom %p invalid CLP length %zd\n",
                       phantom, len );
                return -EINVAL;
        }

        /* Check that CLP interface is ready */
        if ( ( rc = phantom_clp_wait ( phantom ) ) != 0 )
                return rc;

        /* Copy data in */
        memset ( &data, 0, sizeof ( data ) );
        if ( data_in ) {
                assert ( offset < len );
                in_frag_len = ( len - offset );
                if ( in_frag_len > sizeof ( data ) ) {
                        in_frag_len = sizeof ( data );
                } else {
                        last = 1;
                }
                in_frag = &data.bytes[ sizeof ( data ) - in_frag_len ];
                memcpy ( in_frag, ( data_in + offset ), in_frag_len );
                phantom_writel ( phantom, be32_to_cpu ( data.dwords.lo ),
                                 UNM_CAM_RAM_CLP_DATA_LO );
                phantom_writel ( phantom, be32_to_cpu ( data.dwords.hi ),
                                 UNM_CAM_RAM_CLP_DATA_HI );
        }
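
        /* The command word packs the transfer parameters as:
         *   bits 31:24  block index within the overall transfer
         *   bits 23:16  total data length (writes only; zero for reads)
         *   bits 15:8   virtual port number
         *   bit  7      last-fragment flag
         *   bits 6:0    opcode
         * (layout inferred from the expression below)
         */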

        /* Issue CLP command */
        command = ( ( index << 24 ) | ( ( data_in ? len : 0 ) << 16 ) |
                    ( port << 8 ) | ( last << 7 ) | ( opcode << 0 ) );
        phantom_writel ( phantom, command, UNM_CAM_RAM_CLP_COMMAND );
        mb();
        phantom_writel ( phantom, UNM_CAM_RAM_CLP_STATUS_START,
                         UNM_CAM_RAM_CLP_STATUS );

        /* Wait for command to complete */
        if ( ( rc = phantom_clp_wait ( phantom ) ) != 0 )
                return rc;

        /* Get command status */
        status = phantom_readl ( phantom, UNM_CAM_RAM_CLP_STATUS );
        read_len = ( ( status >> 16 ) & 0xff );
        error = ( ( status >> 8 ) & 0xff );
        if ( error ) {
                DBGC ( phantom, "Phantom %p CLP command error %02x\n",
                       phantom, error );
                return -EIO;
        }

        /* Copy data out */
        if ( data_out ) {
                data.dwords.lo = cpu_to_be32 ( phantom_readl ( phantom,
                                                UNM_CAM_RAM_CLP_DATA_LO ) );
                data.dwords.hi = cpu_to_be32 ( phantom_readl ( phantom,
                                                UNM_CAM_RAM_CLP_DATA_HI ) );
                out_frag_len = ( read_len - offset );
                if ( out_frag_len > sizeof ( data ) )
                        out_frag_len = sizeof ( data );
                out_frag = &data.bytes[ sizeof ( data ) - out_frag_len ];
                if ( out_frag_len > ( len - offset ) )
                        out_frag_len = ( len - offset );
                memcpy ( ( data_out + offset ), out_frag, out_frag_len );
        }

        return read_len;
}
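
/* Note: the store and fetch wrappers below use a paired-opcode
 * convention (opcode <setting> stores a setting, opcode <setting> + 1
 * reads it back), moving data in PHN_CLP_BLKSIZE (8-byte) blocks per
 * CLP command.
 */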

/**
 * Store Phantom CLP setting
 *
 * @v phantom           Phantom NIC
 * @v port              Virtual port number
 * @v setting           Setting number
 * @v data              Data buffer
 * @v len               Length of data buffer
 * @ret rc              Return status code
 */
static int phantom_clp_store ( struct phantom_nic *phantom, unsigned int port,
                               unsigned int setting, const void *data,
                               size_t len ) {
        unsigned int opcode = setting;
        size_t offset;
        int rc;

        for ( offset = 0 ; offset < len ; offset += PHN_CLP_BLKSIZE ) {
                if ( ( rc = phantom_clp_cmd ( phantom, port, opcode, data,
                                              NULL, offset, len ) ) < 0 )
                        return rc;
        }

        return 0;
}

/**
 * Fetch Phantom CLP setting
 *
 * @v phantom           Phantom NIC
 * @v port              Virtual port number
 * @v setting           Setting number
 * @v data              Data buffer
 * @v len               Length of data buffer
 * @ret len             Length of setting, or negative error
 */
static int phantom_clp_fetch ( struct phantom_nic *phantom, unsigned int port,
                               unsigned int setting, void *data, size_t len ) {
        unsigned int opcode = ( setting + 1 );
        size_t offset = 0;
        int read_len;

        while ( 1 ) {
                read_len = phantom_clp_cmd ( phantom, port, opcode, NULL,
                                             data, offset, len );
                if ( read_len < 0 )
                        return read_len;
                offset += PHN_CLP_BLKSIZE;
                if ( offset >= ( unsigned ) read_len )
                        break;
                if ( offset >= len )
                        break;
        }

        return read_len;
}

/** A Phantom CLP setting */
struct phantom_clp_setting {
        /** iPXE setting */
        const struct setting *setting;
        /** Setting number */
        unsigned int clp_setting;
};

/** Phantom CLP settings */
static struct phantom_clp_setting clp_settings[] = {
        { &mac_setting, 0x01 },
};
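
/* Further iPXE settings can be exposed through CLP by extending this
 * table.  CLP setting number zero is treated as "no equivalent
 * setting" by phantom_clp_setting(), so it must not be used as a real
 * setting number here.
 */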

/**
 * Find Phantom CLP setting
 *
 * @v phantom           Phantom NIC
 * @v setting           iPXE setting
 * @ret clp_setting     Setting number, or 0 if not found
 */
static unsigned int
phantom_clp_setting ( struct phantom_nic *phantom,
                      const struct setting *setting ) {
        struct phantom_clp_setting *clp_setting;
        unsigned int i;

        /* Search the list of explicitly-defined settings */
        for ( i = 0 ; i < ( sizeof ( clp_settings ) /
                            sizeof ( clp_settings[0] ) ) ; i++ ) {
                clp_setting = &clp_settings[i];
                if ( setting_cmp ( setting, clp_setting->setting ) == 0 )
                        return clp_setting->clp_setting;
        }

        /* Allow for use of numbered settings */
        if ( setting->scope == &phantom_settings_scope )
                return setting->tag;

        DBGC2 ( phantom, "Phantom %p has no \"%s\" setting\n",
                phantom, setting->name );
        return 0;
}

/**
 * Check applicability of Phantom CLP setting
 *
 * @v settings          Settings block
 * @v setting           Setting
 * @ret applies         Setting applies within this settings block
 */
static int phantom_setting_applies ( struct settings *settings,
                                     const struct setting *setting ) {
        struct phantom_nic *phantom =
                container_of ( settings, struct phantom_nic, settings );
        unsigned int clp_setting;

        /* Find Phantom setting equivalent to iPXE setting */
        clp_setting = phantom_clp_setting ( phantom, setting );

        return ( clp_setting != 0 );
}

/**
 * Store Phantom CLP setting
 *
 * @v settings          Settings block
 * @v setting           Setting to store
 * @v data              Setting data, or NULL to clear setting
 * @v len               Length of setting data
 * @ret rc              Return status code
 */
static int phantom_store_setting ( struct settings *settings,
                                   const struct setting *setting,
                                   const void *data, size_t len ) {
        struct phantom_nic *phantom =
                container_of ( settings, struct phantom_nic, settings );
        unsigned int clp_setting;
        int rc;

        /* Find Phantom setting equivalent to iPXE setting */
        clp_setting = phantom_clp_setting ( phantom, setting );
        assert ( clp_setting != 0 );

        /* Store setting */
        if ( ( rc = phantom_clp_store ( phantom, phantom->port,
                                        clp_setting, data, len ) ) != 0 ) {
                DBGC ( phantom, "Phantom %p could not store setting \"%s\": "
                       "%s\n", phantom, setting->name, strerror ( rc ) );
                return rc;
        }

        return 0;
}

/**
 * Fetch Phantom CLP setting
 *
 * @v settings          Settings block
 * @v setting           Setting to fetch
 * @v data              Buffer to fill with setting data
 * @v len               Length of buffer
 * @ret len             Length of setting data, or negative error
 */
static int phantom_fetch_setting ( struct settings *settings,
                                   struct setting *setting,
                                   void *data, size_t len ) {
        struct phantom_nic *phantom =
                container_of ( settings, struct phantom_nic, settings );
        unsigned int clp_setting;
        int read_len;
        int rc;

        /* Find Phantom setting equivalent to iPXE setting */
        clp_setting = phantom_clp_setting ( phantom, setting );
        assert ( clp_setting != 0 );

        /* Fetch setting */
        if ( ( read_len = phantom_clp_fetch ( phantom, phantom->port,
                                              clp_setting, data, len ) ) < 0 ) {
                rc = read_len;
                DBGC ( phantom, "Phantom %p could not fetch setting \"%s\": "
                       "%s\n", phantom, setting->name, strerror ( rc ) );
                return rc;
        }

        return read_len;
}

/** Phantom CLP settings operations */
static struct settings_operations phantom_settings_operations = {
        .applies        = phantom_setting_applies,
        .store          = phantom_store_setting,
        .fetch          = phantom_fetch_setting,
};
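
/* This settings block is registered in phantom_probe() as a child of
 * the network device's settings block under the name "clp", so the
 * CLP-backed values appear alongside the NIC's other settings.
 */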

/***************************************************************************
 *
 * Initialisation
 *
 */

/**
 * Map Phantom CRB window
 *
 * @v phantom           Phantom NIC
 * @v pci               PCI device
 * @ret rc              Return status code
 */
static int phantom_map_crb ( struct phantom_nic *phantom,
                             struct pci_device *pci ) {
        unsigned long bar0_start;
        unsigned long bar0_size;

        bar0_start = pci_bar_start ( pci, PCI_BASE_ADDRESS_0 );
        bar0_size = pci_bar_size ( pci, PCI_BASE_ADDRESS_0 );
        DBGC ( phantom, "Phantom %p is " PCI_FMT " with BAR0 at %08lx+%lx\n",
               phantom, PCI_ARGS ( pci ), bar0_start, bar0_size );

        if ( ! bar0_start ) {
                DBGC ( phantom, "Phantom %p BAR not assigned; ignoring\n",
                       phantom );
                return -EINVAL;
        }

        switch ( bar0_size ) {
        case ( 128 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 128MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_128m;
                break;
        case ( 32 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 32MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_32m;
                break;
        case ( 2 * 1024 * 1024 ) :
                DBGC ( phantom, "Phantom %p has 2MB BAR\n", phantom );
                phantom->crb_access = phantom_crb_access_2m;
                break;
        default:
                DBGC ( phantom, "Phantom %p has bad BAR size\n", phantom );
                return -EINVAL;
        }

        phantom->bar0 = ioremap ( bar0_start, bar0_size );
        if ( ! phantom->bar0 ) {
                DBGC ( phantom, "Phantom %p could not map BAR0\n", phantom );
                return -EIO;
        }

        /* Mark current CRB window as invalid, so that the first
         * read/write will set the current window.
         */
        phantom->crb_window = -1UL;

        return 0;
}

/**
 * Unhalt all PEGs
 *
 * @v phantom           Phantom NIC
 */
static void phantom_unhalt_pegs ( struct phantom_nic *phantom ) {
        uint32_t halt_status;
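
        /* Each PEG's halt status register is read and written back
         * unchanged; this is assumed to act as a write-to-clear,
         * releasing any halted protocol engine.
         */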
        halt_status = phantom_readl ( phantom, UNM_PEG_0_HALT_STATUS );
        phantom_writel ( phantom, halt_status, UNM_PEG_0_HALT_STATUS );
        halt_status = phantom_readl ( phantom, UNM_PEG_1_HALT_STATUS );
        phantom_writel ( phantom, halt_status, UNM_PEG_1_HALT_STATUS );
        halt_status = phantom_readl ( phantom, UNM_PEG_2_HALT_STATUS );
        phantom_writel ( phantom, halt_status, UNM_PEG_2_HALT_STATUS );
        halt_status = phantom_readl ( phantom, UNM_PEG_3_HALT_STATUS );
        phantom_writel ( phantom, halt_status, UNM_PEG_3_HALT_STATUS );
        halt_status = phantom_readl ( phantom, UNM_PEG_4_HALT_STATUS );
        phantom_writel ( phantom, halt_status, UNM_PEG_4_HALT_STATUS );
}

/**
 * Initialise the Phantom command PEG
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 */
static int phantom_init_cmdpeg ( struct phantom_nic *phantom ) {
        uint32_t cold_boot;
        uint32_t sw_reset;
        unsigned int retries;
        uint32_t cmdpeg_state;
        uint32_t last_cmdpeg_state = 0;

        /* Check for a previous initialisation.  This could have
         * happened if, for example, the BIOS used the UNDI API to
         * drive the NIC prior to a full PXE boot.
         */
        cmdpeg_state = phantom_readl ( phantom, UNM_NIC_REG_CMDPEG_STATE );
        if ( cmdpeg_state == UNM_NIC_REG_CMDPEG_STATE_INITIALIZE_ACK ) {
                DBGC ( phantom, "Phantom %p command PEG already initialized\n",
                       phantom );
                /* Unhalt the PEGs.  Previous firmware (e.g. BOFM) may
                 * have halted the PEGs to prevent internal bus
                 * collisions when the BIOS re-reads the expansion ROM.
                 */
                phantom_unhalt_pegs ( phantom );
                return 0;
        }

        /* If this was a cold boot, check that the hardware came up ok */
        cold_boot = phantom_readl ( phantom, UNM_CAM_RAM_COLD_BOOT );
        if ( cold_boot == UNM_CAM_RAM_COLD_BOOT_MAGIC ) {
                DBGC ( phantom, "Phantom %p coming up from cold boot\n",
                       phantom );
                sw_reset = phantom_readl ( phantom, UNM_ROMUSB_GLB_SW_RESET );
                if ( sw_reset != UNM_ROMUSB_GLB_SW_RESET_MAGIC ) {
                        DBGC ( phantom, "Phantom %p reset failed: %08x\n",
                               phantom, sw_reset );
                        return -EIO;
                }
        } else {
                DBGC ( phantom, "Phantom %p coming up from warm boot "
                       "(%08x)\n", phantom, cold_boot );
        }
        /* Clear cold-boot flag */
        phantom_writel ( phantom, 0, UNM_CAM_RAM_COLD_BOOT );

        /* Set port modes */
        phantom_writel ( phantom, UNM_CAM_RAM_PORT_MODE_AUTO_NEG_1G,
                         UNM_CAM_RAM_WOL_PORT_MODE );

        /* Pass dummy DMA area to card */
        phantom_write_hilo ( phantom, 0,
                             UNM_NIC_REG_DUMMY_BUF_ADDR_LO,
                             UNM_NIC_REG_DUMMY_BUF_ADDR_HI );
        phantom_writel ( phantom, UNM_NIC_REG_DUMMY_BUF_INIT,
                         UNM_NIC_REG_DUMMY_BUF );

        /* Tell the hardware that tuning is complete */
        phantom_writel ( phantom, UNM_ROMUSB_GLB_PEGTUNE_DONE_MAGIC,
                         UNM_ROMUSB_GLB_PEGTUNE_DONE );

        /* Wait for command PEG to finish initialising */
        DBGC ( phantom, "Phantom %p initialising command PEG (will take up to "
               "%d seconds)...\n", phantom, PHN_CMDPEG_INIT_TIMEOUT_SEC );
        for ( retries = 0; retries < PHN_CMDPEG_INIT_TIMEOUT_SEC; retries++ ) {
                cmdpeg_state = phantom_readl ( phantom,
                                               UNM_NIC_REG_CMDPEG_STATE );
                if ( cmdpeg_state != last_cmdpeg_state ) {
                        DBGC ( phantom, "Phantom %p command PEG state is "
                               "%08x after %d seconds...\n",
                               phantom, cmdpeg_state, retries );
                        last_cmdpeg_state = cmdpeg_state;
                }
                if ( cmdpeg_state == UNM_NIC_REG_CMDPEG_STATE_INITIALIZED ) {
                        /* Acknowledge the PEG initialisation */
                        phantom_writel ( phantom,
                                         UNM_NIC_REG_CMDPEG_STATE_INITIALIZE_ACK,
                                         UNM_NIC_REG_CMDPEG_STATE );
                        return 0;
                }
                mdelay ( 1000 );
        }

        DBGC ( phantom, "Phantom %p timed out waiting for command PEG to "
               "initialise (status %08x)\n", phantom, cmdpeg_state );
        return -ETIMEDOUT;
}

/**
 * Read Phantom MAC address
 *
 * @v phantom           Phantom NIC
 * @v hw_addr           Buffer to fill with MAC address
 */
static void phantom_get_macaddr ( struct phantom_nic *phantom,
                                  uint8_t *hw_addr ) {
        union {
                uint8_t mac_addr[2][ETH_ALEN];
                uint32_t dwords[3];
        } u;
        unsigned long offset;
        int i;

        /* Read the three dwords that include this MAC address and one other */
        offset = ( UNM_CAM_RAM_MAC_ADDRS +
                   ( 12 * ( phantom->port / 2 ) ) );
        for ( i = 0 ; i < 3 ; i++, offset += 4 ) {
                u.dwords[i] = phantom_readl ( phantom, offset );
        }
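
        /* CAM RAM packs MAC addresses in pairs: ports 2n and 2n+1
         * share a 12-byte (three-dword) region, and each address is
         * stored with its bytes in reverse order, hence the reversal
         * in the copy below.
         */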
        /* Copy out the relevant MAC address */
        for ( i = 0 ; i < ETH_ALEN ; i++ ) {
                hw_addr[ ETH_ALEN - i - 1 ] =
                        u.mac_addr[ phantom->port & 1 ][i];
        }
        DBGC ( phantom, "Phantom %p MAC address is %s\n",
               phantom, eth_ntoa ( hw_addr ) );
}

/**
 * Check Phantom is enabled for boot
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 *
 * This is something of an ugly hack to accommodate an OEM
 * requirement.  The NIC has only one expansion ROM BAR, rather than
 * one per port.  To allow individual ports to be selectively
 * enabled/disabled for PXE boot (as required), we must therefore
 * leave the expansion ROM always enabled, and place the per-port
 * enable/disable logic within the iPXE driver.
 */
static int phantom_check_boot_enable ( struct phantom_nic *phantom ) {
        unsigned long boot_enable;

        boot_enable = phantom_readl ( phantom, UNM_CAM_RAM_BOOT_ENABLE );
        if ( ! ( boot_enable & ( 1 << phantom->port ) ) ) {
                DBGC ( phantom, "Phantom %p PXE boot is disabled\n",
                       phantom );
                return -ENOTSUP;
        }

        return 0;
}

/**
 * Initialise Phantom receive PEG
 *
 * @v phantom           Phantom NIC
 * @ret rc              Return status code
 */
static int phantom_init_rcvpeg ( struct phantom_nic *phantom ) {
        unsigned int retries;
        uint32_t rcvpeg_state;
        uint32_t last_rcvpeg_state = 0;

        DBGC ( phantom, "Phantom %p initialising receive PEG (will take up to "
               "%d seconds)...\n", phantom, PHN_RCVPEG_INIT_TIMEOUT_SEC );
        for ( retries = 0; retries < PHN_RCVPEG_INIT_TIMEOUT_SEC; retries++ ) {
                rcvpeg_state = phantom_readl ( phantom,
                                               UNM_NIC_REG_RCVPEG_STATE );
                if ( rcvpeg_state != last_rcvpeg_state ) {
                        DBGC ( phantom, "Phantom %p receive PEG state is "
                               "%08x after %d seconds...\n",
                               phantom, rcvpeg_state, retries );
                        last_rcvpeg_state = rcvpeg_state;
                }
                if ( rcvpeg_state == UNM_NIC_REG_RCVPEG_STATE_INITIALIZED )
                        return 0;
                mdelay ( 1000 );
        }

        DBGC ( phantom, "Phantom %p timed out waiting for receive PEG to "
               "initialise (status %08x)\n", phantom, rcvpeg_state );
        return -ETIMEDOUT;
}

/**
 * Probe PCI device
 *
 * @v pci               PCI device
 * @ret rc              Return status code
 */
static int phantom_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct phantom_nic *phantom;
        struct settings *parent_settings;
        unsigned int busdevfn;
        int rc;

        /* Allocate Phantom device */
        netdev = alloc_etherdev ( sizeof ( *phantom ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc_etherdev;
        }
        netdev_init ( netdev, &phantom_operations );
        phantom = netdev_priv ( netdev );
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( phantom, 0, sizeof ( *phantom ) );
        phantom->port = PCI_FUNC ( pci->busdevfn );
        assert ( phantom->port < PHN_MAX_NUM_PORTS );
        settings_init ( &phantom->settings,
                        &phantom_settings_operations,
                        &netdev->refcnt, &phantom_settings_scope );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map CRB */
        if ( ( rc = phantom_map_crb ( phantom, pci ) ) != 0 )
                goto err_map_crb;

        /* BUG5945 - need to hack PCI config space on P3 B1 silicon.
         * B2 will have this fixed; remove this hack when B1 is no
         * longer in use.
         */
        busdevfn = pci->busdevfn;
        if ( PCI_FUNC ( busdevfn ) == 0 ) {
                unsigned int i;
                for ( i = 0 ; i < 8 ; i++ ) {
                        uint32_t temp;
                        pci->busdevfn =
                                PCI_BUSDEVFN ( PCI_SEG ( busdevfn ),
                                               PCI_BUS ( busdevfn ),
                                               PCI_SLOT ( busdevfn ), i );
                        pci_read_config_dword ( pci, 0xc8, &temp );
                        pci_read_config_dword ( pci, 0xc8, &temp );
                        pci_write_config_dword ( pci, 0xc8, 0xf1000 );
                }
                pci->busdevfn = busdevfn;
        }

        /* Initialise the command PEG */
        if ( ( rc = phantom_init_cmdpeg ( phantom ) ) != 0 )
                goto err_init_cmdpeg;

        /* Initialise the receive PEG */
        if ( ( rc = phantom_init_rcvpeg ( phantom ) ) != 0 )
                goto err_init_rcvpeg;

        /* Read MAC addresses */
        phantom_get_macaddr ( phantom, netdev->hw_addr );

        /* Skip if boot disabled on NIC */
        if ( ( rc = phantom_check_boot_enable ( phantom ) ) != 0 )
                goto err_check_boot_enable;

        /* Register network devices */
        if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
                DBGC ( phantom, "Phantom %p could not register net device: "
                       "%s\n", phantom, strerror ( rc ) );
                goto err_register_netdev;
        }

        /* Register settings blocks */
        parent_settings = netdev_settings ( netdev );
        if ( ( rc = register_settings ( &phantom->settings,
                                        parent_settings, "clp" ) ) != 0 ) {
                DBGC ( phantom, "Phantom %p could not register settings: "
                       "%s\n", phantom, strerror ( rc ) );
                goto err_register_settings;
        }

        return 0;
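
        /* Error path: each failure label below undoes only the steps
         * that completed before the corresponding goto, in reverse
         * order.  The unregister_settings() call above the first
         * label cannot be reached (nothing jumps before that label);
         * it is kept so that the unwind sequence reads as a complete
         * mirror of the setup sequence.
         */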

        unregister_settings ( &phantom->settings );
 err_register_settings:
        unregister_netdev ( netdev );
 err_register_netdev:
 err_check_boot_enable:
 err_init_rcvpeg:
 err_init_cmdpeg:
 err_map_crb:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc_etherdev:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci               PCI device
 */
static void phantom_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct phantom_nic *phantom = netdev_priv ( netdev );

        unregister_settings ( &phantom->settings );
        unregister_netdev ( netdev );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** Phantom PCI IDs */
static struct pci_device_id phantom_nics[] = {
        PCI_ROM ( 0x4040, 0x0100, "nx", "NX", 0 ),
};

/** Phantom PCI driver */
struct pci_driver phantom_driver __pci_driver = {
        .ids = phantom_nics,
        .id_count = ( sizeof ( phantom_nics ) / sizeof ( phantom_nics[0] ) ),
        .probe = phantom_probe,
        .remove = phantom_remove,
};