/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/infiniband.h>
#include <gpxe/i2c.h>
#include <gpxe/bitbash.h>
#include <gpxe/malloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/ib_sma.h>
#include "linda.h"

/**
 * @file
 *
 * QLogic Linda Infiniband HCA
 *
 */
/** A Linda send work queue */
struct linda_send_work_queue {
	/** Send buffer usage */
	uint8_t *send_buf;
	/** Producer index */
	unsigned int prod;
	/** Consumer index */
	unsigned int cons;
};

/** A Linda receive work queue */
struct linda_recv_work_queue {
	/** Receive header ring */
	void *header;
	/** Receive header producer offset (written by hardware) */
	struct QIB_7220_scalar header_prod;
	/** Receive header consumer offset */
	unsigned int header_cons;
	/** Offset within register space of the eager array */
	unsigned long eager_array;
	/** Number of entries in eager array */
	unsigned int eager_entries;
	/** Eager array producer index */
	unsigned int eager_prod;
	/** Eager array consumer index */
	unsigned int eager_cons;
};

/** A Linda HCA */
struct linda {
	/** Registers */
	void *regs;
	/** In-use contexts */
	uint8_t used_ctx[LINDA_NUM_CONTEXTS];
	/** Send work queues */
	struct linda_send_work_queue send_wq[LINDA_NUM_CONTEXTS];
	/** Receive work queues */
	struct linda_recv_work_queue recv_wq[LINDA_NUM_CONTEXTS];
	/** Offset within register space of the first send buffer */
	unsigned long send_buffer_base;
	/** Send buffer availability (reported by hardware) */
	struct QIB_7220_SendBufAvail *sendbufavail;
	/** Send buffer availability (maintained by software) */
	uint8_t send_buf[LINDA_MAX_SEND_BUFS];
	/** Send buffer availability producer counter */
	unsigned int send_buf_prod;
	/** Send buffer availability consumer counter */
	unsigned int send_buf_cons;
	/** Number of reserved send buffers (across all QPs) */
	unsigned int reserved_send_bufs;
	/** I2C bit-bashing interface */
	struct i2c_bit_basher i2c;
	/** I2C serial EEPROM */
	struct i2c_device eeprom;
	/** Subnet management agent */
	struct ib_sma sma;
};
/***************************************************************************
 *
 * Linda register access
 *
 ***************************************************************************
 *
 * This card requires atomic 64-bit accesses.  Strange things happen
 * if you try to use 32-bit accesses; sometimes they work, sometimes
 * they don't, sometimes you get random data.
 *
 * These accessors use the "movq" MMX instruction, and so won't work
 * on really old Pentiums (which won't have PCIe anyway, so this is
 * something of a moot point).
 */

/**
 * Read Linda qword register
 *
 * @v linda		Linda device
 * @v dwords		Register buffer to read into
 * @v offset		Register offset
 */
static void linda_readq ( struct linda *linda, uint32_t *dwords,
			  unsigned long offset ) {
	void *addr = ( linda->regs + offset );

	__asm__ __volatile__ ( "movq (%1), %%mm0\n\t"
			       "movq %%mm0, (%0)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );

	DBGIO ( "[%08lx] => %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );
}
#define linda_readq( _linda, _ptr, _offset ) \
	linda_readq ( (_linda), (_ptr)->u.dwords, (_offset) )
#define linda_readq_array8b( _linda, _ptr, _offset, _idx ) \
	linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define linda_readq_array64k( _linda, _ptr, _offset, _idx ) \
	linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )

/**
 * Write Linda qword register
 *
 * @v linda		Linda device
 * @v dwords		Register buffer to write
 * @v offset		Register offset
 */
static void linda_writeq ( struct linda *linda, const uint32_t *dwords,
			   unsigned long offset ) {
	void *addr = ( linda->regs + offset );

	DBGIO ( "[%08lx] <= %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );

	__asm__ __volatile__ ( "movq (%0), %%mm0\n\t"
			       "movq %%mm0, (%1)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );
}
#define linda_writeq( _linda, _ptr, _offset ) \
	linda_writeq ( (_linda), (_ptr)->u.dwords, (_offset) )
#define linda_writeq_array8b( _linda, _ptr, _offset, _idx ) \
	linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define linda_writeq_array64k( _linda, _ptr, _offset, _idx ) \
	linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
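
/* Typical usage of the wrapper macros above (illustrative sketch):
 * the QIB_7220_* register structures used throughout this driver are
 * expected to provide a "u.dwords" view of their contents, so a
 * read-modify-write of a register looks like:
 *
 *	struct QIB_7220_Control control;
 *
 *	linda_readq ( linda, &control, QIB_7220_Control_offset );
 *	BIT_SET ( &control, LinkEn, 1 );
 *	linda_writeq ( linda, &control, QIB_7220_Control_offset );
 */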
/**
 * Write Linda dword register
 *
 * @v linda		Linda device
 * @v dword		Value to write
 * @v offset		Register offset
 */
static void linda_writel ( struct linda *linda, uint32_t dword,
			   unsigned long offset ) {
	writel ( dword, ( linda->regs + offset ) );
}

/***************************************************************************
 *
 * Link state management
 *
 ***************************************************************************
 */

/**
 * Textual representation of link state
 *
 * @v link_state	Link state
 * @ret link_text	Link state text
 */
static const char * linda_link_state_text ( unsigned int link_state ) {
	switch ( link_state ) {
	case LINDA_LINK_STATE_DOWN:	return "DOWN";
	case LINDA_LINK_STATE_INIT:	return "INIT";
	case LINDA_LINK_STATE_ARM:	return "ARM";
	case LINDA_LINK_STATE_ACTIVE:	return "ACTIVE";
	case LINDA_LINK_STATE_ACT_DEFER: return "ACT_DEFER";
	default:			return "UNKNOWN";
	}
}

/**
 * Handle link state change
 *
 * @v linda		Linda device
 */
static void linda_link_state_changed ( struct ib_device *ibdev ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct QIB_7220_IBCStatus ibcstatus;
	struct QIB_7220_EXTCtrl extctrl;
	unsigned int link_state;
	unsigned int link_width;
	unsigned int link_speed;

	/* Read link state */
	linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
	link_state = BIT_GET ( &ibcstatus, LinkState );
	link_width = BIT_GET ( &ibcstatus, LinkWidthActive );
	link_speed = BIT_GET ( &ibcstatus, LinkSpeedActive );
	DBGC ( linda, "Linda %p link state %s (%s %s)\n", linda,
	       linda_link_state_text ( link_state ),
	       ( link_speed ? "DDR" : "SDR" ), ( link_width ? "x4" : "x1" ) );

	/* Set LEDs according to link state */
	linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
	BIT_SET ( &extctrl, LEDPriPortGreenOn,
		  ( ( link_state >= LINDA_LINK_STATE_INIT ) ? 1 : 0 ) );
	BIT_SET ( &extctrl, LEDPriPortYellowOn,
		  ( ( link_state >= LINDA_LINK_STATE_ACTIVE ) ? 1 : 0 ) );
	linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );

	/* Notify Infiniband core of link state change */
	ibdev->port_state = ( link_state + 1 );
	ibdev->link_width =
		( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
	ibdev->link_speed =
		( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
	ib_link_state_changed ( ibdev );
}
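
/* Note that the conversion above adds one to the hardware link state
 * value; this presumably lines the LINDA_LINK_STATE_* values up with
 * the IB PortInfo PortState encoding (Down=1, Init=2, Armed=3,
 * Active=4, ActDefer=5).  linda_set_port_info() below applies the
 * inverse conversion.
 */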
/**
 * Set port information
 *
 * @v ibdev		Infiniband device
 * @v port_info		New port information
 */
static int linda_set_port_info ( struct ib_device *ibdev,
				 const struct ib_port_info *port_info ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct QIB_7220_IBCCtrl ibcctrl;
	unsigned int port_state;
	unsigned int link_state;

	/* Set new link state */
	port_state = ( port_info->link_speed_supported__port_state & 0xf );
	if ( port_state ) {
		link_state = ( port_state - 1 );
		DBGC ( linda, "Linda %p set link state to %s (%x)\n", linda,
		       linda_link_state_text ( link_state ), link_state );
		linda_readq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
		BIT_SET ( &ibcctrl, LinkCmd, link_state );
		linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
	}

	/* Detect and report link state change */
	linda_link_state_changed ( ibdev );

	return 0;
}

/***************************************************************************
 *
 * Context allocation
 *
 ***************************************************************************
 */

/**
 * Map context number to QPN
 *
 * @v ctx		Context index
 * @ret qpn		Queue pair number
 */
static int linda_ctx_to_qpn ( unsigned int ctx ) {
	/* This mapping is fixed by hardware */
	return ( ctx * 2 );
}

/**
 * Map QPN to context number
 *
 * @v qpn		Queue pair number
 * @ret ctx		Context index
 */
static int linda_qpn_to_ctx ( unsigned int qpn ) {
	/* This mapping is fixed by hardware */
	return ( qpn / 2 );
}
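
/* Illustrative examples of the fixed mapping: context 0 corresponds
 * to QPN 0, context 1 to QPN 2, context 2 to QPN 4, and so on; this
 * driver therefore only ever allocates even QPNs.
 */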
/**
 * Allocate a context
 *
 * @v linda		Linda device
 * @ret ctx		Context index, or negative error
 */
static int linda_alloc_ctx ( struct linda *linda ) {
	unsigned int ctx;

	for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
		if ( ! linda->used_ctx[ctx] ) {
			linda->used_ctx[ctx] = 1;
			DBGC2 ( linda, "Linda %p CTX %d allocated\n",
				linda, ctx );
			return ctx;
		}
	}

	DBGC ( linda, "Linda %p out of available contexts\n", linda );
	return -ENOENT;
}

/**
 * Free a context
 *
 * @v linda		Linda device
 * @v ctx		Context index
 */
static void linda_free_ctx ( struct linda *linda, unsigned int ctx ) {
	linda->used_ctx[ctx] = 0;
	DBGC2 ( linda, "Linda %p CTX %d freed\n", linda, ctx );
}
/***************************************************************************
 *
 * Send datapath
 *
 ***************************************************************************
 */

/** Send buffer toggle bit
 *
 * We encode send buffers as 7 bits of send buffer index plus a single
 * bit which should match the "check" bit in the SendBufAvail array.
 */
#define LINDA_SEND_BUF_TOGGLE 0x80
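
/* For example (illustrative): send buffer index 5 with the toggle bit
 * set is encoded as ( 5 | LINDA_SEND_BUF_TOGGLE ) == 0x85; the index
 * is recovered with ( send_buf & ~LINDA_SEND_BUF_TOGGLE ), as done in
 * linda_send_buf_in_use() and linda_send_buffer_offset() below.
 */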
/**
 * Allocate a send buffer
 *
 * @v linda		Linda device
 * @ret send_buf	Send buffer
 *
 * You must guarantee that a send buffer is available.  This is done
 * by refusing to allocate more TX WQEs in total than the number of
 * available send buffers.
 */
static unsigned int linda_alloc_send_buf ( struct linda *linda ) {
	unsigned int send_buf;

	send_buf = linda->send_buf[linda->send_buf_cons];
	send_buf ^= LINDA_SEND_BUF_TOGGLE;
	linda->send_buf_cons = ( ( linda->send_buf_cons + 1 ) %
				 LINDA_MAX_SEND_BUFS );
	return send_buf;
}
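
/* The availability guarantee described above is enforced in
 * linda_create_send_wq() below, which refuses to create a send work
 * queue if ( reserved_send_bufs + num_wqes ) would exceed
 * LINDA_MAX_SEND_BUFS.
 */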
/**
 * Free a send buffer
 *
 * @v linda		Linda device
 * @v send_buf		Send buffer
 */
static void linda_free_send_buf ( struct linda *linda,
				  unsigned int send_buf ) {
	linda->send_buf[linda->send_buf_prod] = send_buf;
	linda->send_buf_prod = ( ( linda->send_buf_prod + 1 ) %
				 LINDA_MAX_SEND_BUFS );
}

/**
 * Check to see if send buffer is in use
 *
 * @v linda		Linda device
 * @v send_buf		Send buffer
 * @ret in_use		Send buffer is in use
 */
static int linda_send_buf_in_use ( struct linda *linda,
				   unsigned int send_buf ) {
	unsigned int send_idx;
	unsigned int send_check;
	unsigned int inusecheck;
	unsigned int inuse;
	unsigned int check;

	send_idx = ( send_buf & ~LINDA_SEND_BUF_TOGGLE );
	send_check = ( !! ( send_buf & LINDA_SEND_BUF_TOGGLE ) );
	inusecheck = BIT_GET ( linda->sendbufavail, InUseCheck[send_idx] );
	inuse = ( !! ( inusecheck & 0x02 ) );
	check = ( !! ( inusecheck & 0x01 ) );
	return ( inuse || ( check != send_check ) );
}
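
/* Each InUseCheck entry in the SendBufAvail array is treated here as
 * a two-bit field: bit 1 (0x02) is the "in use" flag and bit 0 (0x01)
 * is the "check" bit, which is compared against the toggle bit
 * carried in our software send buffer number.
 */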
/**
 * Calculate starting offset for send buffer
 *
 * @v linda		Linda device
 * @v send_buf		Send buffer
 * @ret offset		Starting offset
 */
static unsigned long linda_send_buffer_offset ( struct linda *linda,
						unsigned int send_buf ) {
	return ( linda->send_buffer_base +
		 ( ( send_buf & ~LINDA_SEND_BUF_TOGGLE ) *
		   LINDA_SEND_BUF_SIZE ) );
}

/**
 * Create send work queue
 *
 * @v linda		Linda device
 * @v qp		Queue pair
 */
static int linda_create_send_wq ( struct linda *linda,
				  struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->send;
	struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	int rc;

	/* Reserve send buffers */
	if ( ( linda->reserved_send_bufs + qp->send.num_wqes ) >
	     LINDA_MAX_SEND_BUFS ) {
		DBGC ( linda, "Linda %p out of send buffers (have %d, used "
		       "%d, need %d)\n", linda, LINDA_MAX_SEND_BUFS,
		       linda->reserved_send_bufs, qp->send.num_wqes );
		rc = -ENOBUFS;
		goto err_reserve_bufs;
	}
	linda->reserved_send_bufs += qp->send.num_wqes;

	/* Reset work queue */
	linda_wq->prod = 0;
	linda_wq->cons = 0;
	/* Allocate space for send buffer usage list */
	linda_wq->send_buf = zalloc ( qp->send.num_wqes *
				      sizeof ( linda_wq->send_buf[0] ) );
	if ( ! linda_wq->send_buf ) {
		rc = -ENOBUFS;
		goto err_alloc_send_buf;
	}

	return 0;

	free ( linda_wq->send_buf );
 err_alloc_send_buf:
	linda->reserved_send_bufs -= qp->send.num_wqes;
 err_reserve_bufs:
	return rc;
}

/**
 * Destroy send work queue
 *
 * @v linda		Linda device
 * @v qp		Queue pair
 */
static void linda_destroy_send_wq ( struct linda *linda,
				    struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->send;
	struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );

	free ( linda_wq->send_buf );
	linda->reserved_send_bufs -= qp->send.num_wqes;
}

/**
 * Initialise send datapath
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_init_send ( struct linda *linda ) {
	struct QIB_7220_SendBufBase sendbufbase;
	struct QIB_7220_SendBufAvailAddr sendbufavailaddr;
	struct QIB_7220_SendCtrl sendctrl;
	unsigned int i;
	int rc;

	/* Retrieve SendBufBase */
	linda_readq ( linda, &sendbufbase, QIB_7220_SendBufBase_offset );
	linda->send_buffer_base = BIT_GET ( &sendbufbase,
					    BaseAddr_SmallPIO );
	DBGC ( linda, "Linda %p send buffers at %lx\n",
	       linda, linda->send_buffer_base );

	/* Initialise the send_buf[] array */
	for ( i = 0 ; i < LINDA_MAX_SEND_BUFS ; i++ )
		linda->send_buf[i] = i;

	/* Allocate space for the SendBufAvail array */
	linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
					   LINDA_SENDBUFAVAIL_ALIGN );
	if ( ! linda->sendbufavail ) {
		rc = -ENOMEM;
		goto err_alloc_sendbufavail;
	}
	memset ( linda->sendbufavail, 0, sizeof ( *linda->sendbufavail ) );
	/* Program SendBufAvailAddr into the hardware */
	memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
	BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
		     ( virt_to_bus ( linda->sendbufavail ) >> 6 ) );
	linda_writeq ( linda, &sendbufavailaddr,
		       QIB_7220_SendBufAvailAddr_offset );

	/* Enable sending and DMA of SendBufAvail */
	memset ( &sendctrl, 0, sizeof ( sendctrl ) );
	BIT_FILL_2 ( &sendctrl,
		     SendBufAvailUpd, 1,
		     SPioEnable, 1 );
	linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );

	return 0;

	free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
 err_alloc_sendbufavail:
	return rc;
}

/**
 * Shut down send datapath
 *
 * @v linda		Linda device
 */
static void linda_fini_send ( struct linda *linda ) {
	struct QIB_7220_SendCtrl sendctrl;

	/* Disable sending and DMA of SendBufAvail */
	memset ( &sendctrl, 0, sizeof ( sendctrl ) );
	linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
	mb();

	/* Ensure hardware has seen this disable */
	linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );

	free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
}

/***************************************************************************
 *
 * Receive datapath
 *
 ***************************************************************************
 */
/**
 * Create receive work queue
 *
 * @v linda		Linda device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int linda_create_recv_wq ( struct linda *linda,
				  struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->recv;
	struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_RcvHdrAddr0 rcvhdraddr;
	struct QIB_7220_RcvHdrTailAddr0 rcvhdrtailaddr;
	struct QIB_7220_RcvHdrHead0 rcvhdrhead;
	struct QIB_7220_scalar rcvegrindexhead;
	struct QIB_7220_RcvCtrl rcvctrl;
	unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
	int rc;

	/* Reset context information */
	memset ( &linda_wq->header_prod, 0,
		 sizeof ( linda_wq->header_prod ) );
	linda_wq->header_cons = 0;
	linda_wq->eager_prod = 0;
	linda_wq->eager_cons = 0;

	/* Allocate receive header buffer */
	linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
					LINDA_RECV_HEADERS_ALIGN );
	if ( ! linda_wq->header ) {
		rc = -ENOMEM;
		goto err_alloc_header;
	}

	/* Enable context in hardware */
	memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
	BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr0,
		     ( virt_to_bus ( linda_wq->header ) >> 2 ) );
	linda_writeq_array8b ( linda, &rcvhdraddr,
			       QIB_7220_RcvHdrAddr0_offset, ctx );
	memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
	BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr0,
		     ( virt_to_bus ( &linda_wq->header_prod ) >> 2 ) );
	linda_writeq_array8b ( linda, &rcvhdrtailaddr,
			       QIB_7220_RcvHdrTailAddr0_offset, ctx );
	memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
	BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
	linda_writeq_array64k ( linda, &rcvhdrhead,
				QIB_7220_RcvHdrHead0_offset, ctx );
	memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
	BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
	linda_writeq_array64k ( linda, &rcvegrindexhead,
				QIB_7220_RcvEgrIndexHead0_offset, ctx );
	linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, PortEnable[ctx], 1 );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
	linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );

	DBGC ( linda, "Linda %p QPN %ld CTX %d hdrs [%lx,%lx) prod %lx\n",
	       linda, qp->qpn, ctx, virt_to_bus ( linda_wq->header ),
	       ( virt_to_bus ( linda_wq->header ) + LINDA_RECV_HEADERS_SIZE ),
	       virt_to_bus ( &linda_wq->header_prod ) );
	return 0;

	free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
 err_alloc_header:
	return rc;
}

/**
 * Destroy receive work queue
 *
 * @v linda		Linda device
 * @v qp		Queue pair
 */
static void linda_destroy_recv_wq ( struct linda *linda,
				    struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->recv;
	struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_RcvCtrl rcvctrl;
	unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );

	/* Disable context in hardware */
	linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, PortEnable[ctx], 0 );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
	linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );

	/* Make sure the hardware has seen that the context is disabled */
	linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
	mb();

	/* Free headers ring */
	free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );

	/* Free context */
	linda_free_ctx ( linda, ctx );
}
/**
 * Initialise receive datapath
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_init_recv ( struct linda *linda ) {
	struct QIB_7220_RcvCtrl rcvctrl;
	struct QIB_7220_scalar rcvegrbase;
	struct QIB_7220_scalar rcvhdrentsize;
	struct QIB_7220_scalar rcvhdrcnt;
	struct QIB_7220_RcvBTHQP rcvbthqp;
	unsigned int portcfg;
	unsigned long egrbase;
	unsigned int eager_array_size_0;
	unsigned int eager_array_size_other;
	unsigned int ctx;

	/* Select configuration based on number of contexts */
	switch ( LINDA_NUM_CONTEXTS ) {
	case 5:
		portcfg = LINDA_PORTCFG_5CTX;
		eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_5CTX_0;
		eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_5CTX_OTHER;
		break;
	case 9:
		portcfg = LINDA_PORTCFG_9CTX;
		eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_9CTX_0;
		eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_9CTX_OTHER;
		break;
	case 17:
		portcfg = LINDA_PORTCFG_17CTX;
		eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_17CTX_0;
		eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_17CTX_OTHER;
		break;
	default:
		linker_assert ( 0, invalid_LINDA_NUM_CONTEXTS );
		return -EINVAL;
	}

	/* Configure number of contexts */
	memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
	BIT_FILL_3 ( &rcvctrl,
		     TailUpd, 1,
		     PortCfg, portcfg,
		     RcvQPMapEnable, 1 );
	linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );

	/* Configure receive header buffer sizes */
	memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
	BIT_FILL_1 ( &rcvhdrcnt, Value, LINDA_RECV_HEADER_COUNT );
	linda_writeq ( linda, &rcvhdrcnt, QIB_7220_RcvHdrCnt_offset );
	memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
	BIT_FILL_1 ( &rcvhdrentsize, Value, ( LINDA_RECV_HEADER_SIZE >> 2 ) );
	linda_writeq ( linda, &rcvhdrentsize, QIB_7220_RcvHdrEntSize_offset );

	/* Calculate eager array start addresses for each context */
	linda_readq ( linda, &rcvegrbase, QIB_7220_RcvEgrBase_offset );
	egrbase = BIT_GET ( &rcvegrbase, Value );
	linda->recv_wq[0].eager_array = egrbase;
	linda->recv_wq[0].eager_entries = eager_array_size_0;
	egrbase += ( eager_array_size_0 * sizeof ( struct QIB_7220_RcvEgr ) );
	for ( ctx = 1 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
		linda->recv_wq[ctx].eager_array = egrbase;
		linda->recv_wq[ctx].eager_entries = eager_array_size_other;
		egrbase += ( eager_array_size_other *
			     sizeof ( struct QIB_7220_RcvEgr ) );
	}
	for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
		DBGC ( linda, "Linda %p CTX %d eager array at %lx (%d "
		       "entries)\n", linda, ctx,
		       linda->recv_wq[ctx].eager_array,
		       linda->recv_wq[ctx].eager_entries );
	}

	/* Set the BTH QP for Infinipath packets to an unused value */
	memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
	BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, LINDA_QP_IDETH );
	linda_writeq ( linda, &rcvbthqp, QIB_7220_RcvBTHQP_offset );

	return 0;
}

/**
 * Shut down receive datapath
 *
 * @v linda		Linda device
 */
static void linda_fini_recv ( struct linda *linda __unused ) {
	/* Nothing to do; all contexts were already disabled when the
	 * queue pairs were destroyed
	 */
}
/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int linda_create_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	static int cqn;

	/* The hardware has no concept of completion queues.  We
	 * simply use the association between CQs and WQs (already
	 * handled by the IB core) to decide which WQs to poll.
	 *
	 * We do set a CQN, just to avoid confusing debug messages
	 * from the IB core.
	 */
	cq->cqn = ++cqn;
	DBGC ( linda, "Linda %p CQN %ld created\n", linda, cq->cqn );

	return 0;
}
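
/* The polling strategy described above is implemented in
 * linda_poll_cq() further below, which simply walks cq->work_queues
 * and polls each attached send or receive work queue in turn.
 */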
/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void linda_destroy_cq ( struct ib_device *ibdev,
			       struct ib_completion_queue *cq ) {
	struct linda *linda = ib_get_drvdata ( ibdev );

	/* Nothing to do */
	DBGC ( linda, "Linda %p CQN %ld destroyed\n", linda, cq->cqn );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int linda_create_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	int ctx;
	int rc;

	/* Locate an available context */
	ctx = linda_alloc_ctx ( linda );
	if ( ctx < 0 ) {
		rc = ctx;
		goto err_alloc_ctx;
	}

	/* Set queue pair number based on context index */
	qp->qpn = linda_ctx_to_qpn ( ctx );

	/* Set work-queue private data pointers */
	ib_wq_set_drvdata ( &qp->send, &linda->send_wq[ctx] );
	ib_wq_set_drvdata ( &qp->recv, &linda->recv_wq[ctx] );

	/* Create receive work queue */
	if ( ( rc = linda_create_recv_wq ( linda, qp ) ) != 0 )
		goto err_create_recv_wq;

	/* Create send work queue */
	if ( ( rc = linda_create_send_wq ( linda, qp ) ) != 0 )
		goto err_create_send_wq;

	return 0;

	linda_destroy_send_wq ( linda, qp );
 err_create_send_wq:
	linda_destroy_recv_wq ( linda, qp );
 err_create_recv_wq:
	linda_free_ctx ( linda, ctx );
 err_alloc_ctx:
	return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v mod_list		Modification list
 * @ret rc		Return status code
 */
static int linda_modify_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     unsigned long mod_list __unused ) {
	struct linda *linda = ib_get_drvdata ( ibdev );

	/* Nothing to do; the hardware doesn't have a notion of queue
	 * keys
	 */
	DBGC ( linda, "Linda %p QPN %ld modified\n", linda, qp->qpn );
	return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void linda_destroy_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct linda *linda = ib_get_drvdata ( ibdev );

	linda_destroy_send_wq ( linda, qp );
	linda_destroy_recv_wq ( linda, qp );
}
/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int linda_post_send ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct ib_address_vector *av,
			     struct io_buffer *iobuf ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_SendPbc sendpbc;
	uint8_t header_buf[IB_MAX_HEADER_SIZE];
	struct io_buffer headers;
	unsigned int send_buf;
	unsigned long start_offset;
	unsigned long offset;
	size_t len;
	ssize_t frag_len;
	uint32_t *data;

	/* Allocate send buffer and calculate offset */
	send_buf = linda_alloc_send_buf ( linda );
	start_offset = offset = linda_send_buffer_offset ( linda, send_buf );

	/* Store I/O buffer and send buffer index */
	assert ( wq->iobufs[linda_wq->prod] == NULL );
	wq->iobufs[linda_wq->prod] = iobuf;
	linda_wq->send_buf[linda_wq->prod] = send_buf;

	/* Construct headers */
	iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
	iob_reserve ( &headers, sizeof ( header_buf ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), av );

	/* Calculate packet length */
	len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
		  iob_len ( iobuf ) + 3 ) & ~3 );

	/* Construct send per-buffer control word */
	memset ( &sendpbc, 0, sizeof ( sendpbc ) );
	BIT_FILL_2 ( &sendpbc,
		     LengthP1_toibc, ( ( len >> 2 ) - 1 ),
		     VL15, 1 );

	/* Write SendPbc */
	DBG_DISABLE ( DBGLVL_IO );
	linda_writeq ( linda, &sendpbc, offset );
	offset += sizeof ( sendpbc );

	/* Write headers */
	for ( data = headers.data, frag_len = iob_len ( &headers ) ;
	      frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
		linda_writel ( linda, *data, offset );
	}

	/* Write data */
	for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
	      frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
		linda_writel ( linda, *data, offset );
	}
	DBG_ENABLE ( DBGLVL_IO );

	assert ( ( start_offset + len ) == offset );
	DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) posted [%lx,%lx)\n",
		linda, qp->qpn, send_buf, linda_wq->prod,
		start_offset, offset );

	/* Increment producer counter */
	linda_wq->prod = ( ( linda_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );

	return 0;
}
/**
 * Complete send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v wqe_idx		Work queue entry index
 */
static void linda_complete_send ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp,
				  unsigned int wqe_idx ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct io_buffer *iobuf;
	unsigned int send_buf;

	/* Parse completion */
	send_buf = linda_wq->send_buf[wqe_idx];
	DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) complete\n",
		linda, qp->qpn, send_buf, wqe_idx );

	/* Complete work queue entry */
	iobuf = wq->iobufs[wqe_idx];
	assert ( iobuf != NULL );
	ib_complete_send ( ibdev, qp, iobuf, 0 );
	wq->iobufs[wqe_idx] = NULL;

	/* Free send buffer */
	linda_free_send_buf ( linda, send_buf );
}

/**
 * Poll send work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void linda_poll_send_wq ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	unsigned int send_buf;

	/* Look for completions */
	while ( wq->fill ) {

		/* Check to see if send buffer has completed */
		send_buf = linda_wq->send_buf[linda_wq->cons];
		if ( linda_send_buf_in_use ( linda, send_buf ) )
			break;

		/* Complete this buffer */
		linda_complete_send ( ibdev, qp, linda_wq->cons );

		/* Increment consumer counter */
		linda_wq->cons = ( ( linda_wq->cons + 1 ) &
				   ( wq->num_wqes - 1 ) );
	}
}
/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int linda_post_recv ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct io_buffer *iobuf ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_RcvEgr rcvegr;
	struct QIB_7220_scalar rcvegrindexhead;
	unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
	physaddr_t addr;
	size_t len;
	unsigned int wqe_idx;
	unsigned int bufsize;

	/* Sanity checks */
	addr = virt_to_bus ( iobuf->data );
	len = iob_tailroom ( iobuf );
	if ( addr & ( LINDA_EAGER_BUFFER_ALIGN - 1 ) ) {
		DBGC ( linda, "Linda %p QPN %ld misaligned RX buffer "
		       "(%08lx)\n", linda, qp->qpn, addr );
		return -EINVAL;
	}
	if ( len != LINDA_RECV_PAYLOAD_SIZE ) {
		DBGC ( linda, "Linda %p QPN %ld wrong RX buffer size (%zd)\n",
		       linda, qp->qpn, len );
		return -EINVAL;
	}

	/* Calculate eager producer index and WQE index */
	wqe_idx = ( linda_wq->eager_prod & ( wq->num_wqes - 1 ) );
	assert ( wq->iobufs[wqe_idx] == NULL );

	/* Store I/O buffer */
	wq->iobufs[wqe_idx] = iobuf;

	/* Calculate buffer size */
	switch ( LINDA_RECV_PAYLOAD_SIZE ) {
	case 2048:  bufsize = LINDA_EAGER_BUFFER_2K;  break;
	case 4096:  bufsize = LINDA_EAGER_BUFFER_4K;  break;
	case 8192:  bufsize = LINDA_EAGER_BUFFER_8K;  break;
	case 16384: bufsize = LINDA_EAGER_BUFFER_16K; break;
	case 32768: bufsize = LINDA_EAGER_BUFFER_32K; break;
	case 65536: bufsize = LINDA_EAGER_BUFFER_64K; break;
	default:    linker_assert ( 0, invalid_rx_payload_size );
		    bufsize = LINDA_EAGER_BUFFER_NONE;
	}

	/* Post eager buffer */
	memset ( &rcvegr, 0, sizeof ( rcvegr ) );
	BIT_FILL_2 ( &rcvegr,
		     Addr, ( addr >> 11 ),
		     BufSize, bufsize );
	linda_writeq_array8b ( linda, &rcvegr,
			       linda_wq->eager_array, linda_wq->eager_prod );
	DBGC2 ( linda, "Linda %p QPN %ld RX egr %d(%d) posted [%lx,%lx)\n",
		linda, qp->qpn, linda_wq->eager_prod, wqe_idx,
		addr, ( addr + len ) );

	/* Increment producer index */
	linda_wq->eager_prod = ( ( linda_wq->eager_prod + 1 ) &
				 ( linda_wq->eager_entries - 1 ) );

	/* Update head index */
	memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
	BIT_FILL_1 ( &rcvegrindexhead,
		     Value, ( ( linda_wq->eager_prod + 1 ) &
			      ( linda_wq->eager_entries - 1 ) ) );
	linda_writeq_array64k ( linda, &rcvegrindexhead,
				QIB_7220_RcvEgrIndexHead0_offset, ctx );

	return 0;
}
/**
 * Complete receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v header_offs	Header offset
 */
static void linda_complete_recv ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp,
				  unsigned int header_offs ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_RcvHdrFlags *rcvhdrflags;
	struct QIB_7220_RcvEgr rcvegr;
	struct io_buffer headers;
	struct io_buffer *iobuf;
	struct ib_queue_pair *intended_qp;
	struct ib_address_vector av;
	unsigned int rcvtype;
	unsigned int pktlen;
	unsigned int egrindex;
	unsigned int useegrbfr;
	unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
	unsigned int lenerr, parityerr, vcrcerr, icrcerr;
	unsigned int err;
	unsigned int hdrqoffset;
	unsigned int header_len;
	unsigned int padded_payload_len;
	unsigned int wqe_idx;
	size_t payload_len;
	int qp0;
	int rc;

	/* RcvHdrFlags are at the end of the header entry */
	rcvhdrflags = ( linda_wq->header + header_offs +
			LINDA_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
	rcvtype = BIT_GET ( rcvhdrflags, RcvType );
	pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
	egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
	useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
	hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
	iberr = BIT_GET ( rcvhdrflags, IBErr );
	mkerr = BIT_GET ( rcvhdrflags, MKErr );
	tiderr = BIT_GET ( rcvhdrflags, TIDErr );
	khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
	mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
	lenerr = BIT_GET ( rcvhdrflags, LenErr );
	parityerr = BIT_GET ( rcvhdrflags, ParityErr );
	vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
	icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
	header_len = ( LINDA_RECV_HEADER_SIZE - hdrqoffset -
		       sizeof ( *rcvhdrflags ) );
	padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
	err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
		lenerr | parityerr | vcrcerr | icrcerr );

	/* IB header is placed immediately before RcvHdrFlags */
	iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
		       header_len, header_len );

	/* Dump diagnostic information */
	if ( err || ( ! useegrbfr ) ) {
		DBGC ( linda, "Linda %p QPN %ld RX egr %d%s hdr %d type %d "
		       "len %d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", linda,
		       qp->qpn, egrindex, ( useegrbfr ? "" : "(unused)" ),
		       ( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
		       pktlen, header_len, padded_payload_len,
		       ( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
		       ( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
		       ( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
		       ( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
		       ( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
		       ( err ? "]" : "" ) );
	} else {
		DBGC2 ( linda, "Linda %p QPN %ld RX egr %d hdr %d type %d "
			"len %d(%d+%d+4)\n", linda, qp->qpn, egrindex,
			( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
			pktlen, header_len, padded_payload_len );
	}
	DBGCP_HDA ( linda, hdrqoffset, headers.data,
		    ( header_len + sizeof ( *rcvhdrflags ) ) );

	/* Parse header to generate address vector */
	qp0 = ( qp->qpn == 0 );
	intended_qp = NULL;
	if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
			      &payload_len, &av ) ) != 0 ) {
		DBGC ( linda, "Linda %p could not parse headers: %s\n",
		       linda, strerror ( rc ) );
		err = 1;
	}
	if ( ! intended_qp )
		intended_qp = qp;

	/* Complete this buffer and any skipped buffers.  Note that
	 * when the hardware runs out of buffers, it will repeatedly
	 * report the same buffer (the tail) as a TID error, and that
	 * it also has a habit of sometimes skipping over several
	 * buffers at once.
	 */
	while ( 1 ) {

		/* If we have caught up to the producer counter, stop.
		 * This will happen when the hardware first runs out
		 * of buffers and starts reporting TID errors against
		 * the eager buffer it wants to use next.
		 */
		if ( linda_wq->eager_cons == linda_wq->eager_prod )
			break;

		/* If we have caught up to where we should be after
		 * completing this egrindex, stop.  We phrase the test
		 * this way to avoid completing the entire ring when
		 * we receive the same egrindex twice in a row.
		 */
		if ( ( linda_wq->eager_cons ==
		       ( ( egrindex + 1 ) & ( linda_wq->eager_entries - 1 ) )))
			break;

		/* Identify work queue entry and corresponding I/O
		 * buffer.
		 */
		wqe_idx = ( linda_wq->eager_cons & ( wq->num_wqes - 1 ) );
		iobuf = wq->iobufs[wqe_idx];
		assert ( iobuf != NULL );
		wq->iobufs[wqe_idx] = NULL;

		/* Complete the eager buffer */
		if ( linda_wq->eager_cons == egrindex ) {
			/* Completing the eager buffer described in
			 * this header entry.
			 */
			iob_put ( iobuf, payload_len );
			rc = ( err ? -EIO : ( useegrbfr ? 0 : -ECANCELED ) );
			/* Redirect to target QP if necessary */
			if ( qp != intended_qp ) {
				DBGC ( linda, "Linda %p redirecting QPN %ld "
				       "=> %ld\n",
				       linda, qp->qpn, intended_qp->qpn );
				/* Compensate for incorrect fill levels */
				qp->recv.fill--;
				intended_qp->recv.fill++;
			}
			ib_complete_recv ( ibdev, intended_qp, &av, iobuf, rc);
		} else {
			/* Completing on a skipped-over eager buffer */
			ib_complete_recv ( ibdev, qp, &av, iobuf, -ECANCELED );
		}

		/* Clear eager buffer */
		memset ( &rcvegr, 0, sizeof ( rcvegr ) );
		linda_writeq_array8b ( linda, &rcvegr, linda_wq->eager_array,
				       linda_wq->eager_cons );

		/* Increment consumer index */
		linda_wq->eager_cons = ( ( linda_wq->eager_cons + 1 ) &
					 ( linda_wq->eager_entries - 1 ) );
	}
}
/**
 * Poll receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void linda_poll_recv_wq ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7220_RcvHdrHead0 rcvhdrhead;
	unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
	unsigned int header_prod;

	/* Check for received packets */
	header_prod = ( BIT_GET ( &linda_wq->header_prod, Value ) << 2 );
	if ( header_prod == linda_wq->header_cons )
		return;

	/* Process all received packets */
	while ( linda_wq->header_cons != header_prod ) {

		/* Complete the receive */
		linda_complete_recv ( ibdev, qp, linda_wq->header_cons );

		/* Increment the consumer offset */
		linda_wq->header_cons += LINDA_RECV_HEADER_SIZE;
		linda_wq->header_cons %= LINDA_RECV_HEADERS_SIZE;
	}

	/* Update consumer offset */
	memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
	BIT_FILL_2 ( &rcvhdrhead,
		     RcvHeadPointer, ( linda_wq->header_cons >> 2 ),
		     counter, 1 );
	linda_writeq_array64k ( linda, &rcvhdrhead,
				QIB_7220_RcvHdrHead0_offset, ctx );
}

/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void linda_poll_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq ) {
	struct ib_work_queue *wq;

	/* Poll associated send and receive queues */
	list_for_each_entry ( wq, &cq->work_queues, list ) {
		if ( wq->is_send ) {
			linda_poll_send_wq ( ibdev, wq->qp );
		} else {
			linda_poll_recv_wq ( ibdev, wq->qp );
		}
	}
}
/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

/**
 * Poll event queue
 *
 * @v ibdev		Infiniband device
 */
static void linda_poll_eq ( struct ib_device *ibdev ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct QIB_7220_ErrStatus errstatus;
	struct QIB_7220_ErrClear errclear;

	/* Check for link status changes */
	DBG_DISABLE ( DBGLVL_IO );
	linda_readq ( linda, &errstatus, QIB_7220_ErrStatus_offset );
	DBG_ENABLE ( DBGLVL_IO );
	if ( BIT_GET ( &errstatus, IBStatusChanged ) ) {
		linda_link_state_changed ( ibdev );
		memset ( &errclear, 0, sizeof ( errclear ) );
		BIT_FILL_1 ( &errclear, IBStatusChangedClear, 1 );
		linda_writeq ( linda, &errclear, QIB_7220_ErrClear_offset );
	}
}

/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int linda_open ( struct ib_device *ibdev ) {
	struct linda *linda = ib_get_drvdata ( ibdev );
	struct QIB_7220_Control control;
	/* Enable link */
  1223. linda_readq ( linda, &control, QIB_7220_Control_offset );
  1224. BIT_SET ( &control, LinkEn, 1 );
  1225. linda_writeq ( linda, &control, QIB_7220_Control_offset );
  1226. return 0;
  1227. }
  1228. /**
  1229. * Close Infiniband link
  1230. *
  1231. * @v ibdev Infiniband device
  1232. */
  1233. static void linda_close ( struct ib_device *ibdev ) {
  1234. struct linda *linda = ib_get_drvdata ( ibdev );
  1235. struct QIB_7220_Control control;
  1236. /* Disable link */
  1237. linda_readq ( linda, &control, QIB_7220_Control_offset );
  1238. BIT_SET ( &control, LinkEn, 0 );
  1239. linda_writeq ( linda, &control, QIB_7220_Control_offset );
  1240. }
/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int linda_mcast_attach ( struct ib_device *ibdev,
				struct ib_queue_pair *qp,
				struct ib_gid *gid ) {
	struct linda *linda = ib_get_drvdata ( ibdev );

	( void ) linda;
	( void ) qp;
	( void ) gid;
	return 0;
}

/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void linda_mcast_detach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 struct ib_gid *gid ) {
	struct linda *linda = ib_get_drvdata ( ibdev );

	( void ) linda;
	( void ) qp;
	( void ) gid;
}

/** Linda Infiniband operations */
static struct ib_device_operations linda_ib_operations = {
	.create_cq = linda_create_cq,
	.destroy_cq = linda_destroy_cq,
	.create_qp = linda_create_qp,
	.modify_qp = linda_modify_qp,
	.destroy_qp = linda_destroy_qp,
	.post_send = linda_post_send,
	.post_recv = linda_post_recv,
	.poll_cq = linda_poll_cq,
	.poll_eq = linda_poll_eq,
	.open = linda_open,
	.close = linda_close,
	.mcast_attach = linda_mcast_attach,
	.mcast_detach = linda_mcast_detach,
	.set_port_info = linda_set_port_info,
};

/***************************************************************************
 *
 * I2C bus operations
 *
 ***************************************************************************
 */
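
/* Note: the on-board EEPROM hangs off a software-driven two-wire bus.
 * SCL and SDA are mapped onto GPIO pins (see linda_i2c_bits below) and
 * toggled through the generic i2c bit-bashing framework, so no
 * dedicated I2C controller is involved.
 */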

/** Linda I2C bit to GPIO mappings */
static unsigned int linda_i2c_bits[] = {
	[I2C_BIT_SCL] = ( 1 << LINDA_GPIO_SCL ),
	[I2C_BIT_SDA] = ( 1 << LINDA_GPIO_SDA ),
};

/**
 * Read Linda I2C line status
 *
 * @v basher		Bit-bashing interface
 * @v bit_id		Bit number
 * @ret zero		Input is a logic 0
 * @ret non-zero	Input is a logic 1
 */
static int linda_i2c_read_bit ( struct bit_basher *basher,
				unsigned int bit_id ) {
	struct linda *linda =
		container_of ( basher, struct linda, i2c.basher );
	struct QIB_7220_EXTStatus extstatus;
	unsigned int status;

	DBG_DISABLE ( DBGLVL_IO );
	linda_readq ( linda, &extstatus, QIB_7220_EXTStatus_offset );
	status = ( BIT_GET ( &extstatus, GPIOIn ) & linda_i2c_bits[bit_id] );
	DBG_ENABLE ( DBGLVL_IO );

	return status;
}

/**
 * Write Linda I2C line status
 *
 * @v basher		Bit-bashing interface
 * @v bit_id		Bit number
 * @v data		Value to write
 */
static void linda_i2c_write_bit ( struct bit_basher *basher,
				  unsigned int bit_id, unsigned long data ) {
	struct linda *linda =
		container_of ( basher, struct linda, i2c.basher );
	struct QIB_7220_EXTCtrl extctrl;
	struct QIB_7220_GPIO gpioout;
	unsigned int bit = linda_i2c_bits[bit_id];
	unsigned int outputs = 0;
	unsigned int output_enables = 0;

	DBG_DISABLE ( DBGLVL_IO );

	/* Read current GPIO mask and outputs */
	linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
	linda_readq ( linda, &gpioout, QIB_7220_GPIOOut_offset );

	/* Update outputs and output enables.  I2C lines are tied
	 * high, so we always set the output to 0 and use the output
	 * enable to control the line.
	 */
	output_enables = BIT_GET ( &extctrl, GPIOOe );
	output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
	outputs = BIT_GET ( &gpioout, GPIO );
	outputs = ( outputs & ~bit );
	BIT_SET ( &extctrl, GPIOOe, output_enables );
	BIT_SET ( &gpioout, GPIO, outputs );

	/* Write the output enable first; that way we avoid logic
	 * hazards.
	 */
	linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
	linda_writeq ( linda, &gpioout, QIB_7220_GPIOOut_offset );
	mb();

	DBG_ENABLE ( DBGLVL_IO );
}

/** Linda I2C bit-bashing interface operations */
static struct bit_basher_operations linda_i2c_basher_ops = {
	.read = linda_i2c_read_bit,
	.write = linda_i2c_write_bit,
};

/**
 * Initialise Linda I2C subsystem
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_init_i2c ( struct linda *linda ) {
	static int try_eeprom_address[] = { 0x51, 0x50 };
	unsigned int i;
	int rc;

	/* Initialise bus */
	if ( ( rc = init_i2c_bit_basher ( &linda->i2c,
					  &linda_i2c_basher_ops ) ) != 0 ) {
		DBGC ( linda, "Linda %p could not initialise I2C bus: %s\n",
		       linda, strerror ( rc ) );
		return rc;
	}

	/* Probe for devices */
	for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
			    sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
		init_i2c_eeprom ( &linda->eeprom, try_eeprom_address[i] );
		if ( ( rc = i2c_check_presence ( &linda->i2c.i2c,
						 &linda->eeprom ) ) == 0 ) {
			DBGC2 ( linda, "Linda %p found EEPROM at %02x\n",
				linda, try_eeprom_address[i] );
			return 0;
		}
	}

	DBGC ( linda, "Linda %p could not find EEPROM\n", linda );
	return -ENODEV;
}

/**
 * Read EEPROM parameters
 *
 * @v linda		Linda device
 * @v guid		GUID to fill in
 * @ret rc		Return status code
 */
static int linda_read_eeprom ( struct linda *linda,
			       struct ib_gid_half *guid ) {
	struct i2c_interface *i2c = &linda->i2c.i2c;
	int rc;

	/* Read GUID */
	if ( ( rc = i2c->read ( i2c, &linda->eeprom, LINDA_EEPROM_GUID_OFFSET,
				guid->bytes, sizeof ( *guid ) ) ) != 0 ) {
		DBGC ( linda, "Linda %p could not read GUID: %s\n",
		       linda, strerror ( rc ) );
		return rc;
	}
	DBGC2 ( linda, "Linda %p has GUID %02x:%02x:%02x:%02x:%02x:%02x:"
		"%02x:%02x\n", linda, guid->bytes[0], guid->bytes[1],
		guid->bytes[2], guid->bytes[3], guid->bytes[4],
		guid->bytes[5], guid->bytes[6], guid->bytes[7] );

	/* Read serial number (debug only) */
	if ( DBG_LOG ) {
		uint8_t serial[LINDA_EEPROM_SERIAL_SIZE + 1];

		serial[ sizeof ( serial ) - 1 ] = '\0';
		if ( ( rc = i2c->read ( i2c, &linda->eeprom,
					LINDA_EEPROM_SERIAL_OFFSET, serial,
					( sizeof ( serial ) - 1 ) ) ) != 0 ) {
			DBGC ( linda, "Linda %p could not read serial: %s\n",
			       linda, strerror ( rc ) );
			return rc;
		}
		DBGC2 ( linda, "Linda %p has serial number \"%s\"\n",
			linda, serial );
	}

	return 0;
}

/***************************************************************************
 *
 * External parallel bus access
 *
 ***************************************************************************
 */
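
/* Note: the IB SerDes and its management microcontroller sit behind an
 * "external parallel bus" (EPB) that is shared with the chip's own
 * firmware.  The access pattern used by the helpers below is: request
 * bus ownership and poll until it is granted, issue transactions via
 * the transaction register (polling for ready/error after each one),
 * then release ownership again.
 */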

/**
 * Request ownership of the IB external parallel bus
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_ib_epb_request ( struct linda *linda ) {
	struct QIB_7220_ibsd_epb_access_ctrl access;
	unsigned int i;

	/* Request ownership */
	memset ( &access, 0, sizeof ( access ) );
	BIT_FILL_1 ( &access, sw_ib_epb_req, 1 );
	linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );

	/* Wait for ownership to be granted */
	for ( i = 0 ; i < LINDA_EPB_REQUEST_MAX_WAIT_US ; i++ ) {
		linda_readq ( linda, &access,
			      QIB_7220_ibsd_epb_access_ctrl_offset );
		if ( BIT_GET ( &access, sw_ib_epb_req_granted ) )
			return 0;
		udelay ( 1 );
	}

	DBGC ( linda, "Linda %p timed out waiting for IB EPB request\n",
	       linda );
	return -ETIMEDOUT;
}

/**
 * Wait for IB external parallel bus transaction to complete
 *
 * @v linda		Linda device
 * @v xact		Buffer to hold transaction result
 * @ret rc		Return status code
 */
static int linda_ib_epb_wait ( struct linda *linda,
			       struct QIB_7220_ibsd_epb_transaction_reg *xact ) {
	unsigned int i;

	/* Discard first read to allow for signals crossing clock domains */
	linda_readq ( linda, xact, QIB_7220_ibsd_epb_transaction_reg_offset );

	for ( i = 0 ; i < LINDA_EPB_XACT_MAX_WAIT_US ; i++ ) {
		linda_readq ( linda, xact,
			      QIB_7220_ibsd_epb_transaction_reg_offset );
		if ( BIT_GET ( xact, ib_epb_rdy ) ) {
			if ( BIT_GET ( xact, ib_epb_req_error ) ) {
				DBGC ( linda, "Linda %p EPB transaction "
				       "failed\n", linda );
				return -EIO;
			} else {
				return 0;
			}
		}
		udelay ( 1 );
	}

	DBGC ( linda, "Linda %p timed out waiting for IB EPB transaction\n",
	       linda );
	return -ETIMEDOUT;
}

/**
 * Release ownership of the IB external parallel bus
 *
 * @v linda		Linda device
 */
static void linda_ib_epb_release ( struct linda *linda ) {
	struct QIB_7220_ibsd_epb_access_ctrl access;

	memset ( &access, 0, sizeof ( access ) );
	BIT_FILL_1 ( &access, sw_ib_epb_req, 0 );
	linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );
}

/**
 * Read data via IB external parallel bus
 *
 * @v linda		Linda device
 * @v location		EPB location
 * @ret data		Data read, or negative error
 *
 * You must have already acquired ownership of the IB external
 * parallel bus.
 */
static int linda_ib_epb_read ( struct linda *linda, unsigned int location ) {
	struct QIB_7220_ibsd_epb_transaction_reg xact;
	unsigned int data;
	int rc;

	/* Ensure no transaction is currently in progress */
	if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
		return rc;

	/* Process data */
	memset ( &xact, 0, sizeof ( xact ) );
	BIT_FILL_3 ( &xact,
		     ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
		     ib_epb_read_write, LINDA_EPB_READ,
		     ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
	linda_writeq ( linda, &xact,
		       QIB_7220_ibsd_epb_transaction_reg_offset );

	/* Wait for transaction to complete */
	if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
		return rc;

	data = BIT_GET ( &xact, ib_epb_data );
	return data;
}

/**
 * Write data via IB external parallel bus
 *
 * @v linda		Linda device
 * @v location		EPB location
 * @v data		Data to write
 * @ret rc		Return status code
 *
 * You must have already acquired ownership of the IB external
 * parallel bus.
 */
static int linda_ib_epb_write ( struct linda *linda, unsigned int location,
				unsigned int data ) {
	struct QIB_7220_ibsd_epb_transaction_reg xact;
	int rc;

	/* Ensure no transaction is currently in progress */
	if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
		return rc;

	/* Process data */
	memset ( &xact, 0, sizeof ( xact ) );
	BIT_FILL_4 ( &xact,
		     ib_epb_data, data,
		     ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
		     ib_epb_read_write, LINDA_EPB_WRITE,
		     ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
	linda_writeq ( linda, &xact,
		       QIB_7220_ibsd_epb_transaction_reg_offset );

	/* Wait for transaction to complete */
	if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
		return rc;

	return 0;
}
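
/* Note: EPB registers are only eight bits wide.  A mask of 0xff in
 * linda_ib_epb_mod_reg() therefore means "replace the whole register",
 * which lets the initial read be skipped; any narrower mask forces a
 * full read-modify-write cycle.
 */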

/**
 * Read/modify/write EPB register
 *
 * @v linda		Linda device
 * @v cs		Chip select
 * @v channel		Channel
 * @v element		Element
 * @v reg		Register
 * @v value		Value to set
 * @v mask		Mask to apply to old value
 * @ret rc		Return status code
 */
static int linda_ib_epb_mod_reg ( struct linda *linda, unsigned int cs,
				  unsigned int channel, unsigned int element,
				  unsigned int reg, unsigned int value,
				  unsigned int mask ) {
	unsigned int location;
	int old_value;
	int rc;

	DBG_DISABLE ( DBGLVL_IO );

	/* Sanity check */
	assert ( ( value & mask ) == value );

	/* Acquire bus ownership */
	if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
		goto out;

	/* Read existing value, if necessary */
	location = LINDA_EPB_LOC ( cs, channel, element, reg );
	if ( ( ~mask ) & 0xff ) {
		old_value = linda_ib_epb_read ( linda, location );
		if ( old_value < 0 ) {
			rc = old_value;
			goto out_release;
		}
	} else {
		old_value = 0;
	}

	/* Update value */
	value = ( ( old_value & ~mask ) | value );
	DBGCP ( linda, "Linda %p CS %d EPB(%d,%d,%#02x) %#02x => %#02x\n",
		linda, cs, channel, element, reg, old_value, value );
	if ( ( rc = linda_ib_epb_write ( linda, location, value ) ) != 0 )
		goto out_release;

 out_release:
	/* Release bus */
	linda_ib_epb_release ( linda );
 out:
	DBG_ENABLE ( DBGLVL_IO );
	return rc;
}
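
/* Note: microcontroller RAM is reached through the same EPB.  The
 * transfer routine below programs a control register to select read or
 * write mode, loads the start address as separate high and low bytes,
 * then streams bytes through a single data location; the control
 * register is cleared again at each LINDA_EPB_UC_CHUNK_SIZE boundary.
 */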

/**
 * Transfer data to/from microcontroller RAM
 *
 * @v linda		Linda device
 * @v address		Starting address
 * @v write		Data to write, or NULL
 * @v read		Data to read, or NULL
 * @v len		Length of data
 * @ret rc		Return status code
 */
static int linda_ib_epb_ram_xfer ( struct linda *linda, unsigned int address,
				   const void *write, void *read,
				   size_t len ) {
	unsigned int control;
	unsigned int address_hi;
	unsigned int address_lo;
	int data;
	int rc;

	DBG_DISABLE ( DBGLVL_IO );

	assert ( ! ( write && read ) );
	assert ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );
	assert ( ( len % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );

	/* Acquire bus ownership */
	if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
		goto out;

	/* Process data */
	while ( len ) {

		/* Reset the address for each new chunk */
		if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {

			/* Write the control register */
			control = ( read ? LINDA_EPB_UC_CTL_READ :
				    LINDA_EPB_UC_CTL_WRITE );
			if ( ( rc = linda_ib_epb_write ( linda,
							 LINDA_EPB_UC_CTL,
							 control ) ) != 0 )
				break;

			/* Write the address registers */
			address_hi = ( address >> 8 );
			if ( ( rc = linda_ib_epb_write ( linda,
							 LINDA_EPB_UC_ADDR_HI,
							 address_hi ) ) != 0 )
				break;
			address_lo = ( address & 0xff );
			if ( ( rc = linda_ib_epb_write ( linda,
							 LINDA_EPB_UC_ADDR_LO,
							 address_lo ) ) != 0 )
				break;
		}

		/* Read or write the data */
		if ( read ) {
			data = linda_ib_epb_read ( linda, LINDA_EPB_UC_DATA );
			if ( data < 0 ) {
				rc = data;
				break;
			}
			*( ( uint8_t * ) read++ ) = data;
		} else {
			data = *( ( uint8_t * ) write++ );
			if ( ( rc = linda_ib_epb_write ( linda,
							 LINDA_EPB_UC_DATA,
							 data ) ) != 0 )
				break;
		}
		address++;
		len--;

		/* Reset the control byte after each chunk */
		if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {
			if ( ( rc = linda_ib_epb_write ( linda,
							 LINDA_EPB_UC_CTL,
							 0 ) ) != 0 )
				break;
		}
	}

	/* Release bus */
	linda_ib_epb_release ( linda );
 out:
	DBG_ENABLE ( DBGLVL_IO );
	return rc;
}

/***************************************************************************
 *
 * Infiniband SerDes initialisation
 *
 ***************************************************************************
 */
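
/* Note: SerDes configuration is expressed as tables of
 * ( address, value, mask ) triples applied via EPB read-modify-write.
 * Addresses are packed with LINDA_EPB_ADDRESS(); the magic channel
 * number LINDA_EPB_ALL_CHANNELS expands a single entry to channels 0-3.
 */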

/** A Linda SerDes parameter */
struct linda_serdes_param {
	/** EPB address as constructed by LINDA_EPB_ADDRESS() */
	uint16_t address;
	/** Value to set */
	uint8_t value;
	/** Mask to apply to old value */
	uint8_t mask;
} __packed;

/** Magic "all channels" channel number */
#define LINDA_EPB_ALL_CHANNELS 31

/** End of SerDes parameter list marker */
#define LINDA_SERDES_PARAM_END { 0, 0, 0 }
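
/* Note: linda_set_serdes_params() stops at the first entry with a zero
 * mask, so LINDA_SERDES_PARAM_END must remain all-zero.
 */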

/**
 * Program IB SerDes register(s)
 *
 * @v linda		Linda device
 * @v param		SerDes parameter
 * @ret rc		Return status code
 */
static int linda_set_serdes_param ( struct linda *linda,
				    struct linda_serdes_param *param ) {
	unsigned int channel;
	unsigned int channel_start;
	unsigned int channel_end;
	unsigned int element;
	unsigned int reg;
	int rc;

	/* Break down the EPB address and determine channels */
	channel = LINDA_EPB_ADDRESS_CHANNEL ( param->address );
	element = LINDA_EPB_ADDRESS_ELEMENT ( param->address );
	reg = LINDA_EPB_ADDRESS_REG ( param->address );
	if ( channel == LINDA_EPB_ALL_CHANNELS ) {
		channel_start = 0;
		channel_end = 3;
	} else {
		channel_start = channel_end = channel;
	}

	/* Modify register for each specified channel */
	for ( channel = channel_start ; channel <= channel_end ; channel++ ) {
		if ( ( rc = linda_ib_epb_mod_reg ( linda, LINDA_EPB_CS_SERDES,
						   channel, element, reg,
						   param->value,
						   param->mask ) ) != 0 )
			return rc;
	}

	return 0;
}

/**
 * Program IB SerDes registers
 *
 * @v linda		Linda device
 * @v params		SerDes parameter list, terminated by an all-zero entry
 * @ret rc		Return status code
 */
static int linda_set_serdes_params ( struct linda *linda,
				     struct linda_serdes_param *params ) {
	int rc;

	for ( ; params->mask != 0 ; params++ ) {
		if ( ( rc = linda_set_serdes_param ( linda,
						     params ) ) != 0 )
			return rc;
	}

	return 0;
}

#define LINDA_DDS_VAL( amp_d, main_d, ipst_d, ipre_d,			\
		       amp_s, main_s, ipst_s, ipre_s )			\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x00 ),	\
	  ( ( ( amp_d & 0x1f ) << 1 ) | 1 ), 0xff },			\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x01 ),	\
	  ( ( ( amp_s & 0x1f ) << 1 ) | 1 ), 0xff },			\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x09 ),	\
	  ( ( main_d << 3 ) | 4 | ( ipre_d >> 2 ) ), 0xff },		\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x0a ),	\
	  ( ( main_s << 3 ) | 4 | ( ipre_s >> 2 ) ), 0xff },		\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x06 ),	\
	  ( ( ( ipst_d & 0xf ) << 1 ) |					\
	    ( ( ipre_d & 3 ) << 6 ) | 0x21 ), 0xff },			\
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x07 ),	\
	  ( ( ( ipst_s & 0xf ) << 1 ) |					\
	    ( ( ipre_s & 3 ) << 6 ) | 0x21 ), 0xff }
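
/* Note: the "_d"/"_s" parameter pairs above appear to be separate DDR
 * and SDR transmit drive settings (amplitude, main tap, and post-/
 * pre-cursor emphasis); this split is inferred from the macro and the
 * Linux driver's tables rather than documented here.
 */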

/**
 * Linda SerDes default parameters
 *
 * These magic start-of-day values are taken from the Linux driver.
 */
static struct linda_serdes_param linda_serdes_defaults1[] = {
	/* RXHSCTRL0 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x00 ), 0xd4, 0xff },
	/* VCDL_DAC2 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x05 ), 0x2d, 0xff },
	/* VCDL_CTRL2 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x08 ), 0x03, 0x0f },
	/* START_EQ1 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
	/* START_EQ2 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x28 ), 0x30, 0xff },
	/* BACTRL */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0e ), 0x40, 0xff },
	/* LDOUTCTRL1 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x04, 0xff },
	/* RXHSSTATUS */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0f ), 0x04, 0xff },
	/* End of this block */
	LINDA_SERDES_PARAM_END
};

static struct linda_serdes_param linda_serdes_defaults2[] = {
	/* LDOUTCTRL1 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x00, 0xff },
	/* DDS values */
	LINDA_DDS_VAL ( 31, 19, 12, 0, 29, 22, 9, 0 ),
	/* Set Rcv Eq. to Preset node */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
	/* DFELTHFDR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x08 ), 0x00, 0xff },
	/* DFELTHHDR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x21 ), 0x00, 0xff },
	/* TLTHFDR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x09 ), 0x02, 0xff },
	/* TLTHHDR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x23 ), 0x02, 0xff },
	/* ZFR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1b ), 0x0c, 0xff },
	/* ZCNT */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1c ), 0x0c, 0xff },
	/* GFR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1e ), 0x10, 0xff },
	/* GHR */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1f ), 0x10, 0xff },
	/* VCDL_CTRL0 toggle */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x20, 0xff },
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x00, 0xff },
	/* CMUCTRL5 */
	{ LINDA_EPB_ADDRESS ( 7, 0, 0x15 ), 0x80, 0xff },
	/* End of this block */
	LINDA_SERDES_PARAM_END
};

static struct linda_serdes_param linda_serdes_defaults3[] = {
	/* START_EQ1 */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x00, 0x38 },
	/* End of this block */
	LINDA_SERDES_PARAM_END
};

/**
 * Program the microcontroller RAM
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_program_uc_ram ( struct linda *linda ) {
	int rc;

	if ( ( rc = linda_ib_epb_ram_xfer ( linda, 0, linda_ib_fw, NULL,
					    sizeof ( linda_ib_fw ) ) ) != 0 ) {
		DBGC ( linda, "Linda %p could not load IB firmware: %s\n",
		       linda, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Verify the microcontroller RAM
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_verify_uc_ram ( struct linda *linda ) {
	uint8_t verify[LINDA_EPB_UC_CHUNK_SIZE];
	unsigned int offset;
	int rc;

	for ( offset = 0 ; offset < sizeof ( linda_ib_fw ) ;
	      offset += sizeof ( verify ) ) {
		if ( ( rc = linda_ib_epb_ram_xfer ( linda, offset,
						    NULL, verify,
						    sizeof ( verify ) ) ) != 0 ) {
			DBGC ( linda, "Linda %p could not read back IB "
			       "firmware: %s\n", linda, strerror ( rc ) );
			return rc;
		}
		if ( memcmp ( ( linda_ib_fw + offset ), verify,
			      sizeof ( verify ) ) != 0 ) {
			DBGC ( linda, "Linda %p firmware verification failed "
			       "at offset %#x\n", linda, offset );
			DBGC_HDA ( linda, offset, ( linda_ib_fw + offset ),
				   sizeof ( verify ) );
			DBGC_HDA ( linda, offset, verify, sizeof ( verify ) );
			return -EIO;
		}
	}

	DBGC2 ( linda, "Linda %p firmware verified ok\n", linda );
	return 0;
}

/**
 * Use the microcontroller to trim the IB link
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_trim_ib ( struct linda *linda ) {
	struct QIB_7220_IBSerDesCtrl ctrl;
	struct QIB_7220_IntStatus intstatus;
	unsigned int i;
	int rc;

	/* Bring the microcontroller out of reset */
	linda_readq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
	BIT_SET ( &ctrl, ResetIB_uC_Core, 0 );
	linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );

	/* Wait for the "trim done" signal */
	for ( i = 0 ; i < LINDA_TRIM_DONE_MAX_WAIT_MS ; i++ ) {
		linda_readq ( linda, &intstatus, QIB_7220_IntStatus_offset );
		if ( BIT_GET ( &intstatus, IBSerdesTrimDone ) ) {
			rc = 0;
			goto out_reset;
		}
		mdelay ( 1 );
	}

	DBGC ( linda, "Linda %p timed out waiting for trim done\n", linda );
	rc = -ETIMEDOUT;

 out_reset:
	/* Put the microcontroller back into reset */
	BIT_SET ( &ctrl, ResetIB_uC_Core, 1 );
	linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );

	return rc;
}

/**
 * Initialise the IB SerDes
 *
 * @v linda		Linda device
 * @ret rc		Return status code
 */
static int linda_init_ib_serdes ( struct linda *linda ) {
	struct QIB_7220_Control control;
	struct QIB_7220_IBCCtrl ibcctrl;
	struct QIB_7220_IBCDDRCtrl ibcddrctrl;
	struct QIB_7220_XGXSCfg xgxscfg;
	int rc;

	/* Disable link */
	linda_readq ( linda, &control, QIB_7220_Control_offset );
	BIT_SET ( &control, LinkEn, 0 );
	linda_writeq ( linda, &control, QIB_7220_Control_offset );

	/* Configure sensible defaults for IBC */
	memset ( &ibcctrl, 0, sizeof ( ibcctrl ) );
	BIT_FILL_6 ( &ibcctrl, /* Tuning values taken from Linux driver */
		     FlowCtrlPeriod, 0x03,
		     FlowCtrlWaterMark, 0x05,
		     MaxPktLen, ( ( LINDA_RECV_HEADER_SIZE +
				    LINDA_RECV_PAYLOAD_SIZE +
				    4 /* ICRC */ ) >> 2 ),
		     PhyerrThreshold, 0xf,
		     OverrunThreshold, 0xf,
		     CreditScale, 0x4 );
	linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );

	/* Force SDR only to avoid needing all the DDR tuning,
	 * Mellanox compatibility hacks etc.  SDR is plenty for
	 * boot-time operation.
	 */
	linda_readq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );
	BIT_SET ( &ibcddrctrl, IB_ENHANCED_MODE, 0 );
	BIT_SET ( &ibcddrctrl, SD_SPEED_SDR, 1 );
	BIT_SET ( &ibcddrctrl, SD_SPEED_DDR, 0 );
	BIT_SET ( &ibcddrctrl, SD_SPEED_QDR, 0 );
	BIT_SET ( &ibcddrctrl, HRTBT_ENB, 0 );
	BIT_SET ( &ibcddrctrl, HRTBT_AUTO, 0 );
	linda_writeq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );

	/* Set default SerDes parameters */
	if ( ( rc = linda_set_serdes_params ( linda,
					      linda_serdes_defaults1 ) ) != 0 )
		return rc;
	udelay ( 415 ); /* Magic delay while SerDes sorts itself out */
	if ( ( rc = linda_set_serdes_params ( linda,
					      linda_serdes_defaults2 ) ) != 0 )
		return rc;

	/* Program the microcontroller RAM */
	if ( ( rc = linda_program_uc_ram ( linda ) ) != 0 )
		return rc;

	/* Verify the microcontroller RAM contents */
	if ( DBGLVL_LOG ) {
		if ( ( rc = linda_verify_uc_ram ( linda ) ) != 0 )
			return rc;
	}

	/* More SerDes tuning */
	if ( ( rc = linda_set_serdes_params ( linda,
					      linda_serdes_defaults3 ) ) != 0 )
		return rc;

	/* Use the microcontroller to trim the IB link */
	if ( ( rc = linda_trim_ib ( linda ) ) != 0 )
		return rc;

	/* Bring XGXS out of reset */
	linda_readq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );
	BIT_SET ( &xgxscfg, tx_rx_reset, 0 );
	BIT_SET ( &xgxscfg, xcv_reset, 0 );
	linda_writeq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );

	return rc;
}

/***************************************************************************
 *
 * PCI layer interface
 *
 ***************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @v id		PCI ID
 * @ret rc		Return status code
 */
static int linda_probe ( struct pci_device *pci,
			 const struct pci_device_id *id __unused ) {
	struct ib_device *ibdev;
	struct linda *linda;
	struct QIB_7220_Revision revision;
	int rc;

	/* Allocate Infiniband device */
	ibdev = alloc_ibdev ( sizeof ( *linda ) );
	if ( ! ibdev ) {
		rc = -ENOMEM;
		goto err_alloc_ibdev;
	}
	pci_set_drvdata ( pci, ibdev );
	linda = ib_get_drvdata ( ibdev );
	ibdev->op = &linda_ib_operations;
	ibdev->dev = &pci->dev;
	ibdev->port = 1;

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	linda->regs = ioremap ( pci->membase, LINDA_BAR0_SIZE );
	DBGC2 ( linda, "Linda %p has BAR at %08lx\n", linda, pci->membase );

	/* Print some general data */
	linda_readq ( linda, &revision, QIB_7220_Revision_offset );
	DBGC2 ( linda, "Linda %p board %02lx v%ld.%ld.%ld.%ld\n", linda,
		BIT_GET ( &revision, BoardID ),
		BIT_GET ( &revision, R_SW ),
		BIT_GET ( &revision, R_Arch ),
		BIT_GET ( &revision, R_ChipRevMajor ),
		BIT_GET ( &revision, R_ChipRevMinor ) );

	/* Initialise I2C subsystem */
	if ( ( rc = linda_init_i2c ( linda ) ) != 0 )
		goto err_init_i2c;

	/* Read EEPROM parameters */
	if ( ( rc = linda_read_eeprom ( linda, &ibdev->gid.u.half[1] ) ) != 0 )
		goto err_read_eeprom;

	/* Initialise send datapath */
	if ( ( rc = linda_init_send ( linda ) ) != 0 )
		goto err_init_send;

	/* Initialise receive datapath */
	if ( ( rc = linda_init_recv ( linda ) ) != 0 )
		goto err_init_recv;

	/* Initialise the IB SerDes */
	if ( ( rc = linda_init_ib_serdes ( linda ) ) != 0 )
		goto err_init_ib_serdes;

	/* Create the SMA */
	if ( ( rc = ib_create_sma ( &linda->sma, ibdev ) ) != 0 )
		goto err_create_sma;
	/* If the SMA doesn't get context 0, we're screwed */
	assert ( linda_qpn_to_ctx ( linda->sma.gma.qp->qpn ) == 0 );

	/* Register Infiniband device */
	if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
		DBGC ( linda, "Linda %p could not register IB "
		       "device: %s\n", linda, strerror ( rc ) );
		goto err_register_ibdev;
	}

	return 0;

	unregister_ibdev ( ibdev );
 err_register_ibdev:
	ib_destroy_sma ( &linda->sma );
 err_create_sma:
	linda_fini_recv ( linda );
 err_init_recv:
	linda_fini_send ( linda );
 err_init_send:
 err_init_ib_serdes:
 err_read_eeprom:
 err_init_i2c:
	ibdev_put ( ibdev );
 err_alloc_ibdev:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void linda_remove ( struct pci_device *pci ) {
	struct ib_device *ibdev = pci_get_drvdata ( pci );
	struct linda *linda = ib_get_drvdata ( ibdev );

	unregister_ibdev ( ibdev );
	ib_destroy_sma ( &linda->sma );
	linda_fini_recv ( linda );
	linda_fini_send ( linda );
	ibdev_put ( ibdev );
}

static struct pci_device_id linda_nics[] = {
	PCI_ROM ( 0x1077, 0x7220, "iba7220", "QLE7240/7280 HCA driver", 0 ),
};

struct pci_driver linda_driver __pci_driver = {
	.ids = linda_nics,
	.id_count = ( sizeof ( linda_nics ) / sizeof ( linda_nics[0] ) ),
	.probe = linda_probe,
	.remove = linda_remove,
};