You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ath5k.c 41KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658
  1. /*
  2. * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
  3. * Copyright (c) 2004-2005 Atheros Communications, Inc.
  4. * Copyright (c) 2006 Devicescape Software, Inc.
  5. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
  6. * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
  7. *
  8. * Modified for iPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
  9. * Original from Linux kernel 2.6.30.
  10. *
  11. * All rights reserved.
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. * 1. Redistributions of source code must retain the above copyright
  17. * notice, this list of conditions and the following disclaimer,
  18. * without modification.
  19. * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  20. * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
  21. * redistribution must be conditioned upon including a substantially
  22. * similar Disclaimer requirement for further binary redistribution.
  23. * 3. Neither the names of the above-listed copyright holders nor the names
  24. * of any contributors may be used to endorse or promote products derived
  25. * from this software without specific prior written permission.
  26. *
  27. * Alternatively, this software may be distributed under the terms of the
  28. * GNU General Public License ("GPL") version 2 as published by the Free
  29. * Software Foundation.
  30. *
  31. * NO WARRANTY
  32. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  33. * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  34. * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
  35. * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
  36. * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
  37. * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  38. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  39. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
  40. * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  41. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  42. * THE POSSIBILITY OF SUCH DAMAGES.
  43. *
  44. */
  45. FILE_LICENCE ( BSD3 );
  46. #include <stdlib.h>
  47. #include <ipxe/malloc.h>
  48. #include <ipxe/timer.h>
  49. #include <ipxe/netdevice.h>
  50. #include <ipxe/pci.h>
  51. #include <ipxe/pci_io.h>
  52. #include "base.h"
  53. #include "reg.h"
  54. #define ATH5K_CALIB_INTERVAL 10 /* Calibrate PHY every 10 seconds */
  55. #define ATH5K_RETRIES 4 /* Number of times to retry packet sends */
  56. #define ATH5K_DESC_ALIGN 16 /* Alignment for TX/RX descriptors */
  57. /******************\
  58. * Internal defines *
  59. \******************/
  60. /* Known PCI ids */
static struct pci_device_id ath5k_nics[] = {
	/* The driver_data field (last argument) selects the MAC
	 * generation (AR5K_AR5210/5211/5212) that ath5k_probe() hands
	 * to ath5k_hw_attach(). */
	PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212),
	/* Rebadged 5212 cards under non-Atheros vendor ids */
	PCI_ROM(0xa727, 0x0013, "ath5212c","3com Ath 5212", AR5K_AR5212),
	PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212),
	PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212),
};
/* Short-preamble capability flags for entries of ath5k_rates */
#define ATH5K_SPMBL_NO 1	/* long preamble only (CCK 2/5.5/11M) */
#define ATH5K_SPMBL_YES 2	/* short-preamble duplicate entry */
#define ATH5K_SPMBL_BOTH 3	/* usable with either preamble */

/* Rate table.  bitrate is in units of 100kbps (10 == 1Mbps);
 * hw_code is the hardware rate code used in TX descriptors.  The
 * last three entries duplicate the CCK rates with the
 * short-preamble bit set; the terminating { 0, 0, 0 } entry is a
 * sentinel and is NOT counted by ATH5K_NR_RATES. */
static const struct {
	u16 bitrate;
	u8 short_pmbl;
	u8 hw_code;
} ath5k_rates[] = {
	{ 10, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_1M },
	{ 20, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_2M },
	{ 55, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_5_5M },
	{ 110, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_11M },
	{ 60, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_6M },
	{ 90, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_9M },
	{ 120, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_12M },
	{ 180, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_18M },
	{ 240, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_24M },
	{ 360, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_36M },
	{ 480, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_48M },
	{ 540, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_54M },
	{ 20, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE },
	{ 55, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE },
	{ 110, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE },
	{ 0, 0, 0 },
};

/* Number of real entries in ath5k_rates (excluding the sentinel) */
#define ATH5K_NR_RATES 15
  107. /*
  108. * Prototypes - PCI stack related functions
  109. */
/* Forward declarations for the PCI hooks defined below */
static int ath5k_probe(struct pci_device *pdev);
static void ath5k_remove(struct pci_device *pdev);

/* PCI driver registration: matches the id table above */
struct pci_driver ath5k_pci_driver __pci_driver = {
	.ids = ath5k_nics,
	.id_count = sizeof(ath5k_nics) / sizeof(ath5k_nics[0]),
	.probe = ath5k_probe,
	.remove = ath5k_remove,
};
  118. /*
  119. * Prototypes - MAC 802.11 stack related functions
  120. */
/* Forward declarations for the net80211 operations defined below */
static int ath5k_tx(struct net80211_device *dev, struct io_buffer *skb);
static int ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan);
static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct net80211_device *dev);
static void ath5k_stop(struct net80211_device *dev);
static int ath5k_config(struct net80211_device *dev, int changed);
static void ath5k_poll(struct net80211_device *dev);
static void ath5k_irq(struct net80211_device *dev, int enable);

/* Operations table handed to net80211_register() in ath5k_attach() */
static struct net80211_device_operations ath5k_ops = {
	.open = ath5k_start,
	.close = ath5k_stop,
	.transmit = ath5k_tx,
	.poll = ath5k_poll,
	.irq = ath5k_irq,
	.config = ath5k_config,
};
  137. /*
  138. * Prototypes - Internal functions
  139. */
/* Attach detach */
static int ath5k_attach(struct net80211_device *dev);
static void ath5k_detach(struct net80211_device *dev);

/* Channel/mode setup */
static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
					struct net80211_channel *channels,
					unsigned int mode,
					unsigned int max);
static int ath5k_setup_bands(struct net80211_device *dev);
static int ath5k_chan_set(struct ath5k_softc *sc,
			  struct net80211_channel *chan);
static void ath5k_setcurmode(struct ath5k_softc *sc,
			     unsigned int mode);
static void ath5k_mode_setup(struct ath5k_softc *sc);

/* Descriptor setup */
static int ath5k_desc_alloc(struct ath5k_softc *sc);
static void ath5k_desc_free(struct ath5k_softc *sc);

/* Buffers setup */
static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
  160. static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
  161. struct ath5k_buf *bf)
  162. {
  163. if (!bf->iob)
  164. return;
  165. net80211_tx_complete(sc->dev, bf->iob, 0, ECANCELED);
  166. bf->iob = NULL;
  167. }
  168. static inline void ath5k_rxbuf_free(struct ath5k_softc *sc __unused,
  169. struct ath5k_buf *bf)
  170. {
  171. free_iob(bf->iob);
  172. bf->iob = NULL;
  173. }
/* Queues setup */
static int ath5k_txq_setup(struct ath5k_softc *sc,
			   int qtype, int subtype);
static void ath5k_txq_drainq(struct ath5k_softc *sc,
			     struct ath5k_txq *txq);
static void ath5k_txq_cleanup(struct ath5k_softc *sc);
static void ath5k_txq_release(struct ath5k_softc *sc);

/* Rx handling */
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);

/* Tx handling */
static void ath5k_tx_processq(struct ath5k_softc *sc,
			      struct ath5k_txq *txq);

/* Interrupt handling */
static int ath5k_init(struct ath5k_softc *sc);
static int ath5k_stop_hw(struct ath5k_softc *sc);
static void ath5k_calibrate(struct ath5k_softc *sc);

/* Filter */
static void ath5k_configure_filter(struct ath5k_softc *sc);
  193. /********************\
  194. * PCI Initialization *
  195. \********************/
#if DBGLVL_MAX
/*
 * Look up a human-readable chip name in the srev_names table
 * (defined elsewhere) for the given revision type/value.
 * Returns "xxxxx" if no entry matches.  Debug builds only.
 */
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u16 val)
{
	const char *name = "xxxxx";	/* fallback if nothing matches */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		/* Family-level match (low nibble ignored); keep
		 * scanning in case an exact match follows */
		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		/* Exact match wins outright */
		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
#endif
/*
 * PCI probe hook.
 *
 * Tunes PCI config space (cache line size, latency timer, retry
 * timeout), maps the register BAR, allocates the net80211 device
 * plus driver-private ath5k_softc, attaches the hardware layer and
 * finishes driver setup via ath5k_attach().
 *
 * Returns 0 on success or a negative error; on failure everything
 * acquired so far is unwound through the labels at the bottom.
 */
static int ath5k_probe(struct pci_device *pdev)
{
	void *mem;
	struct ath5k_softc *sc;
	struct net80211_device *dev;
	int ret;
	u8 csz;

	adjust_pci_device(pdev);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = 16;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}

	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	/* Map 64KB of register space */
	mem = ioremap(pdev->membase, 0x10000);
	if (!mem) {
		DBG("ath5k: cannot remap PCI memory region\n");
		ret = -EIO;
		goto err;
	}

	/*
	 * Allocate dev (net80211 main struct)
	 * and dev->priv (driver private data)
	 */
	dev = net80211_alloc(sizeof(*sc));
	if (!dev) {
		DBG("ath5k: cannot allocate 802.11 device\n");
		ret = -ENOMEM;
		goto err_map;
	}

	/* Initialize driver private data */
	sc = dev->priv;
	sc->dev = dev;
	sc->pdev = pdev;

	sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
	if (!sc->hwinfo) {
		DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
		ret = -ENOMEM;
		goto err_free;
	}

	/* Hardware delivers frames with the FCS still attached and
	 * reports signal strength in dB */
	sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
	sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
	sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
	sc->hwinfo->channel_change_time = 5000;

	/* Avoid working with the device until setup is complete */
	sc->status |= ATH_STAT_INVALID;

	sc->iobase = mem;
	sc->cachelsz = csz * 4; /* convert to bytes */

	DBG("ath5k: register base at %p (%08lx)\n", sc->iobase, pdev->membase);
	DBG("ath5k: cache line size %d\n", sc->cachelsz);

	/* Set private data */
	pci_set_drvdata(pdev, dev);
	dev->netdev->dev = (struct device *)pdev;

	/* Initialize device */
	ret = ath5k_hw_attach(sc, pdev->id->driver_data, &sc->ah);
	if (ret)
		goto err_free_hwinfo;

	/* Finish private driver data initialization */
	ret = ath5k_attach(dev);
	if (ret)
		goto err_ah;

#if DBGLVL_MAX
	/* Purely diagnostic: report MAC/PHY/radio revisions */
	DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
	    ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
	    sc->ah->ah_mac_srev, sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
		    !sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A)) {
				DBG("RF%s 2GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5Ghz only cards) -> report 5Ghz radio */
			} else if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B)) {
				DBG("RF%s 5GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				DBG("RF%s multiband radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
			 sc->ah->ah_radio_2ghz_revision) {
			DBG("RF%s 5GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_5ghz_revision),
			    sc->ah->ah_radio_5ghz_revision);
			DBG("RF%s 2GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_2ghz_revision),
			    sc->ah->ah_radio_2ghz_revision);
		}
	}
#endif

	/* Ready to go */
	sc->status &= ~ATH_STAT_INVALID;

	return 0;

err_ah:
	ath5k_hw_detach(sc->ah);
err_free_hwinfo:
	free(sc->hwinfo);
err_free:
	net80211_free(dev);
err_map:
	iounmap(mem);
err:
	return ret;
}
/*
 * PCI remove hook: tear down in reverse order of ath5k_probe() —
 * detach from net80211, detach the hardware layer, unmap registers,
 * then free the hwinfo structure and the device itself.
 */
static void ath5k_remove(struct pci_device *pdev)
{
	struct net80211_device *dev = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = dev->priv;

	ath5k_detach(dev);
	ath5k_hw_detach(sc->ah);
	iounmap(sc->iobase);
	free(sc->hwinfo);
	net80211_free(dev);
}
  363. /***********************\
  364. * Driver Initialization *
  365. \***********************/
/*
 * Finish driver-level initialisation: collect bands/channels, pick
 * an initial PHY mode, allocate descriptor rings and the data TX
 * queue, read the MAC address from EEPROM, and register with the
 * net80211 stack.
 *
 * Returns 0 on success or a negative error (partially-acquired
 * resources are unwound via the labels below).
 */
static int
ath5k_attach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int ret;

	/*
	 * Collect the channel list. The 802.11 layer
	 * is resposible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(dev);
	if (ret) {
		DBG("ath5k: can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (ah->ah_modes & AR5K_MODE_BIT_11A)
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		DBG("ath5k: can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues. Note that hw functions
	 * handle reseting these queues at the needed time.
	 */
	ret = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
	if (ret) {
		DBG("ath5k: can't setup xmit queue\n");
		goto err_desc;
	}

	/* Start the periodic calibration clock from "now" */
	sc->last_calib_ticks = currticks();

	ret = ath5k_eeprom_read_mac(ah, sc->hwinfo->hwaddr);
	if (ret) {
		DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
		    sc->pdev->device);
		goto err_queues;
	}

	/* Initialise the BSSID mask to all-ones */
	memset(sc->bssidmask, 0xff, ETH_ALEN);
	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	ret = net80211_register(sc->dev, &ath5k_ops, sc->hwinfo);
	if (ret) {
		DBG("ath5k: can't register ieee80211 hw\n");
		goto err_queues;
	}

	return 0;

err_queues:
	ath5k_txq_release(sc);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}
/*
 * Undo ath5k_attach(): unregister from net80211, then release the
 * descriptor rings and TX queues.
 */
static void
ath5k_detach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;

	net80211_unregister(dev);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
}
  435. /********************\
  436. * Channel/mode setup *
  437. \********************/
  438. /*
  439. * Convert IEEE channel number to MHz frequency.
  440. */
  441. static inline short
  442. ath5k_ieee2mhz(short chan)
  443. {
  444. if (chan < 14)
  445. return 2407 + 5 * chan;
  446. if (chan == 14)
  447. return 2484;
  448. if (chan < 27)
  449. return 2212 + 20 * chan;
  450. return 5000 + 5 * chan;
  451. }
/*
 * Fill 'channels' with up to 'max' channels usable in the given PHY
 * mode, querying the hardware (ath5k_channel_ok) for each candidate
 * channel number.  Returns the number of channels written; 0 if the
 * mode is unsupported by the hardware or unknown.
 */
static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		    struct net80211_channel *channels,
		    unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;

	/* Hardware must advertise this mode at all */
	if (!(ah->ah_modes & (1 << mode)))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
	case AR5K_MODE_11A_TURBO:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
	case AR5K_MODE_11G_TURBO:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		break;
	default:
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1 ;
		freq = ath5k_ieee2mhz(ch);

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].maxpower = 0; /* use regulatory */
		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
			NET80211_BAND_2GHZ : NET80211_BAND_5GHZ;
		/* hw_value encodes band plus modulation (OFDM/CCK)
		 * and, for turbo modes, the turbo flag */
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11A_TURBO:
		case AR5K_MODE_11G_TURBO:
			channels[count].hw_value = chfreq |
				CHANNEL_OFDM | CHANNEL_TURBO;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
		max--;
	}

	return count;
}
  505. static int
  506. ath5k_setup_bands(struct net80211_device *dev)
  507. {
  508. struct ath5k_softc *sc = dev->priv;
  509. struct ath5k_hw *ah = sc->ah;
  510. int max_c, count_c = 0;
  511. int i;
  512. int band;
  513. max_c = sizeof(sc->hwinfo->channels) / sizeof(sc->hwinfo->channels[0]);
  514. /* 2GHz band */
  515. if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11G) {
  516. /* G mode */
  517. band = NET80211_BAND_2GHZ;
  518. sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
  519. sc->hwinfo->modes = (NET80211_MODE_G | NET80211_MODE_B);
  520. for (i = 0; i < 12; i++)
  521. sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
  522. sc->hwinfo->nr_rates[band] = 12;
  523. sc->hwinfo->nr_channels =
  524. ath5k_copy_channels(ah, sc->hwinfo->channels,
  525. AR5K_MODE_11G, max_c);
  526. count_c = sc->hwinfo->nr_channels;
  527. max_c -= count_c;
  528. } else if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B) {
  529. /* B mode */
  530. band = NET80211_BAND_2GHZ;
  531. sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
  532. sc->hwinfo->modes = NET80211_MODE_B;
  533. for (i = 0; i < 4; i++)
  534. sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
  535. sc->hwinfo->nr_rates[band] = 4;
  536. sc->hwinfo->nr_channels =
  537. ath5k_copy_channels(ah, sc->hwinfo->channels,
  538. AR5K_MODE_11B, max_c);
  539. count_c = sc->hwinfo->nr_channels;
  540. max_c -= count_c;
  541. }
  542. /* 5GHz band, A mode */
  543. if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A) {
  544. band = NET80211_BAND_5GHZ;
  545. sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
  546. sc->hwinfo->modes |= NET80211_MODE_A;
  547. for (i = 0; i < 8; i++)
  548. sc->hwinfo->rates[band][i] = ath5k_rates[i+4].bitrate;
  549. sc->hwinfo->nr_rates[band] = 8;
  550. sc->hwinfo->nr_channels =
  551. ath5k_copy_channels(ah, sc->hwinfo->channels,
  552. AR5K_MODE_11B, max_c);
  553. count_c = sc->hwinfo->nr_channels;
  554. max_c -= count_c;
  555. }
  556. return 0;
  557. }
/*
 * Set/change channels. If the channel is really being changed,
 * it's done by reseting the chip. To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath5k_init.
 *
 * Returns 0 if nothing changed, otherwise the result of
 * ath5k_reset().
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	/* Compare both frequency and hw_value: hw_value carries the
	 * band/modulation/turbo flags, so a flags-only change also
	 * needs a reset */
	if (chan->center_freq != sc->curchan->center_freq ||
	    chan->hw_value != sc->curchan->hw_value) {
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
		     sc->curchan->center_freq, chan->center_freq);
		return ath5k_reset(sc, chan);
	}

	return 0;
}
  581. static void
  582. ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
  583. {
  584. sc->curmode = mode;
  585. if (mode == AR5K_MODE_11A) {
  586. sc->curband = NET80211_BAND_5GHZ;
  587. } else {
  588. sc->curband = NET80211_BAND_2GHZ;
  589. }
  590. }
/*
 * Program the receive filter, BSSID mask (on hardware that has
 * one), operating mode and multicast filter into the hardware.
 */
static void
ath5k_mode_setup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);

	/* configure operational mode */
	ath5k_hw_set_opmode(ah);

	/* zero the multicast filter */
	ath5k_hw_set_mcast_filter(ah, 0, 0);
}
  605. static inline int
  606. ath5k_hw_rix_to_bitrate(int hw_rix)
  607. {
  608. int i;
  609. for (i = 0; i < ATH5K_NR_RATES; i++) {
  610. if (ath5k_rates[i].hw_code == hw_rix)
  611. return ath5k_rates[i].bitrate;
  612. }
  613. DBG("ath5k: invalid rix %02x\n", hw_rix);
  614. return 10; /* use lowest rate */
  615. }
  616. int ath5k_bitrate_to_hw_rix(int bitrate)
  617. {
  618. int i;
  619. for (i = 0; i < ATH5K_NR_RATES; i++) {
  620. if (ath5k_rates[i].bitrate == bitrate)
  621. return ath5k_rates[i].hw_code;
  622. }
  623. DBG("ath5k: invalid bitrate %d\n", bitrate);
  624. return ATH5K_RATE_CODE_1M; /* use lowest rate */
  625. }
  626. /***************\
  627. * Buffers setup *
  628. \***************/
/*
 * Allocate an io_buffer for receive, aligned to the cache line
 * size, and store its bus address in *iob_addr.  Returns NULL on
 * allocation failure.
 */
static struct io_buffer *
ath5k_rx_iob_alloc(struct ath5k_softc *sc, u32 *iob_addr)
{
	struct io_buffer *iob;
	unsigned int off;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	iob = alloc_iob(sc->rxbufsize + sc->cachelsz - 1);

	if (!iob) {
		DBG("ath5k: can't alloc iobuf of size %d\n",
		    sc->rxbufsize + sc->cachelsz - 1);
		return NULL;
	}

	*iob_addr = virt_to_bus(iob->data);

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	off = *iob_addr % sc->cachelsz;
	if (off != 0) {
		/* Advance the data pointer (and the recorded bus
		 * address) to the next cache-line boundary */
		iob_reserve(iob, sc->cachelsz - off);
		*iob_addr += sc->cachelsz - off;
	}

	return iob;
}
/*
 * (Re)initialise an RX buffer slot: allocate a cache-aligned
 * io_buffer if the slot is empty, build its RX descriptor and chain
 * it onto the end of the RX descriptor list.
 *
 * Returns 0 on success, -ENOMEM if no buffer could be allocated, or
 * -EINVAL if the hardware rejected the descriptor.
 */
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct io_buffer *iob = bf->iob;
	struct ath5k_desc *ds;

	if (!iob) {
		iob = ath5k_rx_iob_alloc(sc, &bf->iobaddr);
		if (!iob)
			return -ENOMEM;
		bf->iob = iob;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr; /* link to self */
	ds->ds_data = bf->iobaddr;
	if (ah->ah_setup_rx_desc(ah, ds,
				 iob_tailroom(iob), /* buffer size */
				 0) != 0) {
		DBG("ath5k: error setting up RX descriptor for %zd bytes\n", iob_tailroom(iob));
		return -EINVAL;
	}

	/* Patch the previous tail's self-link to point at us, then
	 * become the new tail */
	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;

	return 0;
}
/*
 * Build the TX descriptor for a filled io_buffer, append it to the
 * data queue's descriptor chain and start (or continue) TX DMA.
 * CTS protection is requested when net80211 has enabled protection
 * on this device.
 *
 * Returns 0 on success or the error from the hardware descriptor
 * setup hook.
 */
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq = &sc->txq;
	struct ath5k_desc *ds = bf->desc;
	struct io_buffer *iob = bf->iob;
	unsigned int pktlen, flags;
	int ret;
	u16 duration = 0;
	u16 cts_rate = 0;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	bf->iobaddr = virt_to_bus(iob->data);
	pktlen = iob_len(iob);

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (sc->dev->phy_flags & NET80211_PHY_USE_PROTECTION) {
		struct net80211_device *dev = sc->dev;

		/* Ask the hardware to send CTS-to-self before the frame */
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = sc->hw_rtscts_rate;
		duration = net80211_cts_duration(dev, pktlen);
	}

	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
				   IEEE80211_TYP_FRAME_HEADER_LEN,
				   AR5K_PKT_TYPE_NORMAL, sc->power_level * 2,
				   sc->hw_rate, ATH5K_RETRIES,
				   AR5K_TXKEYIX_INVALID, 0, flags,
				   cts_rate, duration);
	if (ret)
		return ret;

	ds->ds_link = 0;
	ds->ds_data = bf->iobaddr;

	/* Chain onto the software queue and into the hardware's
	 * descriptor list */
	list_add_tail(&bf->list, &txq->q);
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mb();	/* memory barrier after kicking TX DMA */

	return 0;
}
  741. /*******************\
  742. * Descriptors setup *
  743. \*******************/
  744. static int
  745. ath5k_desc_alloc(struct ath5k_softc *sc)
  746. {
  747. struct ath5k_desc *ds;
  748. struct ath5k_buf *bf;
  749. u32 da;
  750. unsigned int i;
  751. int ret;
  752. /* allocate descriptors */
  753. sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
  754. sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
  755. if (sc->desc == NULL) {
  756. DBG("ath5k: can't allocate descriptors\n");
  757. ret = -ENOMEM;
  758. goto err;
  759. }
  760. memset(sc->desc, 0, sc->desc_len);
  761. sc->desc_daddr = virt_to_bus(sc->desc);
  762. ds = sc->desc;
  763. da = sc->desc_daddr;
  764. bf = calloc(ATH_TXBUF + ATH_RXBUF + 1, sizeof(struct ath5k_buf));
  765. if (bf == NULL) {
  766. DBG("ath5k: can't allocate buffer pointers\n");
  767. ret = -ENOMEM;
  768. goto err_free;
  769. }
  770. sc->bufptr = bf;
  771. INIT_LIST_HEAD(&sc->rxbuf);
  772. for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
  773. bf->desc = ds;
  774. bf->daddr = da;
  775. list_add_tail(&bf->list, &sc->rxbuf);
  776. }
  777. INIT_LIST_HEAD(&sc->txbuf);
  778. sc->txbuf_len = ATH_TXBUF;
  779. for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
  780. bf->desc = ds;
  781. bf->daddr = da;
  782. list_add_tail(&bf->list, &sc->txbuf);
  783. }
  784. return 0;
  785. err_free:
  786. free_dma(sc->desc, sc->desc_len);
  787. err:
  788. sc->desc = NULL;
  789. return ret;
  790. }
  791. static void
  792. ath5k_desc_free(struct ath5k_softc *sc)
  793. {
  794. struct ath5k_buf *bf;
  795. list_for_each_entry(bf, &sc->txbuf, list)
  796. ath5k_txbuf_free(sc, bf);
  797. list_for_each_entry(bf, &sc->rxbuf, list)
  798. ath5k_rxbuf_free(sc, bf);
  799. /* Free memory associated with all descriptors */
  800. free_dma(sc->desc, sc->desc_len);
  801. free(sc->bufptr);
  802. sc->bufptr = NULL;
  803. }
  804. /**************\
  805. * Queues setup *
  806. \**************/
  807. static int
  808. ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype)
  809. {
  810. struct ath5k_hw *ah = sc->ah;
  811. struct ath5k_txq *txq;
  812. struct ath5k_txq_info qi = {
  813. .tqi_subtype = subtype,
  814. .tqi_aifs = AR5K_TXQ_USEDEFAULT,
  815. .tqi_cw_min = AR5K_TXQ_USEDEFAULT,
  816. .tqi_cw_max = AR5K_TXQ_USEDEFAULT
  817. };
  818. int qnum;
  819. /*
  820. * Enable interrupts only for EOL and DESC conditions.
  821. * We mark tx descriptors to receive a DESC interrupt
  822. * when a tx queue gets deep; otherwise waiting for the
  823. * EOL to reap descriptors. Note that this is done to
  824. * reduce interrupt load and this only defers reaping
  825. * descriptors, never transmitting frames. Aside from
  826. * reducing interrupts this also permits more concurrency.
  827. * The only potential downside is if the tx queue backs
  828. * up in which case the top half of the kernel may backup
  829. * due to a lack of tx descriptors.
  830. */
  831. qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
  832. AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
  833. qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
  834. if (qnum < 0) {
  835. DBG("ath5k: can't set up a TX queue\n");
  836. return -EIO;
  837. }
  838. txq = &sc->txq;
  839. if (!txq->setup) {
  840. txq->qnum = qnum;
  841. txq->link = NULL;
  842. INIT_LIST_HEAD(&txq->q);
  843. txq->setup = 1;
  844. }
  845. return 0;
  846. }
  847. static void
  848. ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
  849. {
  850. struct ath5k_buf *bf, *bf0;
  851. list_for_each_entry_safe(bf, bf0, &txq->q, list) {
  852. ath5k_txbuf_free(sc, bf);
  853. list_del(&bf->list);
  854. list_add_tail(&bf->list, &sc->txbuf);
  855. sc->txbuf_len++;
  856. }
  857. txq->link = NULL;
  858. }
  859. /*
  860. * Drain the transmit queues and reclaim resources.
  861. */
  862. static void
  863. ath5k_txq_cleanup(struct ath5k_softc *sc)
  864. {
  865. struct ath5k_hw *ah = sc->ah;
  866. if (!(sc->status & ATH_STAT_INVALID)) {
  867. /* don't touch the hardware if marked invalid */
  868. if (sc->txq.setup) {
  869. ath5k_hw_stop_tx_dma(ah, sc->txq.qnum);
  870. DBG("ath5k: txq [%d] %x, link %p\n",
  871. sc->txq.qnum,
  872. ath5k_hw_get_txdp(ah, sc->txq.qnum),
  873. sc->txq.link);
  874. }
  875. }
  876. if (sc->txq.setup)
  877. ath5k_txq_drainq(sc, &sc->txq);
  878. }
  879. static void
  880. ath5k_txq_release(struct ath5k_softc *sc)
  881. {
  882. if (sc->txq.setup) {
  883. ath5k_hw_release_tx_queue(sc->ah);
  884. sc->txq.setup = 0;
  885. }
  886. }
  887. /*************\
  888. * RX Handling *
  889. \*************/
/*
 * Enable the receive h/w following a reset.
 *
 * Rebuilds the self-linked RX descriptor chain from sc->rxbuf,
 * points the hardware at its head and starts RX DMA and the PCU.
 * Returns 0 on success or the error from ath5k_rxbuf_setup().
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_buf *bf;
	int ret;

	/* Round the RX buffer size up to a whole number of cache lines */
	sc->rxbufsize = IEEE80211_MAX_LEN;
	if (sc->rxbufsize % sc->cachelsz != 0)
		sc->rxbufsize += sc->cachelsz - (sc->rxbufsize % sc->cachelsz);

	sc->rxlink = NULL;	/* chain is rebuilt from scratch below */

	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0)
			return ret;
	}

	/* Hand the head of the chain to the hardware and start it */
	bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc);		/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
	return 0;
}
/*
 * Disable the receive h/w in preparation for a reset.
 *
 * Stops the PCU first, clears the RX filter, then stops RX DMA;
 * finally forgets the software descriptor-chain tail.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */
	sc->rxlink = NULL;		/* just in case */
}
/*
 * Reap completed receive descriptors: for each finished frame,
 * either pass it up to the net80211 layer (swapping in a fresh
 * io_buffer) or drop it, then recycle the buffer descriptor onto
 * the tail of the RX chain.
 */
static void
ath5k_handle_rx(struct ath5k_softc *sc)
{
	struct ath5k_rx_status rs;
	struct io_buffer *iob, *next_iob;
	u32 next_iob_addr;
	struct ath5k_buf *bf, *bf_last;
	struct ath5k_desc *ds;
	int ret;

	memset(&rs, 0, sizeof(rs));

	if (list_empty(&sc->rxbuf)) {
		DBG("ath5k: empty rx buf pool\n");
		return;
	}
	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
	do {
		/* Head of the list is the oldest outstanding descriptor */
		bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
		assert(bf->iob != NULL);
		iob = bf->iob;
		ds = bf->desc;

		/*
		 * last buffer must not be freed to ensure proper hardware
		 * function. When the hardware finishes also a packet next to
		 * it, we are sure, it doesn't use it anymore and we can go on.
		 */
		if (bf_last == bf)
			bf->flags |= 1;		/* flag bit 0: deferred entry */
		if (bf->flags) {
			struct ath5k_buf *bf_next = list_entry(bf->list.next,
							       struct ath5k_buf, list);

			/* Only move past this entry once the NEXT descriptor
			 * has completed, proving the hw is done with this one */
			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
						      &rs);
			if (ret)
				break;
			bf->flags &= ~1;
			/* skip the overwritten one (even status is martian) */
			goto next;
		}

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing rx desc: %s\n",
				    strerror(ret));
				net80211_rx_err(sc->dev, NULL, -ret);
			} else {
				/* normal return, reached end of
				   available descriptors */
			}
			return;
		}

		if (rs.rs_more) {
			/* multi-descriptor frames are not handled */
			DBG("ath5k: unsupported fragmented rx\n");
			goto next;
		}

		if (rs.rs_status) {
			if (rs.rs_status & AR5K_RXERR_PHY) {
				/* These are uncommon, and may indicate a real problem. */
				net80211_rx_err(sc->dev, NULL, EIO);
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_CRC) {
				/* These occur *all the time*. */
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it. This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
				    !(rs.rs_status & AR5K_RXERR_CRC))
					goto accept;
			}

			/* any other error, unhandled */
			DBG("ath5k: packet rx status %x\n", rs.rs_status);
			goto next;
		}
accept:
		next_iob = ath5k_rx_iob_alloc(sc, &next_iob_addr);

		/*
		 * If we can't replace bf->iob with a new iob under memory
		 * pressure, just skip this packet
		 */
		if (!next_iob) {
			DBG("ath5k: dropping packet under memory pressure\n");
			goto next;
		}

		/* Extend the io_buffer to cover the received bytes */
		iob_put(iob, rs.rs_datalen);

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. However, iPXE only
		 * supports standard 802.11 packets with 24-byte
		 * header, so no padding correction should be needed.
		 */

		DBG2("ath5k: rx %d bytes, signal %d\n", rs.rs_datalen,
		     rs.rs_rssi);

		/* Hand the frame to the 802.11 stack; it owns iob now */
		net80211_rx(sc->dev, iob, rs.rs_rssi,
			    ath5k_hw_rix_to_bitrate(rs.rs_rate));

		bf->iob = next_iob;
		bf->iobaddr = next_iob_addr;
next:
		/* Recycle the buffer onto the tail of the RX list */
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
}
  1037. /*************\
  1038. * TX Handling *
  1039. \*************/
/*
 * Reap completed transmit descriptors from @txq: report each finished
 * frame's status to net80211 and return its buffer descriptor to the
 * TX free list. Stops at the first descriptor the hardware has not
 * yet finished (-EINPROGRESS).
 */
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts;
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct io_buffer *iob;
	int ret;

	memset(&ts, 0, sizeof(ts));

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ds = bf->desc;

		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing tx desc: %s\n",
				    strerror(ret));
			} else {
				/* normal return, reached end of tx completions */
			}
			break;
		}

		/* Detach the io_buffer before handing it to net80211 */
		iob = bf->iob;
		bf->iob = NULL;

		DBG2("ath5k: tx %zd bytes complete, %d retries\n",
		     iob_len(iob), ts.ts_retry[0]);

		net80211_tx_complete(sc->dev, iob, ts.ts_retry[0],
				     ts.ts_status ? EIO : 0);

		/* Return the buffer descriptor to the free pool */
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}

	/* Queue fully drained: no tail descriptor left to link to */
	if (list_empty(&txq->q))
		txq->link = NULL;
}
/* Reap TX completions on the driver's single transmit queue. */
static void
ath5k_handle_tx(struct ath5k_softc *sc)
{
	ath5k_tx_processq(sc, &sc->txq);
}
  1079. /********************\
  1080. * Interrupt handling *
  1081. \********************/
  1082. static void
  1083. ath5k_irq(struct net80211_device *dev, int enable)
  1084. {
  1085. struct ath5k_softc *sc = dev->priv;
  1086. struct ath5k_hw *ah = sc->ah;
  1087. sc->irq_ena = enable;
  1088. ah->ah_ier = enable ? AR5K_IER_ENABLE : AR5K_IER_DISABLE;
  1089. ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
  1090. ath5k_hw_set_imr(ah, sc->imask);
  1091. }
/*
 * Bring the hardware up from any prior state: stop it, reset it on
 * the current channel, clear the key cache and configure ACK rates.
 *
 * Returns 0 on success or the error from ath5k_reset().
 */
static int
ath5k_init(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	int ret, i;

	/*
	 * Stop anything previously setup. This is safe
	 * no matter this is the first time through or not.
	 */
	ath5k_stop_hw(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->dev->channels + sc->dev->channel;
	sc->curband = sc->curchan->band;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL;
	ret = ath5k_reset(sc, NULL);	/* NULL: keep the current channel */
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
		ath5k_hw_reset_key(ah, i);

	/* Set ack to be sent at low bit-rates */
	ath5k_hw_set_ack_bitrate_high(ah, 0);

	ret = 0;
done:
	mb();	/* commit all hardware state changes before returning */
	return ret;
}
/*
 * Quiesce the hardware: mask interrupts, drain the TX queue, stop
 * RX and disable the PHY. Always returns 0.
 */
static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_hw_set_imr(ah, 0);	/* mask all interrupts first */
	}
	ath5k_txq_cleanup(sc);	/* itself checks ATH_STAT_INVALID */
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_rx_stop(sc);
		ath5k_hw_phy_disable(ah);
	} else
		sc->rxlink = NULL;	/* hw gone; just drop chain state */
	ath5k_rfkill_hw_stop(sc->ah);

	return 0;
}
/*
 * Poll hook called by the 802.11 layer: run periodic calibration,
 * then service any pending interrupt causes (fatal errors, RX
 * overruns, RX/TX completions), looping while causes remain, up to
 * a bounded number of iterations.
 */
static void
ath5k_poll(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;	/* bound on service iterations */

	/* Recalibrate the PHY at most once per ATH5K_CALIB_INTERVAL */
	if (currticks() - sc->last_calib_ticks >
	    ATH5K_CALIB_INTERVAL * TICKS_PER_SEC) {
		ath5k_calibrate(sc);
		sc->last_calib_ticks = currticks();
	}

	/* Nothing to do if the hw is gone, or if interrupts are
	 * enabled but none is pending */
	if ((sc->status & ATH_STAT_INVALID) ||
	    (sc->irq_ena && !ath5k_hw_is_intr_pending(ah)))
		return;

	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */
		DBGP("ath5k: status %#x/%#x\n", status, sc->imask);
		if (status & AR5K_INT_FATAL) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			DBG("ath5k: fatal error, resetting\n");
			ath5k_reset_wake(sc);
		} else if (status & AR5K_INT_RXORN) {
			DBG("ath5k: rx overrun, resetting\n");
			ath5k_reset_wake(sc);
		} else {
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				DBG("ath5k: rx EOL\n");
				sc->rxlink = NULL;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				DBG("ath5k: tx underrun\n");
				ath5k_hw_update_tx_triglevel(ah, 1);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_handle_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
				      | AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_handle_tx(sc);
		}
	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);

	/* counter only reaches 0 via the decrement above, i.e. when we
	 * gave up while interrupts were still pending */
	if (!counter)
		DBG("ath5k: too many interrupts, giving up for now\n");
}
  1215. /*
  1216. * Periodically recalibrate the PHY to account
  1217. * for temperature/environment changes.
  1218. */
  1219. static void
  1220. ath5k_calibrate(struct ath5k_softc *sc)
  1221. {
  1222. struct ath5k_hw *ah = sc->ah;
  1223. if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
  1224. /*
  1225. * Rfgain is out of bounds, reset the chip
  1226. * to load new gain values.
  1227. */
  1228. DBG("ath5k: resetting for calibration\n");
  1229. ath5k_reset_wake(sc);
  1230. }
  1231. if (ath5k_hw_phy_calibrate(ah, sc->curchan))
  1232. DBG("ath5k: calibration of channel %d failed\n",
  1233. sc->curchan->channel_nr);
  1234. }
  1235. /********************\
  1236. * Net80211 functions *
  1237. \********************/
  1238. static int
  1239. ath5k_tx(struct net80211_device *dev, struct io_buffer *iob)
  1240. {
  1241. struct ath5k_softc *sc = dev->priv;
  1242. struct ath5k_buf *bf;
  1243. int rc;
  1244. /*
  1245. * The hardware expects the header padded to 4 byte boundaries.
  1246. * iPXE only ever sends 24-byte headers, so no action necessary.
  1247. */
  1248. if (list_empty(&sc->txbuf)) {
  1249. DBG("ath5k: dropping packet because no tx bufs available\n");
  1250. return -ENOBUFS;
  1251. }
  1252. bf = list_entry(sc->txbuf.next, struct ath5k_buf, list);
  1253. list_del(&bf->list);
  1254. sc->txbuf_len--;
  1255. bf->iob = iob;
  1256. if ((rc = ath5k_txbuf_setup(sc, bf)) != 0) {
  1257. bf->iob = NULL;
  1258. list_add_tail(&bf->list, &sc->txbuf);
  1259. sc->txbuf_len++;
  1260. return rc;
  1261. }
  1262. return 0;
  1263. }
/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * Returns 0 on success or a negative error from the hardware reset
 * or RX restart.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	struct ath5k_hw *ah = sc->ah;
	int ret;

	if (chan) {
		/* Quiesce before switching channel */
		ath5k_hw_set_imr(ah, 0);
		ath5k_txq_cleanup(sc);
		ath5k_rx_stop(sc);

		sc->curchan = chan;
		sc->curband = chan->band;
	}

	ret = ath5k_hw_reset(ah, sc->curchan, 1);
	if (ret) {
		DBG("ath5k: can't reset hardware: %s\n", strerror(ret));
		return ret;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		DBG("ath5k: can't start rx logic: %s\n", strerror(ret));
		return ret;
	}

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
	/* ath5k_chan_change(sc, c); */

	/* Reenable interrupts if necessary */
	ath5k_irq(sc->dev, sc->irq_ena);

	return 0;
}
  1304. static int ath5k_reset_wake(struct ath5k_softc *sc)
  1305. {
  1306. return ath5k_reset(sc, sc->curchan);
  1307. }
  1308. static int ath5k_start(struct net80211_device *dev)
  1309. {
  1310. struct ath5k_softc *sc = dev->priv;
  1311. int ret;
  1312. if ((ret = ath5k_init(sc)) != 0)
  1313. return ret;
  1314. sc->assoc = 0;
  1315. ath5k_configure_filter(sc);
  1316. ath5k_hw_set_lladdr(sc->ah, dev->netdev->ll_addr);
  1317. return 0;
  1318. }
  1319. static void ath5k_stop(struct net80211_device *dev)
  1320. {
  1321. struct ath5k_softc *sc = dev->priv;
  1322. u8 mac[ETH_ALEN] = {};
  1323. ath5k_hw_set_lladdr(sc->ah, mac);
  1324. ath5k_stop_hw(sc);
  1325. }
  1326. static int
  1327. ath5k_config(struct net80211_device *dev, int changed)
  1328. {
  1329. struct ath5k_softc *sc = dev->priv;
  1330. struct ath5k_hw *ah = sc->ah;
  1331. struct net80211_channel *chan = &dev->channels[dev->channel];
  1332. int ret;
  1333. if (changed & NET80211_CFG_CHANNEL) {
  1334. sc->power_level = chan->maxpower;
  1335. if ((ret = ath5k_chan_set(sc, chan)) != 0)
  1336. return ret;
  1337. }
  1338. if ((changed & NET80211_CFG_RATE) ||
  1339. (changed & NET80211_CFG_PHY_PARAMS)) {
  1340. int spmbl = ATH5K_SPMBL_NO;
  1341. u16 rate = dev->rates[dev->rate];
  1342. u16 slowrate = dev->rates[dev->rtscts_rate];
  1343. int i;
  1344. if (dev->phy_flags & NET80211_PHY_USE_SHORT_PREAMBLE)
  1345. spmbl = ATH5K_SPMBL_YES;
  1346. for (i = 0; i < ATH5K_NR_RATES; i++) {
  1347. if (ath5k_rates[i].bitrate == rate &&
  1348. (ath5k_rates[i].short_pmbl & spmbl))
  1349. sc->hw_rate = ath5k_rates[i].hw_code;
  1350. if (ath5k_rates[i].bitrate == slowrate &&
  1351. (ath5k_rates[i].short_pmbl & spmbl))
  1352. sc->hw_rtscts_rate = ath5k_rates[i].hw_code;
  1353. }
  1354. }
  1355. if (changed & NET80211_CFG_ASSOC) {
  1356. sc->assoc = !!(dev->state & NET80211_ASSOCIATED);
  1357. if (sc->assoc) {
  1358. memcpy(ah->ah_bssid, dev->bssid, ETH_ALEN);
  1359. } else {
  1360. memset(ah->ah_bssid, 0xff, ETH_ALEN);
  1361. }
  1362. ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
  1363. }
  1364. return 0;
  1365. }
  1366. /*
  1367. * o always accept unicast, broadcast, and multicast traffic
  1368. * o multicast traffic for all BSSIDs will be enabled if mac80211
  1369. * says it should be
  1370. * o maintain current state of phy ofdm or phy cck error reception.
  1371. * If the hardware detects any of these type of errors then
  1372. * ath5k_hw_get_rx_filter() will pass to us the respective
  1373. * hardware filters to be able to receive these type of frames.
  1374. * o probe request frames are accepted only when operating in
  1375. * hostap, adhoc, or monitor modes
  1376. * o enable promiscuous mode according to the interface state
  1377. * o accept beacons:
  1378. * - when operating in adhoc mode so the 802.11 layer creates
  1379. * node table entries for peers,
  1380. * - when operating in station mode for collecting rssi data when
  1381. * the station is otherwise quiet, or
  1382. * - when scanning
  1383. */
  1384. static void ath5k_configure_filter(struct ath5k_softc *sc)
  1385. {
  1386. struct ath5k_hw *ah = sc->ah;
  1387. u32 mfilt[2], rfilt;
  1388. /* Enable all multicast */
  1389. mfilt[0] = ~0;
  1390. mfilt[1] = ~0;
  1391. /* Enable data frames and beacons */
  1392. rfilt = (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
  1393. AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_BEACON);
  1394. /* Set filters */
  1395. ath5k_hw_set_rx_filter(ah, rfilt);
  1396. /* Set multicast bits */
  1397. ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
  1398. /* Set the cached hw filter flags, this will alter actually
  1399. * be set in HW */
  1400. sc->filter_flags = rfilt;
  1401. }