
ath9k_xmit.c

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
 * Original from Linux kernel 3.0.1
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <ipxe/io.h>

#include "ath9k.h"
#include "ar9003_mac.h"
#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec)	((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define IS_HT_RATE(_rate)	((_rate) & 0x80)
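
/*
 * Worked example (added for illustration, not part of the original
 * driver): a burst of 10 OFDM symbols lasts SYMBOL_TIME(10) == 40 us
 * with the regular 0.8 us guard interval, but SYMBOL_TIME_HALFGI(10)
 * == (10 * 18 + 4) / 5 == 36 us with the 0.4 us short guard interval
 * (3.6 us per symbol, rounded in integer arithmetic).
 */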
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = 1;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = 1;
	list_add_tail(&ac->list, &txq->axq_acq);
}
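
/*
 * Note (added for clarity): scheduling is two-level. A TID (traffic
 * identifier) is queued on its access category (ath_atx_ac), and the
 * access category in turn is queued on the hardware queue's axq_acq
 * list. The "sched" flags merely guard against double-insertion on
 * either level.
 */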
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	if (list_empty(&sc->tx.txbuf)) {
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	list_add_tail(&bf->list, &sc->tx.txbuf);
}
/********************/
/* Queue Management */
/********************/

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;

	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if ((unsigned int)axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DBG("ath9k: qnum %d out of range, max %zd!\n",
		    axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = 0;
		sc->tx.txqsetup |= 1 << axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}
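
/*
 * Usage sketch (an assumption about the caller, not part of this file):
 * driver initialization is expected to create the single best-effort
 * data queue with something like
 *
 *	sc->tx.txq_map[0] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA,
 *					  WME_AC_BE);
 *
 * and release it again via ath_tx_cleanupq() on shutdown.
 */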
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
{
	struct ath_buf *bf, *lastbf __unused;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);

		txq->axq_depth--;
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	txq->axq_tx_inprogress = 0;
}
int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return 1;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		DBG("ath9k: Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = 0;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}
/*
 * For each access category on axq_acq, and for each TID on it, schedule
 * pending frames for transmit until axq_ampdu_depth reaches
 * ATH_AGGR_MIN_QDEPTH.
 */
void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = 0;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = 0;

			if (tid->paused)
				continue;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = 1;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	DBGIO("ath9k: qnum: %d, txq depth: %d\n",
	      txq->axq_qnum, txq->axq_depth);

	list_splice_tail_init(head, &txq->axq_q);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DBGIO("ath9k: TXDP[%d] = %llx (%p)\n",
		      txq->axq_qnum, ito64(bf->bf_daddr),
		      bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DBGIO("ath9k: link[%d] (%p)=%llx (%p)\n",
		      txq->axq_qnum, txq->axq_link,
		      ito64(bf->bf_daddr), bf->bf_desc);
	}
	ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, &txq->axq_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);

	txq->axq_depth++;
}
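
/*
 * Note (added for clarity): if the queue was idle (axq_link == NULL),
 * the first descriptor's DMA address is written straight into the
 * hardware TXDP register; otherwise it is patched into the link field
 * of the previously queued descriptor, so the hardware always walks a
 * single descriptor chain.
 */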
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, iob_len(bf->bf_mpdu) + FCS_LEN);
	ath_tx_txqaddbuf(sc, txq, bf_head);
}
static enum ath9k_pkt_type get_hw_packet_type(struct io_buffer *iob)
{
	struct ieee80211_frame *hdr;
	enum ath9k_pkt_type htype;
	u16 fc;

	hdr = (struct ieee80211_frame *)iob->data;
	fc = hdr->fc;

	if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
	    (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_BEACON))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
		 (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_PROBE_RESP))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static int setup_tx_flags(struct io_buffer *iob __unused)
{
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	return flags;
}

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	const struct ath9k_legacy_rate *rate;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	int is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	is_pspoll = 0;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = &sc->rates[sc->hw_rix];
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		int is_40 __unused, is_sgi __unused, is_sp;
		int phy;

		rix = sc->hw_rix;
		series[i].Tries = ATH_TXMAXTRY;

		if (sc->sc_flags & SC_OP_PROTECT_ENABLE) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		is_sp = !!(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		/* legacy rates */
		if ((sc->dev->channels + sc->dev->channel)->band ==
		    NET80211_BAND_2GHZ)
			phy = CHANNEL_CCK;
		else
			phy = CHANNEL_OFDM;

		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short &&
		    (sc->sc_flags & SC_OP_PREAMBLE_SHORT)) {
			if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = 0;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);
}
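
/*
 * Note (added for clarity): this iPXE port drives a single fixed rate
 * (sc->hw_rix), so all four hardware rate-series slots are programmed
 * identically above; the multi-rate retry ladder of the full Linux
 * driver is not used here.
 */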
static struct ath_buf *ath_tx_setup_buffer(struct net80211_device *dev,
					   struct ath_txq *txq,
					   struct io_buffer *iob)
{
	struct ath_softc *sc = dev->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;
	static const enum ath9k_key_type net80211_keytype_to_ath[] = {
		[NET80211_CRYPT_NONE]    = ATH9K_KEY_TYPE_CLEAR,
		[NET80211_CRYPT_WEP]     = ATH9K_KEY_TYPE_WEP,
		[NET80211_CRYPT_TKIP]    = ATH9K_KEY_TYPE_TKIP,
		[NET80211_CRYPT_CCMP]    = ATH9K_KEY_TYPE_AES,
		[NET80211_CRYPT_UNKNOWN] = ATH9K_KEY_TYPE_CLEAR,
	};

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DBG("ath9k: TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(iob);
	bf->bf_mpdu = iob;
	bf->bf_buf_addr = virt_to_bus(iob->data);

	frm_type = get_hw_packet_type(iob);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, iob_len(iob) + FCS_LEN, frm_type,
			       MAX_RATE_POWER, ATH9K_TXKEYIX_INVALID,
			       net80211_keytype_to_ath[dev->crypto->algorithm],
			       bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    iob_len(iob),	/* segment length */
			    1,			/* first segment */
			    1,			/* last segment */
			    ds,			/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	return bf;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp =
			(currticks() * 1000) / TICKS_PER_SEC;

	ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, 1);

	ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}
/* Upon failure caller should free iob */
int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob,
		 struct ath_tx_control *txctl)
{
	struct ath_softc *sc = dev->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q;

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */
	bf = ath_tx_setup_buffer(dev, txctl->txq, iob);
	if (!bf)
		return -ENOMEM;

	q = 0;
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		txq->stopped = 1;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
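
/*
 * Flow-control note (added for clarity): pending_frames counts frames
 * in flight on the data queue. Once it exceeds ATH_MAX_QDEPTH the queue
 * is marked stopped, and ath_tx_complete() below clears the flag again
 * as completions drain the count back under the threshold.
 */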
/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob,
			    int tx_flags __unused, struct ath_tx_status *ts,
			    struct ath_txq *txq)
{
	struct net80211_device *dev = sc->dev;
	int q, padpos __unused, padsize __unused;

	DBGIO("ath9k: TX complete: iob: %p\n", iob);

	q = 0;
	if (txq == sc->tx.txq_map[q]) {
		if (--txq->pending_frames < 0)
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			txq->stopped = 0;
		}
	}

	net80211_tx_complete(dev, iob, ts->ts_longretry,
			     (ts->ts_status & ATH9K_TXERR_MASK) ? EIO : 0);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct io_buffer *iob = bf->bf_mpdu;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	bf->bf_buf_addr = 0;

	ath_tx_complete(sc, iob, tx_flags, ts, txq);

	/* At this point, iob (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	DBGIO("ath9k: tx queue %d (%x), link %p\n",
	      txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
	      txq->axq_link);

	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = 1;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = 0;
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		/*
		 * This frame is sent out as a single frame.
		 * Use hardware retry status for this frame.
		 */
		if (ts.ts_status & ATH9K_TXERR_XRETRY)
			bf->bf_state.bf_type |= BUF_XRETRY;

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
	}
}
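
/*
 * Note (added for clarity): the holding descriptor marked bf_stale
 * above is only recycled on a later pass through this loop (or when the
 * queue is drained), once the hardware can no longer re-read it.
 */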
static void ath_tx_complete_poll_work(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int i;
	int needreset = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = 1;
					break;
				} else {
					txq->axq_tx_inprogress = 1;
				}
			}
		}

	if (needreset) {
		DBG("ath9k: tx hung, resetting the chip\n");
		ath_reset(sc, 1);
	}

	sc->tx_complete_work_timer =
		(currticks() * 1000) / TICKS_PER_SEC + ATH_TX_COMPLETE_POLL_INT;
}
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
/*****************/
/* Init, Cleanup */
/*****************/

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		DBG("ath9k: Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	sc->tx_complete_work = ath_tx_complete_poll_work;

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}