ath9k: Add Rx EDMA support
drivers/net/wireless/ath/ath9k/recv.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

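/*
 * The skb control block is used to stash the ath_buf that owns a buffer
 * while it sits in an EDMA rx FIFO, so the buffer can be recovered when
 * the skb is dequeued again.
 */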
#define SKB_CB_ATHBUF(__skb)    (*((struct ath_buf **)__skb->cb))

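/*
 * Map a received frame to the virtual wiphy (and thus ieee80211_hw) whose
 * permanent address matches the frame's destination; fall back to the
 * primary wiphy when no secondary wiphy matches.
 */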
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
                                             struct ieee80211_hdr *hdr)
{
        struct ieee80211_hw *hw = sc->pri_wiphy->hw;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
                    == 0) {
                        hw = aphy->hw;
                        break;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct sk_buff *skb;

        ATH_RXBUF_RESET(bf);

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        BUG_ON(skb == NULL);
        ds->ds_vdata = skb->data;

        /*
         * setup rx descriptors. The rx_bufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process
         */
        ath9k_hw_setuprxdesc(ah, ds,
                             common->rx_bufsize,
                             0);

        if (sc->rx.rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->rx.rxlink = bf->bf_daddr;

        sc->rx.rxlink = &ds->ds_link;
        ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}

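/*
 * Program the rx filter, BSSID mask, operating mode, MAC address and
 * multicast filter for the current operating mode.
 */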
static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, common->macaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

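/*
 * Take one buffer off the free list, clear its rx status area and hand it
 * to the hardware rx FIFO of the given queue. Returns false when the FIFO
 * is already full.
 */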
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
        struct ath_buf *bf;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        list_del_init(&bf->list);

        skb = bf->bf_mpdu;

        ATH_RXBUF_RESET(bf);
        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                ah->caps.rx_status_len, DMA_TO_DEVICE);

        SKB_CB_ATHBUF(skb) = bf;
        ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
        skb_queue_tail(&rx_edma->rx_fifo, skb);

        return true;
}

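/*
 * Fill the given rx FIFO with up to 'size' buffers from the free list.
 */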
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype, int size)
{
        struct ath_rx_edma *rx_edma;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 nbuf = 0;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (list_empty(&sc->rx.rxbuf)) {
                ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
                return;
        }

        while (!list_empty(&sc->rx.rxbuf)) {
                nbuf++;

                if (!ath_rx_edma_buf_link(sc, qtype))
                        break;

                if (nbuf >= size)
                        break;
        }
}

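/*
 * Drain the given rx FIFO and return all of its buffers to the free list.
 */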
static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_buf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;

        rx_edma = &sc->rx.rx_edma[qtype];

        while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }
}

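/*
 * Undo ath_rx_edma_init(): pull all buffers back from both FIFOs, free
 * their skbs and release the ath_buf array.
 */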
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_buf *bf;

        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                if (bf->bf_mpdu)
                        dev_kfree_skb_any(bf->bf_mpdu);
        }

        INIT_LIST_HEAD(&sc->rx.rxbuf);

        kfree(sc->rx.rx_bufptr);
        sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
        skb_queue_head_init(&rx_edma->rx_fifo);
        skb_queue_head_init(&rx_edma->rx_buffers);
        rx_edma->rx_fifo_hwsize = size;
}

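/*
 * Allocate and DMA-map the rx buffers used by the EDMA rx path and size
 * the high and low priority rx FIFOs from the hardware capabilities.
 */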
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0, i;
        u32 size;

        common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
                                     ah->caps.rx_status_len,
                                     min(common->cachelsz, (u16)64));

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);

        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
                               ah->caps.rx_lp_qdepth);
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);

        size = sizeof(struct ath_buf) * nbufs;
        bf = kzalloc(size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;

        INIT_LIST_HEAD(&sc->rx.rxbuf);
        sc->rx.rx_bufptr = bf;

        for (i = 0; i < nbufs; i++, bf++) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
                if (!skb) {
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                memset(skb->data, 0, common->rx_bufsize);
                bf->bf_mpdu = skb;

                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(sc->dev,
                                                bf->bf_buf_addr))) {
                                dev_kfree_skb_any(skb);
                                bf->bf_mpdu = NULL;
                                ath_print(common, ATH_DBG_FATAL,
                                        "dma_mapping_error() on RX init\n");
                                error = -ENOMEM;
                                goto rx_init_fail;
                }

                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }

        return 0;

rx_init_fail:
        ath_rx_edma_cleanup(sc);
        return error;
}

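/*
 * Start EDMA reception: enable rx, fill both FIFOs with buffers, program
 * the rx filters and start the PCU receive engine.
 */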
static void ath_edma_start_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);

        ath9k_hw_rxena(sc->sc_ah);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

        spin_unlock_bh(&sc->rx.rxbuflock);

        ath_opmode_init(sc);

        ath9k_hw_startpcureceive(sc->sc_ah);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        spin_unlock_bh(&sc->rx.rxbuflock);
}

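/*
 * Common rx initialisation; dispatches to the EDMA variant when the
 * hardware supports it, otherwise sets up the legacy descriptor list.
 */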
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        spin_lock_init(&sc->rx.rxflushlock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
                common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                min(common->cachelsz, (u16)64));

                ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                                common->cachelsz, common->rx_bufsize);

                /* Initialize rx descriptors */

                error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                "rx", nbufs, 1);
                if (error != 0) {
                        ath_print(common, ATH_DBG_FATAL,
                                  "failed to allocate rx descriptors: %d\n",
                                  error);
                        goto err;
                }

                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                              GFP_KERNEL);
                        if (skb == NULL) {
                                error = -ENOMEM;
                                goto err;
                        }

                        bf->bf_mpdu = skb;
                        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                        common->rx_bufsize,
                                        DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(sc->dev,
                                                        bf->bf_buf_addr))) {
                                dev_kfree_skb_any(skb);
                                bf->bf_mpdu = NULL;
                                ath_print(common, ATH_DBG_FATAL,
                                          "dma_mapping_error() on RX init\n");
                                error = -ENOMEM;
                                goto err;
                        }
                        bf->bf_dmacontext = bf->bf_buf_addr;
                }
                sc->rx.rxlink = NULL;
        }

err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
                return;
        } else {
                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = bf->bf_mpdu;
                        if (skb) {
                                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                                common->rx_bufsize,
                                                DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                }

                if (sc->rx.rxdma.dd_desc_len != 0)
                        ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
        }
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /*
         * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
        if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
             (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
                rfilt |= ATH9K_RX_FILTER_PROM;

        if (sc->rx.rxfilter & FIF_CONTROL)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
            AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
            (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* TODO: only needed if more than one BSSID is in use in
                 * station/adhoc mode */
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        return rfilt;

#undef RX_FILTER_PRESERVE
}

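/*
 * (Re)start reception. On EDMA hardware this only refills the rx FIFOs;
 * on legacy hardware the rx buffers are relinked and the first descriptor
 * is handed to the DMA engine before the PCU receive engine is started.
 */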
int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
                return 0;
        }

        spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);

start_recv:
        spin_unlock_bh(&sc->rx.rxbuflock);
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah);

        return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped;

        ath9k_hw_stoppcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;

        return stopped;
}

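/*
 * Flush all pending frames from the rx queue(s) by running the rx tasklet
 * with the flush flag set; completed buffers are requeued to the hardware
 * rather than delivered to mac80211.
 */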
void ath_flushrecv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_rx_tasklet(sc, 1, true);
        ath_rx_tasklet(sc, 1, false);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->rx.rxflushlock);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (skb->len < 24 + 8 + 2 + 2)
                return;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
                return; /* not from our current AP */

        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_print(common, ATH_DBG_PS,
                          "Reconfigure Beacon timers based on "
                          "timestamp from the AP\n");
                ath_beacon_config(sc, NULL);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
                          "buffered broadcast/multicast frame(s)\n");
                sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
                return;
        }

        if (sc->ps_flags & PS_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "PS wait for CAB frames timed out\n");
        }
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
            ieee80211_is_beacon(hdr->frame_control))
                ath_rx_ps_beacon(sc, skb);
        else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
                 (ieee80211_is_data(hdr->frame_control) ||
                  ieee80211_is_action(hdr->frame_control)) &&
                 is_multicast_ether_addr(hdr->addr1) &&
                 !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "All PS CAB frames received, back to sleep\n");
        } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having received "
                          "PS-Poll data (0x%lx)\n",
                        sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                        PS_WAIT_FOR_CAB |
                                        PS_WAIT_FOR_PSPOLL_DATA |
                                        PS_WAIT_FOR_TX_ACK));
        }
}

static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
                                    struct ath_softc *sc, struct sk_buff *skb,
                                    struct ieee80211_rx_status *rxs)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Send the frame to mac80211 */
        if (is_multicast_ether_addr(hdr->addr1)) {
                int i;
                /*
                 * Deliver broadcast/multicast frames to all suitable
                 * virtual wiphys.
                 */
                /* TODO: filter based on channel configuration */
                for (i = 0; i < sc->num_sec_wiphy; i++) {
                        struct ath_wiphy *aphy = sc->sec_wiphy[i];
                        struct sk_buff *nskb;
                        if (aphy == NULL)
                                continue;
                        nskb = skb_copy(skb, GFP_ATOMIC);
                        if (!nskb)
                                continue;
                        ieee80211_rx(aphy->hw, nskb);
                }
                ieee80211_rx(sc->hw, skb);
        } else
                /* Deliver unicast frames based on receiver address */
                ieee80211_rx(hw, skb);
}

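/*
 * Check the buffer at the head of the rx FIFO; if the hardware has
 * finished with it, move it to the rx_buffers queue for processing.
 * Returns false when the head buffer is still owned by the hardware or
 * the FIFO is empty.
 */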
static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int ret;

        skb = skb_peek(&rx_edma->rx_fifo);
        if (!skb)
                return false;

        bf = SKB_CB_ATHBUF(skb);
        BUG_ON(!bf);

        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize, DMA_FROM_DEVICE);

        ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
        if (ret == -EINPROGRESS)
                return false;

        __skb_unlink(skb, &rx_edma->rx_fifo);
        if (ret == -EINVAL) {
                /* corrupt descriptor, skip this one and the following one */
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
                skb = skb_peek(&rx_edma->rx_fifo);
                if (!skb)
                        return true;

                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);

                __skb_unlink(skb, &rx_edma->rx_fifo);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
        }
        skb_queue_tail(&rx_edma->rx_buffers, skb);

        return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                struct ath_rx_status *rs,
                                                enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct sk_buff *skb;
        struct ath_buf *bf;

        while (ath_edma_get_buffers(sc, qtype));
        skb = __skb_dequeue(&rx_edma->rx_buffers);
        if (!skb)
                return NULL;

        bf = SKB_CB_ATHBUF(skb);
        ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
        return bf;
}

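/*
 * Legacy (pre-EDMA) path: return the first rx buffer whose descriptor the
 * hardware has completed, or NULL if nothing is ready yet.
 */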
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                                           struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct ath_buf *bf;
        int ret;

        if (list_empty(&sc->rx.rxbuf)) {
                sc->rx.rxlink = NULL;
                return NULL;
        }

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ds = bf->bf_desc;

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on.  All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
                struct ath_buf *tbf;
                struct ath_desc *tds;

                memset(&trs, 0, sizeof(trs));
                if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        return NULL;
                }

                tbf = list_entry(bf->list.next, struct ath_buf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */

                tds = tbf->bf_desc;
                ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
                if (ret == -EINPROGRESS)
                        return NULL;
        }

        if (!bf->bf_mpdu)
                return bf;

        /*
         * Synchronize the DMA transfer with CPU before
         * 1. accessing the frame
         * 2. requeueing the same buffer to h/w
         */
        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                        common->rx_bufsize,
                        DMA_FROM_DEVICE);

        return bf;
}


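/*
 * Main rx processing loop, shared by the legacy and EDMA paths. For each
 * completed buffer: run the common rx pre/post processing, hand the frame
 * to mac80211 and give the hardware a freshly allocated replacement
 * buffer. With 'flush' set, completed buffers are requeued without being
 * processed. 'hp' selects the high priority EDMA FIFO and is ignored on
 * legacy hardware.
 */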
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
        struct ath_buf *bf;
        struct sk_buff *skb = NULL, *requeue_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        /*
         * The hw can technically differ from common->hw when using ath9k
         * virtual wiphy; to account for that, we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
        struct ieee80211_hw *hw = NULL;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;

        /* must match the direction used when the rx buffers were mapped */
        if (edma)
                dma_type = DMA_BIDIRECTIONAL;
        else
                dma_type = DMA_FROM_DEVICE;

        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
        spin_lock_bh(&sc->rx.rxbuflock);

        do {
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                memset(&rs, 0, sizeof(rs));
                if (edma)
                        bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
                else
                        bf = ath_get_next_rx_buf(sc, &rs);

                if (!bf)
                        break;

                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                hdr = (struct ieee80211_hdr *) skb->data;
                rxs =  IEEE80211_SKB_RXCB(skb);

                hw = ath_get_virt_hw(sc, hdr);

                ath_debug_stat_rx(sc, &rs);

                /*
                 * If we're asked to flush receive queue, directly
                 * chain it back at the queue without processing it.
                 */
                if (flush)
                        goto requeue;

                retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
                                                     rxs, &decrypt_error);
                if (retval)
                        goto requeue;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
                        goto requeue;

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 common->rx_bufsize,
                                 dma_type);

                skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);

                ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
                                             rxs, decrypt_error);

                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                                 common->rx_bufsize,
                                                 dma_type);
                if (unlikely(dma_mapping_error(sc->dev,
                          bf->bf_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX\n");
                        ath_rx_send_to_mac80211(hw, sc, skb, rxs);
                        break;
                }
                bf->bf_dmacontext = bf->bf_buf_addr;

                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->rx.defant != rs.rs_antenna) {
                        if (++sc->rx.rxotherant >= 3)
                                ath_setdefantenna(sc, rs.rs_antenna);
                } else {
                        sc->rx.rxotherant = 0;
                }

                if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                             PS_WAIT_FOR_CAB |
                                             PS_WAIT_FOR_PSPOLL_DATA)))
                        ath_rx_ps(sc, skb);

                ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
                }
        } while (1);

        spin_unlock_bh(&sc->rx.rxbuflock);

        return 0;
}