/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;
	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb buffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;
	/*
	 * The payload should be aligned to a 4-byte boundary; this
	 * means we need at least 3 bytes of headroom for moving the
	 * frame into the correct offset.
	 */
	head_size = 4;
	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate the skb buffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;
	/*
	 * Reserve the requested headroom and set the frame length,
	 * so the requested bytes remain available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
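/*
 * Illustrative sketch (editorial, not driver code): for a queue with
 * hypothetical data_size = 2432 and desc_size = 32 on a device with
 * hardware crypto support, the allocation above produces:
 *
 *	| 12 bytes headroom | 2464 bytes frame space | 8 bytes tailroom |
 *
 * head_size = 4 (alignment) + 8 (IV/EIV) = 12 and tail_size = 8 (ICV),
 * while frame_size = 2432 + 32 = 2464. Real values come from the
 * driver's data_queue_desc.
 */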
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
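/*
 * Worked example (editorial), assuming ALIGN_SIZE(skb, offset) yields
 * ((unsigned long)((skb)->data + (offset)) & 3) as in rt2x00queue.h:
 * if skb->data sits at an address ending in 0x02, align == 2. The
 * skb_push() moves skb->data down to the aligned address, memmove()
 * shifts the frame into place, and skb_trim() restores the original
 * length, leaving the frame 4-byte aligned.
 */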
void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
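/*
 * Worked example (editorial), assuming L2PAD_SIZE(hdrlen) yields
 * (-(hdrlen) & 3) as in rt2x00queue.h: a QoS data header is 26 bytes,
 * so L2PAD_SIZE(26) == 2. Two padding bytes between header and payload
 * make the payload start at offset 28, which is a 4-byte boundary.
 */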
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * The hardware is supposed to insert the sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
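/*
 * Background note (editorial): in the 16-bit seq_ctrl field the
 * fragment number occupies bits 0-3 (IEEE80211_SCTL_FRAG) and the
 * sequence number bits 4-15 (IEEE80211_SCTL_SEQ). That is why the
 * counter above advances in steps of 0x10: each step increments the
 * sequence number by one, while the masking keeps the frame's
 * fragment bits intact.
 */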
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
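/*
 * Worked example (editorial), assuming GET_DURATION() computes
 * ((size * 8 * 10) / bitrate) and GET_DURATION_RES() its remainder,
 * with the bitrate in 100 kbit/s units: a 1504-byte frame at 11 Mbit/s
 * (bitrate == 110) gives duration 1504 * 80 / 110 == 1093 us with
 * residual 90, so the duration is rounded up to 1094 us; since the
 * residual exceeds 30, the Length Extension bit stays cleared.
 */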
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for frames that have
	 * been injected through a monitor interface. The latter is
	 * needed for testing via a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked that the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame that is in some way related to it.
	 *	   This is true for fragments, RTS and CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
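/*
 * Worked example (editorial): queue->threshold is set to
 * DIV_ROUND_UP(entry_num, 10) in rt2x00queue_alloc_entries() below,
 * so a 64-entry queue has a threshold of 7; once fewer than 7 entries
 * remain available, rt2x00queue_threshold() forces the kick even in
 * the middle of a burst.
 */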
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	spin_lock(&queue->tx_lock);

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}
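/*
 * Usage sketch (editorial): the mac80211 .tx hook in rt2x00mac.c calls
 *
 *	rt2x00queue_write_tx_frame(queue, skb, false);
 *
 * while internally generated frames such as RTS/CTS-to-self are passed
 * with local = true, so they are flagged SKBDESC_NOT_MAC80211 and not
 * reported back to mac80211 on TX completion.
 */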
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	if (!enable_beacon) {
		rt2x00queue_stop_queue(intf->beacon->queue);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send the beacon to the hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			fn(&queue->entries[i]);
	} else {
		for (i = index_start; i < queue->limit; i++)
			fn(&queue->entries[i]);

		for (i = 0; i < index_end; i++)
			fn(&queue->entries[i]);
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
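/*
 * Worked example (editorial): with queue->limit == 8, index_start == 6
 * and index_end == 2, the ring has wrapped, so fn() runs for entries
 * 6, 7, 0 and 1 in that order; without a wrap (index_start == 2,
 * index_end == 6) it runs for entries 2 through 5.
 */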
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	queue->last_action[index] = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
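/*
 * Worked example (editorial): with queue->limit == 4 and
 * queue->index[Q_INDEX] == 3, the next Q_INDEX increment wraps the
 * pointer to 0 and grows queue->length; a later Q_INDEX_DONE increment
 * shrinks queue->length again and accounts the completed frame in
 * queue->count.
 */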
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
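/*
 * State note (editorial reading): QUEUE_STARTED tracks whether the
 * hardware queue is running, QUEUE_PAUSED whether frames are withheld
 * from it. rt2x00queue_start_queue() sets QUEUE_PAUSED before starting
 * the hardware, so the rt2x00queue_unpause_queue() call afterwards
 * sees the bit set and performs the matching mac80211 wake.
 */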
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);
	unsigned int i;

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not in the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing, we can only guarantee
	 * full support for flushing if the driver is able
	 * to cancel all pending frames (drop = true).
	 */
	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
		queue->rt2x00dev->ops->lib->flush_queue(queue);

	/*
	 * When we don't want to drop any frames, or when
	 * the driver doesn't fully flush the queue correctly,
	 * we must wait for the queue to become empty.
	 */
	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
		msleep(10);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
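/*
 * Timing note (editorial): the wait loop above polls up to 100 times
 * with a 10 ms sleep per iteration, so a flush can block for roughly
 * one second before the failure warning is emitted.
 */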
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but since we are completely shutting down
	 * everything now, it is much safer to stop all TX queues
	 * at once, and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++) {
		queue->index[i] = 0;
		queue->last_action[i] = jiffies;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
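/*
 * Layout sketch (editorial): the kcalloc() above reserves
 * limit * (sizeof(*entries) + priv_size) bytes, which the loop carves
 * up as all queue_entry structs first, followed by all driver private
 * areas. For limit == 3 and private size P:
 *
 *	| entry[0] | entry[1] | entry[2] | priv[0] | priv[1] | priv[2] |
 *
 * QUEUE_ENTRY_PRIV_OFFSET() returns base + limit * sizeof(entry) +
 * index * P, the start of priv[index].
 */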
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
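/*
 * Layout sketch (editorial): for a driver with 4 TX queues and an ATIM
 * queue, the kcalloc'ed array above is indexed as
 *
 *	queue[0]	rx
 *	queue[1..4]	tx (QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK)
 *	queue[5]	bcn[0] (QID_BEACON)
 *	queue[6]	bcn[1] (QID_ATIM)
 *
 * matching data_queues = 2 + 4 + 1 == 7.
 */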
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}