/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at most 3 bytes of headroom for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate the skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame length,
	 * leaving the requested bytes available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

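/*
 * Map a TX skb for DMA, including the driver-requested headroom:
 * mac80211 hands us skb->data pointing at the 802.11 header, but the
 * extra_tx_headroom in front of it is where the driver places its TX
 * descriptor information, so the mapping must cover that region too.
 */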
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure that
	 * it is also mapped for DMA so it can be used for transferring
	 * additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore the data pointer to its original location.
	 */
	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add the headroom to the skb length: it has been removed
		 * by the driver, but it was included in the DMA mapping.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->hw->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

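/*
 * Align the complete frame to a 4-byte boundary. ALIGN_SIZE(skb, offset)
 * is assumed (per its definition in rt2x00queue.h) to be the low two bits
 * of the address skb->data + offset, i.e. how many bytes the data sits
 * past a 4-byte boundary. Example: if skb->data ends in 0x..2, align is
 * 2, so the frame is pushed back 2 bytes, moved down, and trimmed back
 * to its original length.
 */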
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

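/*
 * Insert L2 padding between the 802.11 header and the payload so both
 * end up 4-byte aligned. L2PAD_SIZE(hdrlen) is assumed (per
 * rt2x00queue.h) to be -hdrlen & 3. Worked example: a 26-byte QoS data
 * header starting on a 4-byte boundary leaves the payload at offset 26;
 * L2PAD_SIZE(26) == 2, so 2 pad bytes after the header realign the
 * payload without moving it relative to the start of the buffer.
 */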
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int frame_length = skb->len;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (header_align == payload_align) {
		/*
		 * Both header and payload must be moved by the same
		 * number of bytes to align them properly. This means
		 * we don't use the L2 padding but simply move the
		 * entire frame.
		 */
		rt2x00queue_align_frame(skb);
	} else if (!payload_align) {
		/*
		 * Simple L2 padding, only the header needs to be moved;
		 * the payload is already properly aligned.
		 */
		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, header_length);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	} else {
		/*
		 * Complicated L2 padding, both header and payload need
		 * to be moved. By default we only move to the start
		 * of the buffer, so our header alignment needs to be
		 * increased if there is not enough room for the header
		 * to be moved.
		 */
		if (payload_align > header_align)
			header_align += 4;

		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, header_length);
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			frame_length - header_length);
		skb_trim(skb, frame_length + l2pad);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

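/*
 * Assign the sequence number under the intf seqlock. In the 802.11
 * Sequence Control field the fragment number occupies bits 0-3
 * (the IEEE80211_SCTL_FRAG mask) and the sequence number bits 4-15,
 * which is why the counter below advances in steps of 0x10.
 */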
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert the sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

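/*
 * Fill in the PLCP SIGNAL/SERVICE/LENGTH values. For CCK rates the
 * LENGTH field carries the frame duration in microseconds. Worked
 * example, assuming GET_DURATION(size, rate) = (size * 8 * 10) / rate
 * with the rate in 100 kbit/s units: a 1500-byte frame at 11 Mbit/s
 * (bitrate == 110) gives 120000 / 110 = 1090 us with residual 100, so
 * the duration is rounded up to 1091 us; the length-extension bit in
 * SERVICE is only needed at 11 Mbit/s when the residual is small
 * (<= 30).
 */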
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup.
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from the queue.
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and alignment information.
	 */
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
	if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
		txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for frames that have
	 * been injected through a monitor interface. The latter case
	 * is needed for testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, which means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick the beacon queue.
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS and CTS-to-self frames.
	 *	3) Rule 2 can be broken when the number of available entries
	 *	   in the queue drops below a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}

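/*
 * Queue a single frame for transmission. The flow is: claim the entry
 * at Q_INDEX, build the TX descriptor while mac80211's data in skb->cb
 * is still intact, take over skb->cb, strip or copy the IV for hardware
 * crypto, align the frame (or insert L2 padding), hand the data to the
 * driver, map it for DMA, and finally write the descriptor, which may
 * kick the queue.
 */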
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

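/*
 * Refresh (or disable) the beacon for a virtual interface. The TX
 * descriptor is built in a local desc[] array rather than in skb
 * headroom, so the driver's write_beacon() callback can move it to
 * wherever its hardware expects the beacon descriptor to live.
 */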
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from where the
	 * driver can move it to the correct location required for
	 * the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write the TX descriptor into the reserved room in front of
	 * the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send the beacon to the hardware.
	 * Also enable beacon generation, which might have been disabled
	 * by the driver during the config_beacon() callback function.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

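/*
 * Map a qid to its data_queue. TX qids below ops->tx_queues index the
 * tx array directly; QID_RX, QID_BEACON and QID_ATIM map to the rx
 * queue and the two bcn entries. Any other qid yields NULL.
 */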
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

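/*
 * Advance one of the circular queue indices. Q_INDEX points at the next
 * entry handed out for new frames and Q_INDEX_DONE at the next entry to
 * be completed, so queue->length (the number of frames in flight) grows
 * when Q_INDEX moves and shrinks when Q_INDEX_DONE catches up.
 */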
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

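/*
 * Allocate the entry array for a queue as one block: queue->limit
 * struct queue_entry objects followed by queue->limit driver-private
 * areas of qdesc->priv_size bytes each, i.e.
 *
 *	[entry 0]...[entry limit-1][priv 0]...[priv limit-1]
 *
 * The QUEUE_ENTRY_PRIV_OFFSET() helper below computes the address of
 * each private area within that block.
 */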
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

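/*
 * Allocate the data_queue array in a single kzalloc(), laid out as
 * [RX][TX 0 .. TX tx_queues-1][BCN][ATIM (optional)], which is exactly
 * how the rx/tx/bcn pointers below are carved out of it.
 */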
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}