#include "rt2x00.h"
#include "rt2x00lib.h"
-struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
- struct queue_entry *entry)
+struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct sk_buff *skb;
struct skb_frame_desc *skbdesc;
unsigned int frame_size;
return skb;
}
-void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
+void rt2x00queue_map_txskb(struct queue_entry *entry)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ struct device *dev = entry->queue->rt2x00dev->dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->skb_dma =
- dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
-void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
+void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ struct device *dev = entry->queue->rt2x00dev->dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
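+ /*
+ * Unmap in whichever direction the frame was mapped for DMA, as
+ * recorded in the frame descriptor flags.
+ */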
if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
- dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
DMA_FROM_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
- }
-
- if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
- dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
+ dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
-void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
+void rt2x00queue_free_skb(struct queue_entry *entry)
{
- if (!skb)
+ if (!entry->skb)
return;
- rt2x00queue_unmap_skb(rt2x00dev, skb);
- dev_kfree_skb_any(skb);
+ rt2x00queue_unmap_skb(entry);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- unsigned int l2pad = L2PAD_SIZE(header_length);
+ /*
+ * L2 padding is only present if the skb contains more than just the
+ * IEEE 802.11 header.
+ */
+ unsigned int l2pad = (skb->len > header_length) ?
+ L2PAD_SIZE(header_length) : 0;
if (!l2pad)
return;
memset(txdesc, 0, sizeof(*txdesc));
/*
- * Initialize information from queue
- */
- txdesc->queue = entry->queue->qid;
- txdesc->cw_min = entry->queue->cw_min;
- txdesc->cw_max = entry->queue->cw_max;
- txdesc->aifs = entry->queue->aifs;
-
- /*
* Header and frame information.
*/
txdesc->length = entry->skb->len;
* Map the skb to DMA.
*/
if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
- rt2x00queue_map_txskb(rt2x00dev, entry->skb);
+ rt2x00queue_map_txskb(entry);
return 0;
}
rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
-static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
struct txentry_desc *txdesc)
{
- struct data_queue *queue = entry->queue;
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-
/*
* Check if we need to kick the queue; there are, however, a few rules:
* 1) Don't kick unless this is the last frame in a burst.
*/
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
- rt2x00dev->ops->lib->kick_tx_queue(queue);
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local)
{
struct ieee80211_tx_info *tx_info;
- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry *entry;
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
u8 rate_idx, rate_flags;
+ int ret = 0;
- if (unlikely(rt2x00queue_full(queue)))
- return -ENOBUFS;
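+ /*
+ * Serialize frame submission on this queue: the entry obtained
+ * below via Q_INDEX must not be claimed by a concurrent caller
+ * before the index has been advanced.
+ */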
+ spin_lock(&queue->tx_lock);
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+ ERROR(queue->rt2x00dev,
+ "Dropping frame due to full tx queue %d.\n", queue->qid);
+ ret = -ENOBUFS;
+ goto out;
+ }
- if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
+ if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
+ &entry->flags))) {
ERROR(queue->rt2x00dev,
"Arrived at non-free entry in the non-full queue %d.\n"
"Please file bug report to %s.\n",
queue->qid, DRV_PROJECT);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out;
}
set_bit(ENTRY_DATA_PENDING, &entry->flags);
rt2x00queue_index_inc(queue, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
- rt2x00queue_kick_tx_queue(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(queue, &txdesc);
- return 0;
+out:
+ spin_unlock(&queue->tx_lock);
+ return ret;
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
/*
* Clean up the beacon skb.
*/
- rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
- intf->beacon->skb = NULL;
+ rt2x00queue_free_skb(intf->beacon);
if (!enable_beacon) {
- rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
+ rt2x00queue_stop_queue(intf->beacon->queue);
mutex_unlock(&intf->beacon_skb_mutex);
return 0;
}
* it should not be kicked during this run, since it
* is part of another TX operation.
*/
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
index_start = queue->index[start];
index_end = queue->index[end];
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
/*
* Start from the TX done pointer; this guarantees that we will
return NULL;
}
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
entry = &queue->entries[queue->index[index]];
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
return entry;
}
return;
}
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
queue->index[index]++;
if (queue->index[index] >= queue->limit)
queue->index[index] = 0;
+ queue->last_action[index] = jiffies;
+
if (index == Q_INDEX) {
queue->length++;
- queue->last_index = jiffies;
} else if (index == Q_INDEX_DONE) {
queue->length--;
queue->count++;
- queue->last_index_done = jiffies;
}
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-static void rt2x00queue_reset(struct data_queue *queue)
+void rt2x00queue_pause_queue(struct data_queue *queue)
{
- unsigned long irqflags;
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
- spin_lock_irqsave(&queue->lock, irqflags);
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to disable the queue
+ * inside mac80211.
+ */
+ ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
- queue->count = 0;
- queue->length = 0;
- queue->last_index = jiffies;
- queue->last_index_done = jiffies;
- memset(queue->index, 0, sizeof(queue->index));
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+ return;
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to enable the queue
+ * inside mac80211.
+ */
+ ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ case QID_RX:
+ /*
+ * For RX we need to kick the queue now in order to
+ * receive frames.
+ */
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ default:
+ break;
+ }
}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
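+ /*
+ * Mark the queue as paused while the hardware queue is brought
+ * up; rt2x00queue_unpause_queue() below wakes it again once the
+ * queue has been started.
+ */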
+ set_bit(QUEUE_PAUSED, &queue->flags);
+
+ queue->rt2x00dev->ops->lib->start_queue(queue);
+
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
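+ /*
+ * Pause the queue first so that no new frames are handed to the
+ * hardware while it is being stopped.
+ */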
+ rt2x00queue_pause_queue(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+ bool started;
+ bool tx_queue =
+ (queue->qid == QID_AC_VO) ||
+ (queue->qid == QID_AC_VI) ||
+ (queue->qid == QID_AC_BE) ||
+ (queue->qid == QID_AC_BK);
+
+ mutex_lock(&queue->status_lock);
+
+ /*
+ * If the queue has been started, we must stop it temporarily
+ * to prevent any new frames from being queued on the device. If
+ * we are not dropping the pending frames, the queue must
+ * only be stopped in the software and not the hardware,
+ * otherwise the queue will never become empty on its own.
+ */
+ started = test_bit(QUEUE_STARTED, &queue->flags);
+ if (started) {
+ /*
+ * Pause the queue
+ */
+ rt2x00queue_pause_queue(queue);
+
+ /*
+ * If we are not supposed to drop any pending
+ * frames, we must force a start (kick) of the
+ * queue to make sure the hardware will start
+ * transmitting.
+ */
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ }
+
+ /*
+ * Check if the driver supports flushing; we can only guarantee
+ * full flushing support if the driver is able
+ * to cancel all pending frames (drop = true).
+ */
+ if (drop && queue->rt2x00dev->ops->lib->flush_queue)
+ queue->rt2x00dev->ops->lib->flush_queue(queue);
+
+ /*
+ * When we don't want to drop any frames, or when
+ * the driver doesn't fully flush the queue correctly,
+ * we must wait for the queue to become empty.
+ */
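+ /* Poll for at most 100 * 10 ms = 1 second before giving up. */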
+ for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
+ msleep(10);
+
+ /*
+ * The queue flush has failed...
+ */
+ if (unlikely(!rt2x00queue_empty(queue)))
+ WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
+
+ /*
+ * Restore the queue to its previous state
+ */
+ if (started)
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_start_queue will call ieee80211_wake_queue
+ * for each queue after it has been properly initialized.
+ */
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_start_queue(queue);
+
+ rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
- txall_queue_for_each(rt2x00dev, queue)
- rt2x00dev->ops->lib->kill_tx_queue(queue);
+ /*
+ * rt2x00queue_stop_queue will call ieee80211_stop_queue
+ * as well, but we are completely shutting down everything
+ * now, so it is much safer to stop all TX queues at once,
+ * and use rt2x00queue_stop_queue for cleaning up.
+ */
+ ieee80211_stop_queues(rt2x00dev->hw);
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_stop_queue(queue);
+
+ rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+
+ rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
+
+static void rt2x00queue_reset(struct data_queue *queue)
+{
+ unsigned long irqflags;
+ unsigned int i;
+
+ spin_lock_irqsave(&queue->index_lock, irqflags);
+
+ queue->count = 0;
+ queue->length = 0;
+
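+ /*
+ * Reset every queue index and record the current time as the
+ * last action on each of them.
+ */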
+ for (i = 0; i < Q_INDEX_MAX; i++) {
+ queue->index[i] = 0;
+ queue->last_action[i] = jiffies;
+ }
+
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
queue_for_each(rt2x00dev, queue) {
rt2x00queue_reset(queue);
- for (i = 0; i < queue->limit; i++) {
+ for (i = 0; i < queue->limit; i++)
rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
- if (queue->qid == QID_RX)
- rt2x00queue_index_inc(queue, Q_INDEX);
- }
}
}
* Allocate all queue entries.
*/
entry_size = sizeof(*entries) + qdesc->priv_size;
- entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
+ entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
if (!entries)
return -ENOMEM;
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
- ( ((char *)(__base)) + ((__limit) * (__esize)) + \
- ((__index) * (__psize)) )
+ (((char *)(__base)) + ((__limit) * (__esize)) + \
+ ((__index) * (__psize)))
for (i = 0; i < queue->limit; i++) {
entries[i].flags = 0;
return 0;
}
-static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
+static void rt2x00queue_free_skbs(struct data_queue *queue)
{
unsigned int i;
return;
for (i = 0; i < queue->limit; i++) {
- if (queue->entries[i].skb)
- rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
+ rt2x00queue_free_skb(&queue->entries[i]);
}
}
-static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
+static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
unsigned int i;
struct sk_buff *skb;
for (i = 0; i < queue->limit; i++) {
- skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
+ skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
if (!skb)
return -ENOMEM;
queue->entries[i].skb = skb;
goto exit;
}
- status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
+ status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
if (status)
goto exit;
{
struct data_queue *queue;
- rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);
+ rt2x00queue_free_skbs(rt2x00dev->rx);
queue_for_each(rt2x00dev, queue) {
kfree(queue->entries);
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
- spin_lock_init(&queue->lock);
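+ /*
+ * status_lock serializes queue start/stop/flush operations,
+ * tx_lock serializes TX frame submission, and index_lock
+ * protects the queue index bookkeeping.
+ */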
+ mutex_init(&queue->status_lock);
+ spin_lock_init(&queue->tx_lock);
+ spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
queue->qid = qid;
*/
rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
- queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
+ queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
if (!queue) {
ERROR(rt2x00dev, "Queue allocation failed.\n");
return -ENOMEM;
/*
* Initialize queue parameters.
* RX: qid = QID_RX
- * TX: qid = QID_AC_BE + index
+ * TX: qid = QID_AC_VO + index
* TX: cw_min: 2^5 = 32.
* TX: cw_max: 2^10 = 1024.
* BCN: qid = QID_BEACON
*/
rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
- qid = QID_AC_BE;
+ qid = QID_AC_VO;
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);