This proof-of-concept patch appears to work; basic test results are included below the diff.
diff --git a/include/net/fq.h b/include/net/fq.h
index 2eccbbd..5ca3f91 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -7,6 +7,8 @@
#ifndef __NET_SCHED_FQ_H
#define __NET_SCHED_FQ_H
+#include <net/skbuff_light.h>
+
struct fq_tin;
/**
@@ -27,7 +29,7 @@ struct fq_tin;
struct fq_flow {
struct fq_tin *tin;
struct list_head flowchain;
- struct sk_buff_head queue;
+ struct sk_buff_light_head queue;
u32 backlog;
int deficit;
};
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index a5f67a2..1ba0f48 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -51,7 +51,7 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
lockdep_assert_held(&fq->lock);
- skb = __skb_dequeue(&flow->queue);
+ skb = __skb_light_dequeue(&flow->queue);
if (!skb)
return NULL;
@@ -66,13 +66,12 @@ static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
unsigned int packets = 0, bytes = 0, truesize = 0;
struct fq_tin *tin = flow->tin;
struct sk_buff *skb;
- int pending;
+ u32 pending;
lockdep_assert_held(&fq->lock);
-
- pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
+ pending = min_t(u32, 32*1514, flow->backlog >> 1);
do {
- skb = __skb_dequeue(&flow->queue);
+ skb = __skb_light_dequeue(&flow->queue);
if (!skb)
break;
@@ -80,7 +79,7 @@ static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
bytes += skb->len;
truesize += skb->truesize;
free_func(fq, tin, flow, skb);
- } while (packets < pending);
+ } while (bytes < pending);
__fq_adjust_removal(fq, flow, packets, bytes, truesize);
@@ -226,7 +225,7 @@ static void fq_tin_enqueue(struct fq *fq,
&tin->new_flows);
}
- __skb_queue_tail(&flow->queue, skb);
+ __skb_light_queue_tail(&flow->queue, skb);
oom = (fq->memory_usage > fq->memory_limit);
while (fq->backlog > fq->limit || oom) {
flow = fq_find_fattest_flow(fq);
@@ -256,11 +255,11 @@ static void fq_flow_filter(struct fq *fq,
lockdep_assert_held(&fq->lock);
- skb_queue_walk_safe(&flow->queue, skb, tmp) {
+ skb_light_queue_walk_safe(&flow->queue, skb, tmp) {
if (!filter_func(fq, tin, flow, skb, filter_data))
continue;
- __skb_unlink(skb, &flow->queue);
+ __skb_light_unlink(skb, &flow->queue);
fq_adjust_removal(fq, flow, skb);
free_func(fq, tin, flow, skb);
}
@@ -331,7 +330,7 @@ static void fq_tin_reset(struct fq *fq,
static void fq_flow_init(struct fq_flow *flow)
{
INIT_LIST_HEAD(&flow->flowchain);
- __skb_queue_head_init(&flow->queue);
+ __skb_light_queue_head_init(&flow->queue);
}
static void fq_tin_init(struct fq_tin *tin)
diff --git a/include/net/skbuff_light.h b/include/net/skbuff_light.h
new file mode 100644
index 0000000..4076326
--- /dev/null
+++ b/include/net/skbuff_light.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ */
+
+#ifndef _LINUX_LIGHT_SKBUFF_H
+#define _LINUX_LIGHT_SKBUFF_H
+
+#include <linux/skbuff.h>
+
+struct sk_buff_light_head {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+};
+
+/**
+ * skb_light_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+static inline int skb_light_queue_empty(const struct sk_buff_light_head *list)
+{
+ return list->next == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_light_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_light_queue_empty_lockless(const struct sk_buff_light_head *list)
+{
+ return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
+ * skb_light_queue_is_last - check if skb is the last entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_light_queue_is_last(const struct sk_buff_light_head *list,
+ const struct sk_buff *skb)
+{
+ return skb->next == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_light_queue_is_first - check if skb is the first entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the first buffer on the list.
+ */
+static inline bool skb_light_queue_is_first(const struct sk_buff_light_head *list,
+ const struct sk_buff *skb)
+{
+ return skb->prev == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_light_queue_next - return the next packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the next packet in @list after @skb. It is only valid to
+ * call this if skb_light_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_light_queue_next(const struct sk_buff_light_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_light_queue_is_last(list, skb));
+ return skb->next;
+}
+
+/**
+ * skb_light_queue_prev - return the prev packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the prev packet in @list before @skb. It is only valid to
+ * call this if skb_light_queue_is_first() evaluates to false.
+ */
+static inline struct sk_buff *skb_light_queue_prev(const struct sk_buff_light_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_light_queue_is_first(list, skb));
+ return skb->prev;
+}
+
+/**
+ * skb_light_peek - peek at the head of an &sk_buff_light_head
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_light_peek(const struct sk_buff_light_head *list_)
+{
+ struct sk_buff *skb = list_->next;
+
+ if (skb == (struct sk_buff *)list_)
+ skb = NULL;
+ return skb;
+}
+
+/**
+ * __skb_light_peek - peek at the head of a non-empty &sk_buff_light_head
+ * @list_: list to peek at
+ *
+ * Like skb_light_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_light_peek(const struct sk_buff_light_head *list_)
+{
+ return list_->next;
+}
+
+/**
+ * skb_light_peek_next - peek skb following the given one from a queue
+ * @skb: skb to start from
+ * @list_: list to peek at
+ *
+ * Returns %NULL when the end of the list is met or a pointer to the
+ * next element. The reference count is not incremented and the
+ * reference is therefore volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_light_peek_next(struct sk_buff *skb,
+ const struct sk_buff_light_head *list_)
+{
+ struct sk_buff *next = skb->next;
+
+ if (next == (struct sk_buff *)list_)
+ next = NULL;
+ return next;
+}
+
+/**
+ * skb_light_peek_tail - peek at the tail of an &sk_buff_light_head
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_light_peek_tail(const struct sk_buff_light_head *list_)
+{
+ struct sk_buff *skb = READ_ONCE(list_->prev);
+
+ if (skb == (struct sk_buff *)list_)
+ skb = NULL;
+ return skb;
+
+}
+
+/**
+ * __skb_light_queue_head_init - initialize an sk_buff_light_head
+ * @list: queue to initialize
+ *
+ * This initializes the list pointers of an sk_buff_light_head
+ * object. Unlike struct sk_buff_head, the light head carries no
+ * spinlock and no queue length field, so this is the complete
+ * initialization. It can also be used for on-stack
+ * sk_buff_light_head objects.
+ */
+static inline void __skb_light_queue_head_init(struct sk_buff_light_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+}
+
+/*
+ * struct sk_buff_light_head has no spinlock, so unlike
+ * skb_queue_head_init() there is no lock class to set up here;
+ * this is a plain alias for __skb_light_queue_head_init().
+ * skb_light_queue_head_init_class() below likewise ignores its
+ * lock_class_key argument; it exists only so that users of the
+ * regular sk_buff_head API can be converted mechanically.
+ */
+static inline void skb_light_queue_head_init(struct sk_buff_light_head *list)
+{
+ __skb_light_queue_head_init(list);
+}
+
+static inline void skb_light_queue_head_init_class(struct sk_buff_light_head *list,
+ struct lock_class_key *class)
+{
+ skb_light_queue_head_init(list);
+}
+
+/*
+ * Insert an sk_buff on a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+static inline void __skb_light_insert(struct sk_buff *newsk,
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_light_head *list)
+{
+ /* See skb_light_queue_empty_lockless() and skb_light_peek_tail()
+ * for the opposite READ_ONCE()
+ */
+ WRITE_ONCE(newsk->next, next);
+ WRITE_ONCE(newsk->prev, prev);
+ WRITE_ONCE(next->prev, newsk);
+ WRITE_ONCE(prev->next, newsk);
+}
+
+static inline void __skb_light_queue_splice(const struct sk_buff_light_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *next)
+{
+ struct sk_buff *first = list->next;
+ struct sk_buff *last = list->prev;
+
+ WRITE_ONCE(first->prev, prev);
+ WRITE_ONCE(prev->next, first);
+
+ WRITE_ONCE(last->next, next);
+ WRITE_ONCE(next->prev, last);
+}
+
+/**
+ * skb_light_queue_splice - join two skb lists, this is designed for stacks
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_light_queue_splice(const struct sk_buff_light_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_light_queue_empty(list)) {
+ __skb_light_queue_splice(list, (struct sk_buff *) head, head->next);
+ }
+}
+
+/**
+ * skb_light_queue_splice_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * The list at @list is reinitialised
+ */
+static inline void skb_light_queue_splice_init(struct sk_buff_light_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_light_queue_empty(list)) {
+ __skb_light_queue_splice(list, (struct sk_buff *) head, head->next);
+ __skb_light_queue_head_init(list);
+ }
+}
+
+/**
+ * skb_light_queue_splice_tail - join two skb lists, each list being a queue
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_light_queue_splice_tail(const struct sk_buff_light_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_light_queue_empty(list)) {
+ __skb_light_queue_splice(list, head->prev, (struct sk_buff *) head);
+ }
+}
+
+/**
+ * skb_light_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void skb_light_queue_splice_tail_init(struct sk_buff_light_head *list,
+ struct sk_buff_light_head *head)
+{
+ if (!skb_light_queue_empty(list)) {
+ __skb_light_queue_splice(list, head->prev, (struct sk_buff *) head);
+ __skb_light_queue_head_init(list);
+ }
+}
+
+/**
+ * __skb_light_queue_after - queue a buffer after a given buffer
+ * @list: list to use
+ * @prev: place after this buffer
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer in the middle of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+static inline void __skb_light_queue_after(struct sk_buff_light_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *newsk)
+{
+ __skb_light_insert(newsk, prev, prev->next, list);
+}
+
+void skb_light_append(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_light_head *list);
+
+static inline void __skb_light_queue_before(struct sk_buff_light_head *list,
+ struct sk_buff *next,
+ struct sk_buff *newsk)
+{
+ __skb_light_insert(newsk, next->prev, next, list);
+}
+
+/**
+ * __skb_light_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+static inline void __skb_light_queue_head(struct sk_buff_light_head *list,
+ struct sk_buff *newsk)
+{
+ __skb_light_queue_after(list, (struct sk_buff *)list, newsk);
+}
+void skb_light_queue_head(struct sk_buff_light_head *list, struct sk_buff *newsk);
+
+/**
+ * __skb_light_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+static inline void __skb_light_queue_tail(struct sk_buff_light_head *list,
+ struct sk_buff *newsk)
+{
+ __skb_light_queue_before(list, (struct sk_buff *)list, newsk);
+}
+void skb_light_queue_tail(struct sk_buff_light_head *list, struct sk_buff *newsk);
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+void skb_light_unlink(struct sk_buff *skb, struct sk_buff_light_head *list);
+static inline void __skb_light_unlink(struct sk_buff *skb, struct sk_buff_light_head *list)
+{
+ struct sk_buff *next, *prev;
+
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
+}
+
+/**
+ * __skb_light_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+static inline struct sk_buff *__skb_light_dequeue(struct sk_buff_light_head *list)
+{
+ struct sk_buff *skb = skb_light_peek(list);
+ if (skb)
+ __skb_light_unlink(skb, list);
+ return skb;
+}
+struct sk_buff *skb_light_dequeue(struct sk_buff_light_head *list);
+
+/**
+ * __skb_light_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+static inline struct sk_buff *__skb_light_dequeue_tail(struct sk_buff_light_head *list)
+{
+ struct sk_buff *skb = skb_light_peek_tail(list);
+ if (skb)
+ __skb_light_unlink(skb, list);
+ return skb;
+}
+struct sk_buff *skb_light_dequeue_tail(struct sk_buff_light_head *list);
+
+#define skb_light_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = skb->next)
+
+#define skb_light_queue_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->next, tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
+#define skb_light_queue_walk_from(queue, skb) \
+ for (; skb != (struct sk_buff *)(queue); \
+ skb = skb->next)
+
+#define skb_light_queue_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
+#define skb_light_queue_reverse_walk(queue, skb) \
+ for (skb = (queue)->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = skb->prev)
+
+#define skb_light_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_light_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+
+#endif
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 50025ff..7f52580 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3347,7 +3347,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
tin = &txqi->tin;
flow = fq_flow_classify(fq, tin, flow_idx, skb);
- head = skb_peek_tail(&flow->queue);
+ head = skb_light_peek_tail(&flow->queue);
if (!head || skb_is_gso(head))
goto out;
I've done some basic tests with https://fast.com. The latency under load was around 16-18 ms with this patch on the 802.11ac 5 GHz radio of an Archer C7. The router, acting as an AP, has been stable so far with these changes.
With some further changes it might be possible to turn this into an O(1) scheduler.
I've also tested fq_pie to see how it behaves on the upload side (on the eth0 interface of the router in AP mode, not on the WAN port). The upload barely reached 100 Mbps, whereas the default fq_codel allowed the AP to go as high as 300 Mbps.