author    Loic Poulain <loic.poulain@linaro.org>  2021-01-18 12:31:13 +0100
committer Loic Poulain <loic.poulain@linaro.org>  2021-01-19 12:11:53 +0100
commit    c933cccc35791db99d0dd6374a84ef6fced5e4af (patch)
tree      2a048aecdc27c1c4c4626953d0d5e36022f77db2
parent    0f6c4dfd02c372a0c669fd0e5cf802f17619f8bc (diff)

net: mhi: Add dedicated alloc thread (sdx55-v124)
The buffer allocation for the RX path is currently done by a work item
executed on the system workqueue. The work is quite simple and consists
mostly of allocating and queueing as many buffers as possible to the MHI
RX channel.

This patch replaces the system work with a simple kthread that loops on
buffer allocation and sleeps when the queue is full. In practical
testing, this change appears to improve:
- Peak throughput (slightly, by a few Mbps)
- Throughput stability under concurrent load (stress)

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
-rw-r--r--  drivers/net/mhi_net.c | 101
1 file changed, 57 insertions(+), 44 deletions(-)
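For readers unfamiliar with the pattern, below is a minimal, self-contained sketch of the kthread-plus-waitqueue refill scheme this patch adopts. All names here (demo_refill, demo_refill_thread, DEMO_QUEUE_SZ, ...) are hypothetical and the RX queue is reduced to an atomic counter, so it only illustrates the start/wake/stop lifecycle, not the MHI API used in the diff below.

/* Hypothetical sketch only; the real driver queues skbs via mhi_queue_skb(). */
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

#define DEMO_QUEUE_SZ	128
#define DEMO_REFILL_LVL	50	/* refill once the queue drops to 50% full or less */

struct demo_refill {
	struct task_struct *task;
	wait_queue_head_t wq;
	atomic_t queued;	/* stand-in for the number of queued RX buffers */
	u32 refill_level;	/* free-slot threshold that triggers a wake-up */
};

static int demo_refill_thread(void *data)
{
	struct demo_refill *d = data;

	while (!kthread_should_stop()) {
		/* Sleep until there is room to queue more buffers, or we must stop */
		if (wait_event_interruptible(d->wq,
					     atomic_read(&d->queued) < DEMO_QUEUE_SZ ||
					     kthread_should_stop()))
			break;
		if (kthread_should_stop())
			break;

		/* "Allocate and queue" one buffer; here this is just a counter */
		atomic_inc(&d->queued);
		cond_resched();		/* do not hog the CPU */
	}
	return 0;
}

/* Consumer side (e.g. RX completion): wake the thread once the queue runs low */
static void demo_buffer_consumed(struct demo_refill *d)
{
	int free_slots = DEMO_QUEUE_SZ - atomic_dec_return(&d->queued);

	if (free_slots >= d->refill_level)
		wake_up_interruptible(&d->wq);
}

static int demo_start(struct demo_refill *d)
{
	atomic_set(&d->queued, 0);
	init_waitqueue_head(&d->wq);
	/* e.g. 128 - 128 * 50 / 100 = 64 free slots before waking the thread */
	d->refill_level = DEMO_QUEUE_SZ - DEMO_QUEUE_SZ * DEMO_REFILL_LVL / 100;
	d->task = kthread_run(demo_refill_thread, d, "demo-refill");
	return PTR_ERR_OR_ZERO(d->task);
}

static void demo_stop(struct demo_refill *d)
{
	kthread_stop(d->task);
}

Note that kthread_stop() both sets the should-stop flag and wakes the sleeping task, so the thread leaves wait_event_interruptible() and exits cleanly without extra signalling; this is why mhi_ndo_stop() in the patch only needs the single kthread_stop() call.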
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index a833850be9ab..162d816d03b6 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -5,6 +5,7 @@
*/
#include <linux/if_arp.h>
+#include <linux/kthread.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -16,6 +17,8 @@
#define MHI_NET_MAX_MTU 0xffff
#define MHI_NET_DEFAULT_MTU 0x4000
+#define RX_REFILL_LVL 50 /* Refill the RX queue when it drops to 50% full or less */
+
struct mhi_net_stats {
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
@@ -32,18 +35,65 @@ struct mhi_net_stats {
struct mhi_net_dev {
struct mhi_device *mdev;
struct net_device *ndev;
- struct delayed_work rx_refill;
+ struct task_struct *refill_task;
struct sk_buff *skbagg;
- struct mhi_net_stats stats;
u32 rx_queue_sz;
+ u32 refill_level;
+ struct mhi_net_stats stats;
+ wait_queue_head_t refill_wq;
};
+static int mhi_net_refill_thread(void *data)
+{
+ struct mhi_net_dev *mhi_netdev = data;
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int size = READ_ONCE(ndev->mtu);
+ struct sk_buff *skb;
+ int err;
+
+ while (1) {
+ err = wait_event_interruptible(mhi_netdev->refill_wq,
+					       !mhi_queue_is_full(mdev, DMA_FROM_DEVICE) ||
+					       kthread_should_stop());
+ if (err || kthread_should_stop())
+ break;
+
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb)) {
+ /* No memory, retry later */
+ net_warn_ratelimited("%s: Unable to allocate RX buf\n",
+ ndev->name);
+ schedule_timeout_interruptible(msecs_to_jiffies(250));
+ continue;
+ }
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+ ndev->name, err);
+ kfree_skb(skb);
+ }
+
+ /* Do not hog the CPU */
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int mhi_ndo_open(struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ unsigned int qsz = mhi_netdev->rx_queue_sz;
/* Feed the rx buffer pool */
- schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+ mhi_netdev->refill_level = qsz - qsz * RX_REFILL_LVL / 100;
+
+ mhi_netdev->refill_task = kthread_run(mhi_net_refill_thread, mhi_netdev,
+ "mhi-net-rx");
+ if (IS_ERR(mhi_netdev->refill_task))
+ return PTR_ERR(mhi_netdev->refill_task);
/* Carrier is established via out-of-band channel (e.g. qmi) */
netif_carrier_on(ndev);
@@ -57,9 +107,9 @@ static int mhi_ndo_stop(struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ kthread_stop(mhi_netdev->refill_task);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- cancel_delayed_work_sync(&mhi_netdev->rx_refill);
return 0;
}
@@ -163,9 +213,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = mhi_res->buf_addr;
- int free_desc_count;
-
- free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
if (unlikely(mhi_res->transaction_status)) {
switch (mhi_res->transaction_status) {
@@ -224,8 +271,8 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
}
/* Refill if RX buffers queue becomes low */
- if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
- schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+ if (mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE) >= mhi_netdev->refill_level)
+ wake_up_interruptible(&mhi_netdev->refill_wq);
}
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
@@ -261,40 +308,6 @@ static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
netif_wake_queue(ndev);
}
-static void mhi_net_rx_refill_work(struct work_struct *work)
-{
- struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
- rx_refill.work);
- struct net_device *ndev = mhi_netdev->ndev;
- struct mhi_device *mdev = mhi_netdev->mdev;
- int size = READ_ONCE(ndev->mtu);
- struct sk_buff *skb;
- int err;
-
- while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
- skb = netdev_alloc_skb(ndev, size);
- if (unlikely(!skb))
- break;
-
- err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
- if (unlikely(err)) {
- net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
- ndev->name, err);
- kfree_skb(skb);
- break;
- }
-
- /* Do not hog the CPU if rx buffers are consumed faster than
- * queued (unlikely).
- */
- cond_resched();
- }
-
- /* If we're still starved of rx buffers, reschedule later */
- if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
- schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
-}
-
static struct device_type wwan_type = {
.name = "wwan",
};
@@ -321,7 +334,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
SET_NETDEV_DEVTYPE(ndev, &wwan_type);
- INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
+ init_waitqueue_head(&mhi_netdev->refill_wq);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
u64_stats_init(&mhi_netdev->stats.tx_syncp);