author    Arun Murthy <arun.murthy@stericsson.com>          2011-12-13 22:44:34 +0530
committer Philippe Langlais <philippe.langlais@linaro.org>  2012-03-19 09:02:57 +0100
commit    22932e1cd93b84f5f7963b417bd82afe40b94bf3 (patch)
tree      3ad0535757569a3157e5f42750dd5277d3ff2c8a
parent    30ec12b2cf82d33c9e2cc8275e9ed9d674ca83a1 (diff)
caif-shm: synchronize caif flow control
In the shm tx work function, caif flow control is turned OFF without
acquiring the spin lock, and in the rx callback caif flow control is
turned ON. The expected sequence is: when caif writes a message to the
modem via shm and shm finds there is no empty buffer space, it sets
caif flow control to OFF; when shm receives messages from the modem,
it sets caif flow control back to ON. Since the rx callback runs in
interrupt context, its flow ON can race with the flow OFF issued from
the tx work function. Hence, create a separate single-threaded
workqueue and two work functions to turn caif flow control ON and OFF.
This ensures that caif flow control is turned OFF by the tx work
function before it is turned ON by the rx callback.

ST-Ericsson Linux next: NA
ST-Ericsson ID: 372652
ST-Ericsson FOSS-OUT ID: Trivial
Change-Id: Ide75415233d7b4560faf25862876c15421df7c2c

Signed-off-by: Arun Murthy <arun.murthy@stericsson.com>
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/42438
Reviewed-by: QABUILD
Reviewed-by: Hemant-vilas RAMDASI <hemant.ramdasi@stericsson.com>
Reviewed-by: Sjur BRENDELAND <sjur.brandeland@stericsson.com>
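The fix relies on a standard workqueue property: a workqueue created
with create_singlethread_workqueue() executes its work items one at a
time, in queueing order, on a single kernel thread. A minimal,
self-contained sketch of that pattern follows (illustration only, not
part of the patch; the demo_* names are hypothetical, while the
workqueue API calls are the same ones the patch uses):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_flow_on_work;
static struct work_struct demo_flow_off_work;

static void demo_flow_on_func(struct work_struct *work)
{
	/* Runs only after any previously queued item has completed. */
	pr_info("flow ON\n");
}

static void demo_flow_off_func(struct work_struct *work)
{
	pr_info("flow OFF\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_flow_ctrl");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_flow_on_work, demo_flow_on_func);
	INIT_WORK(&demo_flow_off_work, demo_flow_off_func);

	/*
	 * Both items land on the same kernel thread, so OFF is
	 * guaranteed to finish before ON starts, no matter which
	 * context (IRQ handler or work function) queued them.
	 */
	queue_work(demo_wq, &demo_flow_off_work);
	queue_work(demo_wq, &demo_flow_on_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Deferring the flowctrl calls this way also lets the callers queue the
work while still holding the spin lock (queue_work() is safe in atomic
context), which is why the patch below can queue inside the locked
region in caif_shmdrv_rx_cb() and drop the unlock/relock dance in
shm_tx_work_func().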
-rw-r--r--  drivers/net/caif/caif_shmcore.c  |  40
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a32..7bd35e0c18b5 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -106,9 +106,12 @@ struct shmdrv_layer {
struct workqueue_struct *pshm_tx_workqueue;
struct workqueue_struct *pshm_rx_workqueue;
+ struct workqueue_struct *pshm_flow_ctrl_workqueue;
struct work_struct shm_tx_work;
struct work_struct shm_rx_work;
+ struct work_struct shm_flow_on_work;
+ struct work_struct shm_flow_off_work;
struct sk_buff_head sk_qhead;
struct shmdev_layer *pshm_dev;
@@ -126,6 +129,24 @@ static int shm_netdev_close(struct net_device *shm_netdev)
return 0;
}
+static void shm_flow_on_work_func(struct work_struct *work)
+{
+ struct shmdrv_layer *pshm_drv = container_of(work, struct shmdrv_layer, shm_flow_on_work);
+
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+}
+
+static void shm_flow_off_work_func(struct work_struct *work)
+{
+ struct shmdrv_layer *pshm_drv = container_of(work, struct shmdrv_layer, shm_flow_off_work);
+
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+}
+
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
struct buf_list *pbuf;
@@ -238,11 +259,9 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ queue_work(pshm_drv->pshm_flow_ctrl_workqueue,
+ &pshm_drv->shm_flow_on_work);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_ON);
-
/* Schedule the work queue. if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -426,11 +445,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_OFF);
- spin_lock_irqsave(&pshm_drv->lock, flags);
+ queue_work(pshm_drv->pshm_flow_ctrl_workqueue,
+ &pshm_drv->shm_flow_off_work);
}
/*
* We simply return back to the caller if we do not have space
@@ -621,11 +637,16 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+ INIT_WORK(&pshm_drv->shm_flow_on_work, shm_flow_on_work_func);
+ INIT_WORK(&pshm_drv->shm_flow_off_work, shm_flow_off_work_func);
pshm_drv->pshm_tx_workqueue =
create_singlethread_workqueue("shm_tx_work");
pshm_drv->pshm_rx_workqueue =
create_singlethread_workqueue("shm_rx_work");
+ pshm_drv->pshm_flow_ctrl_workqueue =
+ create_singlethread_workqueue(
+ "shm_caif_flow_ctrl_work");
for (j = 0; j < NR_TX_BUF; j++) {
struct buf_list *tx_buf =
@@ -744,6 +765,7 @@ void caif_shmcore_remove(struct net_device *pshm_netdev)
/* Destroy work queues. */
destroy_workqueue(pshm_drv->pshm_tx_workqueue);
destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+ destroy_workqueue(pshm_drv->pshm_flow_ctrl_workqueue);
unregister_netdev(pshm_netdev);
}