author	Sebastian Ott <sebott@linux.vnet.ibm.com>	2011-12-01 13:32:19 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-12-01 13:32:17 +0100
commit	817e5000ebc4d448ca514db49b55073a724f8552 (patch)
tree	eea9284fcc6cd702161eff7611de394822329c30 /drivers/s390
parent	cfc9066bcd3ab498268e1d075f1556bb5244c0aa (diff)
[S390] hibernate: directly trigger subchannel evaluation
Using the generic css_schedule_eval to evaluate subchannels while resuming
from hibernation is very slow when used with many devices. Provide a new
evaluation trigger which exploits css_sched_sch_todo and use this in the
resume callback for ccw devices.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
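For orientation, here is a minimal sketch of the resume-path pattern this patch
switches to, based on the __ccw_device_pm_restore() hunk below. The wrapper
function name and the quoted header locations are illustrative assumptions, not
part of the patch; css_sched_sch_todo(), SCH_TODO_EVAL and
css_wait_for_slow_path() are the names actually used in the diff.

	/* Illustrative sketch only -- not part of the patch. */
	#include "cio.h"	/* enum sch_todo, struct subchannel (assumed location) */
	#include "css.h"	/* css_sched_sch_todo(), css_wait_for_slow_path() (assumed location) */

	static void trigger_sch_eval_on_resume(struct subchannel *sch)
	{
		/* css_sched_sch_todo() must be called with the subchannel lock held. */
		spin_lock_irq(sch->lock);
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		spin_unlock_irq(sch->lock);

		/* Wait until the slow-path workqueue has processed the pending todo. */
		css_wait_for_slow_path();
	}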
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/cio/cio.h    |   5
-rw-r--r--	drivers/s390/cio/css.c    | 104
-rw-r--r--	drivers/s390/cio/device.c |   4
3 files changed, 66 insertions(+), 47 deletions(-)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e..4a1ff5c2eb8 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
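A note on the ordering documented by the new comment above: css_sched_sch_todo()
(see the css.c hunk below) returns early when sch->todo >= todo, so a pending
todo is only ever upgraded, never downgraded. A hypothetical call sequence,
purely for illustration and not part of the patch:

	/* Hypothetical sequence illustrating the priority rule, not patch code. */
	static void todo_priority_example(struct subchannel *sch)
	{
		spin_lock_irq(sch->lock);
		css_sched_sch_todo(sch, SCH_TODO_EVAL);		/* pending todo: SCH_TODO_EVAL */
		css_sched_sch_todo(sch, SCH_TODO_UNREG);	/* higher value: overwrites EVAL */
		css_sched_sch_todo(sch, SCH_TODO_EVAL);		/* lower value: no effect */
		spin_unlock_irq(sch->lock);
	}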
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1..21908e67bf6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0eca..47269858ecb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);