Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/chsc.c          68
-rw-r--r--  drivers/s390/cio/chsc.h           2
-rw-r--r--  drivers/s390/cio/cio.c            4
-rw-r--r--  drivers/s390/cio/cmf.c            6
-rw-r--r--  drivers/s390/cio/css.c            2
-rw-r--r--  drivers/s390/cio/device.c        10
-rw-r--r--  drivers/s390/cio/device_fsm.c     2
-rw-r--r--  drivers/s390/cio/device_pgid.c  123
-rw-r--r--  drivers/s390/cio/io_sch.h         5
-rw-r--r--  drivers/s390/cio/qdio_main.c     12
10 files changed, 166 insertions, 68 deletions
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 10729bbcece..31ceef1beb8 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -435,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
-#ifdef CONFIG_PCI
switch (sei_area->cc) {
case 1:
zpci_event_error(sei_area->ccdf);
@@ -444,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
zpci_event_availability(sei_area->ccdf);
break;
default:
- CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
sei_area->cc);
break;
}
-#endif
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -471,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
chsc_process_sei_scm_change(sei_area);
break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
}
-static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
do {
memset(sei, 0, sizeof(*sei));
@@ -488,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
if (chsc(sei))
break;
- if (sei->response.code == 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei successful\n");
-
- /* Check if we might have lost some information. */
- if (sei->u.nt0_area.flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
- }
-
- switch (sei->nt) {
- case 0:
- chsc_process_sei_nt0(&sei->u.nt0_area);
- break;
- case 2:
- chsc_process_sei_nt2(&sei->u.nt2_area);
- break;
- default:
- CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
- sei->nt);
- break;
- }
- } else {
+ if (sei->response.code != 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei->response.code);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
- return 0;
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+ } while (sei->u.nt0_area.flags & 0x80);
}
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei *sei;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -531,14 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e..227e05f674b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
#else /* CONFIG_SCM_BUS */
-#define scm_update_information() 0
+static inline int scm_update_information(void) { return 0; }
#endif /* CONFIG_SCM_BUS */
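
Note on the chsc.h hunk above: replacing the function-like macro with a static inline stub keeps call sites type-checked when CONFIG_SCM_BUS is disabled. A minimal standalone sketch of the same pattern, using illustrative CONFIG_FOO_BUS/foo_update_information names (not from the kernel); build without -DCONFIG_FOO_BUS to exercise the stub:

/* Illustrative stub pattern, not the kernel header itself. */
#include <stdio.h>

#ifdef CONFIG_FOO_BUS
int foo_update_information(void);	/* real implementation elsewhere */
#else
/* A static inline is type-checked like a real call, unlike a bare
 * "#define foo_update_information() 0". */
static inline int foo_update_information(void) { return 0; }
#endif

int main(void)
{
	printf("rc=%d\n", foo_update_information());
	return 0;
}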
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c8faf6230b0..986ef6a92a4 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -962,9 +962,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_clock() > timeout)
+ if (get_tod_clock() > timeout)
break;
cpu_relax();
}
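
The timeout above is computed in TOD clock units: on s390 bit 51 of the TOD clock ticks once per microsecond, so a microsecond count converts to TOD units by a left shift of 12, which is what RCHP_TIMEOUT << 12 does (assuming RCHP_TIMEOUT is expressed in microseconds). A small sketch of that conversion, with an illustrative helper name:

#include <stdio.h>
#include <stdint.h>

/* 1 microsecond == 2^12 TOD clock units (TOD bit 51 ticks every microsecond). */
static uint64_t usecs_to_tod(uint64_t usecs)
{
	return usecs << 12;
}

int main(void)
{
	/* e.g. a 30 second budget expressed as a TOD delta */
	printf("%llu\n", (unsigned long long)usecs_to_tod(30ULL * 1000000ULL));
	return 0;
}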
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a86..4495e0627a4 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
- cmb_data->last_update = get_clock();
+ cmb_data->last_update = get_tod_clock();
kfree(reference_buf);
return 0;
}
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
- cdev->private->cmb_start_time = get_clock();
+ cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b85..a239237d43f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_clock() >> 32);
+ tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c6812ac..c6767f5a58b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
&dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
NULL,
};
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095a..c7638c54325 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
- "device information:\n", get_clock());
+ "device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c..37ada05e82a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
#define PGID_RETRIES 256
#define PGID_TIMEOUT (10 * HZ)
+static void verify_start(struct ccw_device *cdev);
+
/*
* Process path verification data and report result.
*/
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- /* Adjust lpm. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
sch->vpm |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
nop_do(cdev);
return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
req->cp = cp;
}
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
/*
* Perform establish/resign SET PGID on a single path.
*/
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
return;
out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
-static void verify_start(struct ccw_device *cdev);
-
/*
* Process SET PGID request result for a single path.
*/
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
case 0:
sch->vpm |= req->lpm & sch->opm;
break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
case -EOPNOTSUPP:
if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
else {
donepm = pgid_to_donepm(cdev);
sch->vpm = donepm & sch->opm;
- cdev->private->pgid_todo_mask &= ~donepm;
cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
pgid_fill(cdev, pgid);
}
out:
@@ -341,6 +404,10 @@ out:
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
/* Anything left to do? */
if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
+ int ret;
- /* Adjust lpm if paths are not set in pam. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
return;
out_nopath:
- snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
}
/*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
cdev->private->pgid_valid_mask |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev);
return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
sch->vpm = 0;
sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
- /* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
- cdev->private->pgid_valid_mask = 0;
- cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
cdev->private->flags.pgroup = cdev->private->options.pgroup;
cdev->private->flags.mpath = cdev->private->options.mpath;
cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
verify_start(cdev);
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1b..b108f4a5c7d 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
u8 pgid_valid_mask; /* mask of valid PGIDs */
u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
u8 path_gone_mask; /* mask of paths, that became unavailable */
u8 path_new_mask; /* mask of paths, that became available */
struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
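
The two masks added above feed back into path selection: nop_do() and snid_do() now pass pam & ... & ~path_noirq_mask to lpm_adjust(), so paths that never presented an interrupt are skipped on the next round. A minimal sketch of that masking with illustrative values (not kernel code):

#include <stdio.h>

/* Paths worth retrying: available (pam), operational (opm),
 * and not previously silent (noirq). */
static unsigned char eligible_paths(unsigned char pam, unsigned char opm,
				    unsigned char noirq)
{
	return pam & opm & (unsigned char)~noirq;
}

int main(void)
{
	/* all 8 paths in pam, upper 4 operational, one of those timed out */
	printf("0x%02x\n", eligible_paths(0xff, 0xf0, 0x10));	/* prints 0xe0 */
	return 0;
}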
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f2..abc550e5dd3 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
retries++;
if (!start_time) {
- start_time = get_clock();
+ start_time = get_tod_clock();
goto again;
}
- if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if (bufnr != q->last_move) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_clock();
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&