Diffstat (limited to 'drivers/scsi/ipr.c')
 -rw-r--r--  drivers/scsi/ipr.c | 1296
 1 file changed, 972 insertions(+), 324 deletions(-)
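
Before the hunks themselves, a minimal standalone sketch of the multi-queue pattern the patch introduces may help when reading the diff: command processing moves from a single host-lock-protected queue to several HRR queues, where queue 0 (IPR_INIT_HRRQ) keeps internal/initialization work and I/O is spread round-robin over the remaining queues, each with its own lock and free/pending lists. The sketch below mirrors the ipr_get_hrrq_index() logic visible in the diff, but the struct fields, constants, and the non-atomic counter are simplified stand-ins for illustration only, not the driver's real definitions.

/* Illustrative sketch only -- simplified stand-ins, not the driver's types. */
#define NUM_HRRQ      4      /* hypothetical: 1 internal queue + 3 I/O queues */
#define IPR_INIT_HRRQ 0      /* queue 0 is reserved for internal commands     */

struct hrr_queue {
	int free_count;      /* real driver: per-queue lock, free and pending lists */
};

struct ioa_cfg {
	struct hrr_queue hrrq[NUM_HRRQ];
	unsigned int hrrq_num;    /* number of queues actually enabled */
	unsigned int hrrq_index;  /* round-robin cursor (atomic_t in the driver) */
};

/* Mirrors ipr_get_hrrq_index() from the diff: with a single queue everything
 * goes to queue 0; otherwise skip queue 0 and rotate over queues 1..hrrq_num-1.
 * The driver uses atomic_add_return(); a plain increment is used here only to
 * keep the sketch self-contained. */
static unsigned int pick_hrrq(struct ioa_cfg *cfg)
{
	if (cfg->hrrq_num == 1)
		return 0;
	return (++cfg->hrrq_index % (cfg->hrrq_num - 1)) + 1;
}
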
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1d7da3f41eb..8fa79b83f2d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 1000,
.cache_line_size = 0x20,
.clear_isr = 0,
+ .iopoll_weight = 64,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+ trace_entry = &ioa_cfg->trace[atomic_add_return
+ (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
+ wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
+ int hrrq_id;
+ hrrq_id = ioarcb->cmd_pkt.hrrq_id;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->cmd_pkt.hrrq_id = hrrq_id;
ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd = NULL;
+
+ if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+ ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+ struct ipr_cmnd, queue);
+ list_del(&ipr_cmd->queue);
+ }
- ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
- list_del(&ipr_cmd->queue);
return ipr_cmd;
}
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
- struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ struct ipr_cmnd *ipr_cmd =
+ __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
return ipr_cmd;
}
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
u32 clr_ints)
{
volatile u32 int_reg;
+ int i;
/* Stop new interrupts */
- ioa_cfg->allow_interrupts = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
**/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
}
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd, *temp;
+ struct ipr_hrr_queue *hrrq;
ENTER;
- list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
- list_del(&ipr_cmd->queue);
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry_safe(ipr_cmd,
+ temp, &hrrq->hrrq_pending_q, queue) {
+ list_del(&ipr_cmd->queue);
- ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
- ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+ ipr_cmd->s.ioasa.hdr.ioasc =
+ cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid =
+ cpu_to_be32(IPR_DRIVER_ILID);
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+ IPR_IOASC_IOA_WAS_RESET);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->done(ipr_cmd);
+ }
+ spin_unlock(&hrrq->_lock);
}
-
LEAVE;
}
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
void (*done) (struct ipr_cmnd *),
void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = done;
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
spin_lock_irq(ioa_cfg->host->host_lock);
}
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+ if (ioa_cfg->hrrq_num == 1)
+ return 0;
+ else
+ return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
/**
* ipr_send_hcam - Send an HCAM to the adapter.
* @ioa_cfg: ioa config struct
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
- if (ioa_cfg->allow_cmds) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
}
/**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
* @res_path: resource path
* @buf: buffer
+ * @len: length of buffer provided
*
* Return value:
* pointer to buffer
**/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
int i;
char *p = buffer;
@@ -1187,6 +1222,27 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
}
/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg: ioa config struct
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+ u8 *res_path, char *buffer, int len)
+{
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (buffer - p));
+ return buffer;
+}
+
+/**
* ipr_update_res_entry - Update the resource entry.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
if (res->sdev && new_path)
sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(res->ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
} else {
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err_separator;
ipr_err("Device %d : %s", i + 1,
- ipr_format_res_path(dev_entry->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(dev_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_log_ext_vpd(&dev_entry->vpd);
ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
path_active_desc[i].desc, path_state_desc[j].desc,
- ipr_format_res_path(fabric->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ fabric->res_path,
+ buffer, sizeof(buffer)));
return;
}
}
ipr_err("Path state=%02X Resource Path=%s\n", path_state,
- ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+ buffer, sizeof(buffer)));
}
static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
- ipr_format_res_path(cfg->res_path, buffer,
- sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]),
+ be32_to_cpu(cfg->wwid[1]));
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status,
- ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("RAID %s Array Configuration: %s\n",
error->protection_level,
- ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, error->last_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("Array Member %d:\n", i);
ipr_log_ext_vpd(&array_entry->vpd);
ipr_err("Current Location: %s\n",
- ipr_format_res_path(array_entry->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, array_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_err("Expected Location: %s\n",
- ipr_format_res_path(array_entry->expected_res_path,
- buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ array_entry->expected_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
}
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -2491,36 +2553,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_reset_reload - Reset/Reload the IOA
- * @ioa_cfg: ioa config struct
- * @shutdown_type: shutdown type
- *
- * This function resets the adapter and re-initializes it.
- * This function assumes that all new host commands have been stopped.
- * Return value:
- * SUCCESS / FAILED
- **/
-static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
- enum ipr_shutdown_type shutdown_type)
-{
- if (!ioa_cfg->in_reset_reload)
- ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irq(ioa_cfg->host->host_lock);
-
- /* If we got hit with a host reset while we were already resetting
- the adapter for some reason, and the reset failed. */
- if (ioa_cfg->ioa_is_dead) {
- ipr_trace;
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
* ipr_find_ses_entry - Find matching SES in SES table
* @res: resource entry struct of SES
*
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
restart:
do {
did_work = 0;
- if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ !ioa_cfg->allow_ml_add_del) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
int len;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
len = snprintf(buf, PAGE_SIZE, "offline\n");
else
len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
- int result = count;
+ int result = count, i;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
- ioa_cfg->ioa_is_dead = 0;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+ !strncmp(buf, "online", 6)) {
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ioa_cfg->reset_retries = 0;
ioa_cfg->in_ioa_bringdown = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
.store = ipr_store_reset_adapter
};
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+ /**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long user_iopoll_weight;
+ unsigned long lock_flags = 0;
+ int i;
+
+ if (!ioa_cfg->sis64) {
+ dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+ return -EINVAL;
+ }
+ if (kstrtoul(buf, 10, &user_iopoll_weight))
+ return -EINVAL;
+
+ if (user_iopoll_weight > 256) {
+ dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+ return -EINVAL;
+ }
+
+ if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+ dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+ return strlen(buf);
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ ioa_cfg->iopoll_weight = user_iopoll_weight;
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+ .attr = {
+ .name = "iopoll_weight",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_iopoll_weight,
+ .store = ipr_store_iopoll_weight
+};
+
/**
* ipr_alloc_ucode_buffer - Allocates a microcode download buffer
* @buf_len: buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
&ipr_ioa_fw_type_attr,
+ &ipr_iopoll_weight_attr,
NULL,
};
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->dump = dump;
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
- if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
ioa_cfg->dump_taken = 1;
schedule_work(&ioa_cfg->work_q);
}
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ioa_cfg->sis64)
len = snprintf(buf, PAGE_SIZE, "%s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(res->res_path, buffer,
+ sizeof(buffer)));
else if (res)
len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
-/**
- * ipr_eh_host_reset - Reset the host adapter
- * @scsi_cmd: scsi command struct
- *
- * Return value:
- * SUCCESS / FAILED
- **/
-static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ unsigned long lock_flags = 0;
+ int rc = SUCCESS;
ENTER;
- ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->in_reset_reload) {
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
ioa_cfg->sdt_state = GET_DUMP;
}
- rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
-
- LEAVE;
- return rc;
-}
-
-static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
-{
- int rc;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __ipr_eh_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ /* If we got hit with a host reset while we were already resetting
+ the adapter for some reason, and the reset failed. */
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_trace;
+ rc = FAILED;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
return rc;
}
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ata_port *ap;
int rc = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
*/
if (ioa_cfg->in_reset_reload)
return FAILED;
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->qc &&
+ !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ }
}
}
+ spin_unlock(&hrrq->_lock);
}
-
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- rc = -EIO;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd,
+ &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle ==
+ res->res_handle) {
+ rc = -EIO;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
} else
rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
else
ipr_cmd->sibling->done(ipr_cmd->sibling);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
LEAVE;
}
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
int op_found = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* This will force the mid-layer to call ipr_eh_host_reset,
* which will then go to sleep and wait for the reset to complete
*/
- if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->in_reset_reload ||
+ ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
if (!res)
return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
if (!ipr_is_gscsi(res))
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->scsi_cmd == scsi_cmd) {
+ ipr_cmd->done = ipr_scsi_eh_done;
+ op_found = 1;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+ dev_err(&ioa_cfg->pdev->dev,
+ "No Host RRQ. 0x%08X\n", int_reg);
else
dev_err(&ioa_cfg->pdev->dev,
"Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* none
**/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+ dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+ struct list_head *doneq)
+{
+ u32 ioasc;
+ u16 cmd_index;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+ int num_hrrq = 0;
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrr_queue->allow_interrupts)
+ return 0;
+
+ while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrr_queue->toggle_bit) {
+
+ cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+ IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+ IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+ if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+ cmd_index < hrr_queue->min_cmd_id)) {
+ ipr_isr_eh(ioa_cfg,
+ "Invalid response handle from IOA: ",
+ cmd_index);
+ break;
+ }
+
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+ list_move_tail(&ipr_cmd->queue, doneq);
+
+ if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+ hrr_queue->hrrq_curr++;
+ } else {
+ hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+ hrr_queue->toggle_bit ^= 1u;
+ }
+ num_hrrq++;
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ return num_hrrq;
+}
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ unsigned long hrrq_flags;
+ int completed_ops;
+ LIST_HEAD(doneq);
+
+ hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+ ioa_cfg = hrrq->ioa_cfg;
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+ if (completed_ops < budget)
+ blk_iopoll_complete(iop);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
+ return completed_ops;
+}
+
/**
* ipr_isr - Interrupt service routine
* @irq: irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
**/
static irqreturn_t ipr_isr(int irq, void *devp)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
- unsigned long lock_flags = 0;
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
u32 int_reg = 0;
- u32 ioasc;
- u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
LIST_HEAD(doneq);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/* If interrupts are disabled, ignore the interrupt */
- if (!ioa_cfg->allow_interrupts) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return IRQ_NONE;
}
while (1) {
- ipr_cmd = NULL;
-
- while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
- ioa_cfg->toggle_bit) {
-
- cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
- IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
- if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- rc = IRQ_HANDLED;
- goto unlock_out;
- }
-
- ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
- ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+ rc = IRQ_HANDLED;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
- list_move_tail(&ipr_cmd->queue, &doneq);
-
- rc = IRQ_HANDLED;
-
- if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
- ioa_cfg->hrrq_curr++;
- } else {
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit ^= 1u;
- }
- }
-
- if (ipr_cmd && !ioa_cfg->clear_isr)
- break;
+ if (!ioa_cfg->clear_isr)
+ break;
- if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
do {
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ writel(IPR_PCII_HRRQ_UPDATED,
+ ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
- num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ ipr_isr_eh(ioa_cfg,
+ "Error clearing HRRQ: ", num_hrrq);
rc = IRQ_HANDLED;
- goto unlock_out;
+ break;
} else
break;
}
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
-unlock_out:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
+ return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit) {
+ if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+ blk_iopoll_sched(&hrrq->iopoll);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+ } else {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit)
+
+ if (ipr_process_hrrq(hrrq, -1, &doneq))
+ rc = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
return rc;
}
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long lock_flags;
+ unsigned long hrrq_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
} else {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
}
}
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long lock_flags;
+ unsigned long hrrq_flags, lock_flags;
int rc;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
+ if (ipr_is_gata(res) && res->sata_port) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+ }
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead || !res)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
- if (ipr_is_gata(res) && res->sata_port) {
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
- return rc;
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
}
if (scsi_cmd->cmnd[0] >= 0xC0 &&
- (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+ (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ }
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- spin_lock_irqsave(shost->host_lock, lock_flags);
- if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
if (!rc)
scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (unlikely(ioa_cfg->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
scsi_dma_unmap(scsi_cmd);
goto err_nodev;
}
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
err_nodev:
- spin_lock_irqsave(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
goto out_unlock;
rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
struct ipr_resource_entry *res = sata_port->res;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ spin_lock(&ipr_cmd->hrrq->_lock);
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
else
qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
ata_qc_complete(qc);
}
@@ -6244,6 +6502,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
}
/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ qc->lldd_task = NULL;
+ spin_lock(&hrrq->_lock);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ spin_unlock(&hrrq->_lock);
+ return 0;
+ }
+
+ if (unlikely(!hrrq->allow_cmds)) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ qc->lldd_task = ipr_cmd;
+ spin_unlock(&hrrq->_lock);
+ return 0;
+}
+
+/**
* ipr_qc_issue - Issue a SATA qc to a device
* @qc: queued command
*
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
struct ipr_ioarcb *ioarcb;
struct ipr_ioarcb_ata_regs *regs;
- if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+ if (qc->lldd_task == NULL)
+ ipr_qc_defer(qc);
+
+ ipr_cmd = qc->lldd_task;
+ if (ipr_cmd == NULL)
return AC_ERR_SYSTEM;
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ qc->lldd_task = NULL;
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+ ipr_cmd->hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_SYSTEM;
+ }
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
ioarcb = &ipr_cmd->ioarcb;
if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
memset(regs, 0, sizeof(*regs));
ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->qc = qc;
ipr_cmd->done = ipr_sata_done;
ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
default:
WARN_ON(1);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return AC_ERR_INVALID;
}
ipr_send_command(ipr_cmd);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return 0;
}
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
.hardreset = ipr_sata_reset,
.post_internal_cmd = ipr_ata_post_internal,
.qc_prep = ata_noop_qc_prep,
+ .qc_defer = ipr_qc_defer,
.qc_issue = ipr_qc_issue,
.qc_fill_rtf = ipr_qc_fill_rtf,
.port_start = ata_sas_port_start,
@@ -6427,7 +6743,7 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
ENTER;
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock_irq(ioa_cfg->host->host_lock);
@@ -6454,11 +6770,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_hostrcb *hostrcb, *temp;
- int i = 0;
+ int i = 0, j;
ENTER;
ioa_cfg->in_reset_reload = 0;
- ioa_cfg->allow_cmds = 1;
+ for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+ spin_lock(&ioa_cfg->hrrq[j]._lock);
+ ioa_cfg->hrrq[j].allow_cmds = 1;
+ spin_unlock(&ioa_cfg->hrrq[j]._lock);
+ }
+ wmb();
ioa_cfg->reset_cmd = NULL;
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
@@ -6482,14 +6803,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
spin_lock(ioa_cfg->host->host_lock);
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
scsi_block_requests(ioa_cfg->host);
LEAVE;
@@ -6560,9 +6881,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
if (!ioa_cfg->sis64)
ipr_cmd->job_step = ipr_set_supported_devs;
+ LEAVE;
return IPR_RC_JOB_RETURN;
}
+ LEAVE;
return IPR_RC_JOB_CONTINUE;
}
@@ -6820,7 +7143,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
return IPR_RC_JOB_RETURN;
}
@@ -7278,46 +7601,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_hrr_queue *hrrq;
ENTER;
+ ipr_cmd->job_step = ipr_ioafp_std_inquiry;
dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
- ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
- ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+ hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (ioa_cfg->sis64)
- ioarcb->cmd_pkt.cdb[1] = 0x1;
- ioarcb->cmd_pkt.cdb[2] =
- ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
- ioarcb->cmd_pkt.cdb[3] =
- ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
- ioarcb->cmd_pkt.cdb[4] =
- ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[5] =
- ((u64) ioa_cfg->host_rrq_dma) & 0xff;
- ioarcb->cmd_pkt.cdb[7] =
- ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[8] =
- (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+ ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
- if (ioa_cfg->sis64) {
- ioarcb->cmd_pkt.cdb[10] =
- ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
- ioarcb->cmd_pkt.cdb[11] =
- ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
- ioarcb->cmd_pkt.cdb[12] =
- ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
- ioarcb->cmd_pkt.cdb[13] =
- ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
- }
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
- ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+ if (ioa_cfg->nvectors == 1)
+ ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+ else
+ ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+ ioarcb->cmd_pkt.cdb[2] =
+ ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+ ioarcb->cmd_pkt.cdb[3] =
+ ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[4] =
+ ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[5] =
+ ((u64) hrrq->host_rrq_dma) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] =
+ ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] =
+ (sizeof(u32) * hrrq->size) & 0xff;
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[9] =
+ ioa_cfg->identify_hrrq_index;
- ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+ }
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[14] =
+ ioa_cfg->identify_hrrq_index;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_INTERNAL_TIMEOUT);
+
+ if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
LEAVE;
- return IPR_RC_JOB_RETURN;
+ return IPR_RC_JOB_CONTINUE;
}
/**
@@ -7365,7 +7713,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
unsigned long timeout)
{
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+
+ ENTER;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7733,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
**/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
- memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+ struct ipr_hrr_queue *hrrq;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+ /* Initialize Host RRQ pointers */
+ hrrq->hrrq_start = hrrq->host_rrq;
+ hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+ hrrq->hrrq_curr = hrrq->hrrq_start;
+ hrrq->toggle_bit = 1;
+ spin_unlock(&hrrq->_lock);
+ }
+ wmb();
- /* Initialize Host RRQ pointers */
- ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
- ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit = 1;
+ ioa_cfg->identify_hrrq_index = 0;
+ if (ioa_cfg->hrrq_num == 1)
+ atomic_set(&ioa_cfg->hrrq_index, 0);
+ else
+ atomic_set(&ioa_cfg->hrrq_index, 1);
/* Zero out config table */
memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7446,7 +7809,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
return IPR_RC_JOB_RETURN;
}
@@ -7466,12 +7830,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
volatile u64 maskval;
+ int i;
ENTER;
ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
- ioa_cfg->allow_interrupts = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7881,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -8030,7 +8400,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+ if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8449,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
* We are doing nested adapter resets and this is
* not the current reset job.
*/
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue,
+ &ipr_cmd->hrrq->hrrq_free_q);
return;
}
@@ -8113,9 +8485,15 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
struct ipr_cmnd *ipr_cmd;
+ int i;
ioa_cfg->in_reset_reload = 1;
- ioa_cfg->allow_cmds = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
@@ -8141,7 +8519,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
- if (ioa_cfg->ioa_is_dead)
+ int i;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return;
if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8536,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
"IOA taken offline - error recovery failed\n");
ioa_cfg->reset_retries = 0;
- ioa_cfg->ioa_is_dead = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg->reset_cmd = NULL;
@@ -8188,9 +8573,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
*/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
/* Disallow new interrupts, avoid loop */
- ipr_cmd->ioa_cfg->allow_interrupts = 0;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
return IPR_RC_JOB_RETURN;
}
@@ -8247,13 +8640,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
unsigned long flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
- ioa_cfg->allow_cmds = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8310,12 +8709,11 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
} else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
-
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- if (ioa_cfg->ioa_is_dead) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
rc = -EIO;
} else if (ipr_invalid_adapter(ioa_cfg)) {
if (!ipr_testmode)
@@ -8376,8 +8774,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ipr_free_cmd_blks(ioa_cfg);
- pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++)
+ pci_free_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+
pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
@@ -8408,8 +8811,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- free_irq(pdev->irq, ioa_cfg);
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ int i;
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ } else
+ free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ pci_disable_msi(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ pci_disable_msix(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ }
+
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -8430,7 +8848,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
dma_addr_t dma_addr;
- int i;
+ int i, entries_each_hrrq, hrrq_id = 0;
ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8864,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
return -ENOMEM;
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ if (ioa_cfg->hrrq_num > 1) {
+ if (i == 0) {
+ entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
+ } else {
+ entries_each_hrrq =
+ IPR_NUM_BASE_CMD_BLKS/
+ (ioa_cfg->hrrq_num - 1);
+ ioa_cfg->hrrq[i].min_cmd_id =
+ IPR_NUM_INTERNAL_CMD_BLKS +
+ (i - 1) * entries_each_hrrq;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (IPR_NUM_INTERNAL_CMD_BLKS +
+ i * entries_each_hrrq - 1);
+ }
+ } else {
+ entries_each_hrrq = IPR_NUM_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+ }
+ ioa_cfg->hrrq[i].size = entries_each_hrrq;
+ }
+
+ BUG_ON(ioa_cfg->hrrq_num == 0);
+
+ i = IPR_NUM_CMD_BLKS -
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+ if (i > 0) {
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+ }
+
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8484,7 +8937,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ipr_cmd->sense_buffer_dma = dma_addr +
offsetof(struct ipr_cmnd, sense_buffer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+ ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+ hrrq_id++;
}
return 0;
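
For reference, the partitioning arithmetic introduced in ipr_alloc_cmd_blks() can be exercised on its own: queue 0 keeps the internal command blocks, the remaining blocks are split evenly over the other HRRQs, and any rounding remainder is folded into the last queue. The sketch below uses placeholder constants; the real IPR_NUM_* values live in ipr.h and are not reproduced here.

    #include <stdio.h>

    #define NUM_CMD_BLKS            100     /* placeholder */
    #define NUM_INTERNAL_CMD_BLKS   5       /* placeholder */
    #define NUM_BASE_CMD_BLKS       (NUM_CMD_BLKS - NUM_INTERNAL_CMD_BLKS)

    int main(void)
    {
            int hrrq_num = 4;               /* e.g. min(nvectors, CPUs) */
            int min_id[8], max_id[8], size[8];

            for (int i = 0; i < hrrq_num; i++) {
                    if (hrrq_num > 1) {
                            if (i == 0) {
                                    /* queue 0: internal commands only */
                                    size[i] = NUM_INTERNAL_CMD_BLKS;
                                    min_id[i] = 0;
                            } else {
                                    /* even split of the remaining blocks */
                                    size[i] = NUM_BASE_CMD_BLKS / (hrrq_num - 1);
                                    min_id[i] = NUM_INTERNAL_CMD_BLKS +
                                                (i - 1) * size[i];
                            }
                    } else {
                            size[i] = NUM_CMD_BLKS;
                            min_id[i] = 0;
                    }
                    max_id[i] = min_id[i] + size[i] - 1;
            }

            /* fold any rounding remainder into the last queue */
            int left = NUM_CMD_BLKS - max_id[hrrq_num - 1] - 1;
            size[hrrq_num - 1] += left;
            max_id[hrrq_num - 1] += left;

            for (int i = 0; i < hrrq_num; i++)
                    printf("hrrq %d: ids %d..%d (%d blocks)\n",
                           i, min_id[i], max_id[i], size[i]);
            return 0;
    }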
@@ -8516,6 +8973,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+ if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+ || !ioa_cfg->vset_ids)
+ goto out_free_res_entries;
}
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8991,34 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+ }
+
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
- ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
- sizeof(u32) * IPR_NUM_CMD_BLKS,
- &ioa_cfg->host_rrq_dma);
-
- if (!ioa_cfg->host_rrq)
- goto out_ipr_free_cmd_blocks;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ &ioa_cfg->hrrq[i].host_rrq_dma);
+
+ if (!ioa_cfg->hrrq[i].host_rrq) {
+ while (--i >= 0)
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ goto out_ipr_free_cmd_blocks;
+ }
+ ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+ }
ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
ioa_cfg->cfg_table_size,
@@ -8582,8 +9062,12 @@ out_free_hostrcb_dma:
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
out_free_host_rrq:
- pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ }
out_ipr_free_cmd_blocks:
ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
@@ -8591,6 +9075,9 @@ out_free_vpd_cbs:
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
+ kfree(ioa_cfg->target_ids);
+ kfree(ioa_cfg->array_ids);
+ kfree(ioa_cfg->vset_ids);
goto out;
}
@@ -8638,15 +9125,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->doorbell = IPR_DOORBELL;
sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
- sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
- sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
- INIT_LIST_HEAD(&ioa_cfg->free_q);
- INIT_LIST_HEAD(&ioa_cfg->pending_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8724,6 +9207,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+ int i, err, vectors;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msix(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, err, vectors;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msi(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+ for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+ snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+ "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+ ioa_cfg->vectors_info[vec_idx].
+ desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+ }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, rc;
+
+ for (i = 1; i < ioa_cfg->nvectors; i++) {
+ rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ ipr_isr_mhrrq,
+ 0,
+ ioa_cfg->vectors_info[i].desc,
+ &ioa_cfg->hrrq[i]);
+ if (rc) {
+ while (--i >= 0)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
/**
* ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
* @pdev: PCI device struct
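
The two enable helpers added above rely on the pci_enable_msix()/pci_enable_msi_block() contract of this kernel generation: a positive return value reports how many vectors the device can actually supply, so the caller retries with that smaller count until it gets 0 (success) or a negative error. Here is a stand-alone sketch of that negotiation loop, with enable_vectors() as a stub standing in for the PCI call (it is not a real API).

    #include <stdio.h>

    static int enable_vectors(int requested)
    {
            int available = 3;              /* pretend the device supports 3 */

            if (requested > available)
                    return available;       /* caller should retry with fewer */
            return 0;                       /* success */
    }

    int main(void)
    {
            int vectors = 16, err;

            /* retry downward until success or hard failure */
            while ((err = enable_vectors(vectors)) > 0)
                    vectors = err;

            if (err < 0)
                    printf("failed: %d\n", err);
            else
                    printf("enabled %d vectors\n", vectors);
            return 0;
    }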
@@ -8740,6 +9305,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
unsigned long lock_flags = 0;
irqreturn_t rc = IRQ_HANDLED;
+ dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg->msi_received = 1;
@@ -8787,9 +9353,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->msi_received) {
/* MSI test failed */
dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -8806,8 +9372,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
return rc;
}
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+ /* ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
*
@@ -8823,6 +9388,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
+ unsigned long lock_flags;
ENTER;
@@ -8918,17 +9484,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
- /* Enable MSI style interrupts if they are supported. */
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+ dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+ IPR_MAX_MSIX_VECTORS);
+ ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+ }
+
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msix(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSIX;
+ else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msi(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSI;
+ else {
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ }
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP)
- pci_disable_msi(pdev);
+ if (rc == -EOPNOTSUPP) {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ pci_disable_msi(pdev);
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ pci_disable_msix(pdev);
+ }
+
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ }
else if (rc)
goto out_msi_disable;
- else
- dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
- } else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ else {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ dev_info(&pdev->dev,
+ "Request for %d MSIs succeeded with starting IRQ: %d\n",
+ ioa_cfg->nvectors, pdev->irq);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ dev_info(&pdev->dev,
+ "Request for %d MSIXs succeeded.",
+ ioa_cfg->nvectors);
+ }
+ }
+
+ ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+ (unsigned int)num_online_cpus(),
+ (unsigned int)IPR_MAX_HRRQ_NUM);
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
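
The hrrq_num computed above is simply the smallest of the vectors obtained, the online CPU count, and the driver ceiling. A trivial sketch with example values; MAX_HRRQ_NUM below is a placeholder for IPR_MAX_HRRQ_NUM.

    #include <stdio.h>

    #define MAX_HRRQ_NUM 16                 /* placeholder */
    #define min3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) \
                                     : ((b) < (c) ? (b) : (c)))

    int main(void)
    {
            unsigned int nvectors = 5, online_cpus = 8;
            unsigned int hrrq_num = min3(nvectors, online_cpus,
                                         (unsigned int)MAX_HRRQ_NUM);

            printf("hrrq_num = %u\n", hrrq_num);
            return 0;
    }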
@@ -8975,11 +9580,24 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr,
- ioa_cfg->msi_received ? 0 : IRQF_SHARED,
- IPR_NAME, ioa_cfg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI
+ || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ name_msi_vectors(ioa_cfg);
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+ 0,
+ ioa_cfg->vectors_info[0].desc,
+ &ioa_cfg->hrrq[0]);
+ if (!rc)
+ rc = ipr_request_other_msi_irqs(ioa_cfg);
+ } else {
+ rc = request_irq(pdev->irq, ipr_isr,
+ IRQF_SHARED,
+ IPR_NAME, &ioa_cfg->hrrq[0]);
+ }
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
pdev->irq, rc);
@@ -9004,7 +9622,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ pci_disable_msi(pdev);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_release_regions:
@@ -9138,7 +9759,7 @@ static void ipr_remove(struct pci_dev *pdev)
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -9185,6 +9806,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
ioa_cfg->allow_ml_add_del = 1;
ioa_cfg->host->max_channel = IPR_VSET_BUS;
+ ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+
schedule_work(&ioa_cfg->work_q);
return 0;
}
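
The iopoll hook-up above only arms polling on SIS64 adapters with more than one vector, using the per-chip iopoll_weight as the per-pass budget. The sketch below illustrates the budget idea only; it is plain user-space code, not the blk_iopoll API, and ipr_iopoll itself is defined elsewhere in this patch.

    #include <stdio.h>

    static int pending = 150;               /* pretend completions are queued */

    /* service at most "weight" completions; return how many were handled */
    static int poll_queue(int weight)
    {
            int done = 0;

            while (pending > 0 && done < weight) {
                    pending--;              /* "complete" one command */
                    done++;
            }
            return done;
    }

    int main(void)
    {
            const int weight = 64;          /* cf. iopoll_weight in the chip cfg */
            int passes = 0;

            for (;;) {
                    int done = poll_queue(weight);

                    passes++;
                    if (done < weight)      /* out of work: re-enable IRQs */
                            break;
            }
            printf("drained in %d poll passes\n", passes);
            return 0;
    }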
@@ -9203,8 +9835,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ ioa_cfg->iopoll_weight = 0;
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9277,6 +9917,8 @@ static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
@@ -9290,6 +9932,14 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9316,9 +9966,7 @@ static struct pci_driver ipr_driver = {
**/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -9340,7 +9988,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}