/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HBA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
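/*
 * Worked example (editorial note, not part of the original source): the
 * calculation above is iocbs = 1 + DIV_ROUND_UP(dsds - 1, 5) for dsds >= 1,
 * which matches a layout where the command IOCB carries the first data
 * segment descriptor and each Continuation Type 1 IOCB carries up to five
 * more.  So dsds = 1 needs a single IOCB, dsds = 6 needs 2 (1 command +
 * 1 continuation), and dsds = 7 needs 3 (1 command + 2 continuations).
 */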
/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
        volatile uint16_t first;
        volatile uint16_t second;

        do {
                first = RD_REG_WORD(addr);
                barrier();
                cpu_relax();
                second = RD_REG_WORD(addr);
        } while (first != second);

        return (first);
}

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
        unsigned long flags;
        struct qla_hw_data *ha = rsp->hw;

        local_irq_save(flags);
        if (IS_QLA82XX(ha))
                qla82xx_poll(0, rsp);
        else
                ha->isp_ops->intr_handler(0, rsp);
        local_irq_restore(flags);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
        uint32_t *ifcp = (uint32_t *) fcp;
        uint32_t *ofcp = (uint32_t *) fcp;
        uint32_t iter = bsize >> 2;

        for (; iter; iter--)
                *ofcp++ = swab32(*ifcp++);

        return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
        uint32_t *isrc = (uint32_t *) src;
        uint32_t *odest = (uint32_t *) dst;
        uint32_t iter = bsize >> 2;

        for (; iter; iter--)
                *odest++ = cpu_to_le32(*isrc++);
}

static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
        int i;

        if (IS_FWI2_CAPABLE(ha))
                return;

        for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
                set_bit(i, ha->loop_id_map);
        set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
        set_bit(BROADCAST, ha->loop_id_map);
}

static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
        struct qla_hw_data *ha = vha->hw;

        if (IS_FWI2_CAPABLE(ha))
                return (loop_id > NPH_LAST_HANDLE);
        return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
            loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
        struct qla_hw_data *ha = fcport->vha->hw;

        if (fcport->loop_id == FC_NO_LOOP_ID ||
            qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
                return;

        clear_bit(fcport->loop_id, ha->loop_id_map);
        fcport->loop_id = FC_NO_LOOP_ID;
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
        struct dsd_dma *dsd_ptr, *tdsd_ptr;
        struct crc_context *ctx;

        ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

        /* clean up allocated prev pool */
        list_for_each_entry_safe(dsd_ptr, tdsd_ptr, &ctx->dsd_list, list) {
                dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
                    dsd_ptr->dsd_list_dma);
                list_del(&dsd_ptr->list);
                kfree(dsd_ptr);
        }
        INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
        int old_state;

        old_state = atomic_read(&fcport->state);
        atomic_set(&fcport->state, state);

        /* Don't print state transitions during initial allocation of fcport */
        if (old_state && old_state != state) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
                    "FCPort state transitioned from %s to %s - "
                    "portid=%02x%02x%02x.\n",
                    port_state_str[old_state], port_state_str[state],
                    fcport->d_id.b.domain, fcport->d_id.b.area,
                    fcport->d_id.b.al_pa);
        }
}

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
        /*
         * Uncomment when corresponding SCSI changes are done.
         *
        if (!sp->cmd->prot_chk)
                return 0;
         *
         */
        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
                if (ql2xenablehba_err_chk >= 1)
                        return 1;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (ql2xenablehba_err_chk >= 2)
                        return 1;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                return 1;
        }
        return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
        scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

        /* Test appropriate base-vha and vha flags. */
        return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
        srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        uint8_t bail;

        QLA_VHA_MARK_BUSY(vha, bail);
        if (unlikely(bail))
                return NULL;

        sp = mempool_alloc(ha->srb_mempool, flag);
        if (!sp)
                goto done;

        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->iocbs = 1;
done:
        if (!sp)
                QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
}

static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
        mempool_free(sp, vha->hw->srb_mempool);
        QLA_VHA_MARK_NOT_BUSY(vha);
}

static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
        init_timer(&sp->u.iocb_cmd.timer);
        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
        sp->u.iocb_cmd.timer.data = (unsigned long)sp;
        sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
        add_timer(&sp->u.iocb_cmd.timer);
        sp->free = qla2x00_sp_free;
        if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
            (sp->type == SRB_FXIOCB_DCMD))
                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
        if (IS_QLAFX00(ha))
                return sizeof(uint32_t) * 32;
        else
                return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
        if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
                return;

        /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
        if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
            HOST_QUEUE_RAMPDOWN_INTERVAL)))
                return;

        /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
        if (time_before(jiffies, (vha->hw->host_last_rampup_time +
            HOST_QUEUE_RAMPUP_INTERVAL)))
                return;

        set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}
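/*
 * Illustrative usage sketch for the SRB helpers defined above (an editorial
 * assumption about a typical caller, not code taken from the driver):
 * qla2x00_get_sp() leaves the vha marked busy via QLA_VHA_MARK_BUSY() on
 * success, so a caller that gives up before issuing its request should free
 * the SRB with qla2x00_rel_sp() to drop that busy mark:
 *
 *      srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *
 *      if (!sp)
 *              return QLA_FUNCTION_FAILED;
 *      ...build and issue the IOCB; on failure before it is issued...
 *      qla2x00_rel_sp(vha, sp);
 */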