aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSravanthi Palakonda <srapal@codeaurora.org>2020-06-06 21:22:42 +0530
committerSravanthi Palakonda <srapal@codeaurora.org>2020-06-07 07:56:49 +0530
commitc815d7543db92377d85d214af0dbf1d11324726e (patch)
tree68ca935c4d9b8a151a81ead7029a810ca53fe43e
parentc942c752b1cf0835e9a3bf402ae40246c69b04be (diff)
parentb9bef2026c4d1298835c1a9ce240608c1367af16 (diff)
Merge commit 'b9bef2026c4d1298835c1a9ce240608c1367af16' into kernel.lnx.4.19.r1LA.UM.8.12.r1-11900-sm8250.0
Change-Id: I481cc75603cd451eb409190a1c5af3f7a9a12866 Signed-off-by: Sravanthi Palakonda <srapal@codeaurora.org>
-rw-r--r--arch/arm64/configs/vendor/lito-perf_defconfig1
-rw-r--r--arch/arm64/configs/vendor/lito_defconfig1
-rw-r--r--drivers/bus/mhi/controllers/mhi_arch_qcom.c20
-rw-r--r--drivers/bus/mhi/core/mhi_boot.c24
-rw-r--r--drivers/bus/mhi/core/mhi_init.c70
-rw-r--r--drivers/bus/mhi/core/mhi_internal.h5
-rw-r--r--drivers/bus/mhi/core/mhi_main.c146
-rw-r--r--drivers/bus/mhi/core/mhi_pm.c18
-rw-r--r--drivers/char/adsprpc.c19
-rw-r--r--drivers/clk/qcom/gdsc-regulator.c17
-rw-r--r--drivers/clk/qcom/videocc-lagoon.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c200
-rw-r--r--drivers/dma/qcom/gpi.c5
-rw-r--r--drivers/gpu/msm/adreno_a6xx_preempt.c4
-rw-r--r--drivers/gpu/msm/adreno_snapshot.c31
-rw-r--r--drivers/gpu/msm/kgsl_gmu.c24
-rw-r--r--drivers/gpu/msm/kgsl_hfi.c11
-rw-r--r--drivers/gpu/msm/kgsl_snapshot.c2
-rw-r--r--drivers/gpu/msm/kgsl_sync.c10
-rw-r--r--drivers/gpu/msm/kgsl_sync.h4
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c11
-rw-r--r--drivers/media/platform/msm/npu/npu_host_ipc.c11
-rw-r--r--drivers/media/platform/msm/synx/synx.c6
-rw-r--r--drivers/mmc/host/sdhci-msm.c165
-rw-r--r--drivers/mmc/host/sdhci-msm.h2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c9
-rw-r--r--drivers/net/wireless/cnss2/main.c28
-rw-r--r--drivers/net/wireless/cnss2/pci.c39
-rw-r--r--drivers/power/supply/qcom/battery.c56
-rw-r--r--drivers/power/supply/qcom/battery.h1
-rw-r--r--drivers/power/supply/qcom/fg-alg.c4
-rw-r--r--drivers/power/supply/qcom/qg-core.h2
-rw-r--r--drivers/power/supply/qcom/qg-util.c17
-rw-r--r--drivers/power/supply/qcom/qg-util.h2
-rw-r--r--drivers/power/supply/qcom/qpnp-qg.c19
-rw-r--r--drivers/power/supply/qcom/qpnp-smb5.c7
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/soc/qcom/dcc_v2.c3
-rw-r--r--drivers/soc/qcom/icnss.c5
-rw-r--r--drivers/soc/qcom/icnss2/main.c2
-rw-r--r--drivers/soc/qcom/rpmh_master_stat.c5
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c4
-rw-r--r--drivers/thermal/msm-tsens.c6
-rw-r--r--drivers/thermal/tsens2xxx.c122
-rwxr-xr-x[-rw-r--r--]drivers/tty/serial/msm_geni_serial.c924
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/gadget.c34
-rw-r--r--drivers/usb/gadget/function/f_fs.c37
-rw-r--r--include/dt-bindings/clock/mdss-7nm-pll-clk.h52
-rw-r--r--include/linux/mhi.h2
-rwxr-xr-x[-rw-r--r--]include/linux/qcom-geni-se.h9
-rw-r--r--include/uapi/linux/v4l2-controls.h3
-rw-r--r--sound/soc/soc-core.c22
-rw-r--r--sound/usb/usb_audio_qmi_svc.c29
54 files changed, 1722 insertions, 534 deletions
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index a820d843b858..5a687c65b76e 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -599,6 +599,7 @@ CONFIG_MSM_PERFORMANCE=y
CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_CDSP_RM=y
CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
CONFIG_ICNSS=y
CONFIG_ICNSS_QMI=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index a467421be86a..54732fa620b9 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -619,6 +619,7 @@ CONFIG_MSM_PERFORMANCE=y
CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_CDSP_RM=y
CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
CONFIG_ICNSS=y
CONFIG_ICNSS_DEBUG=y
CONFIG_ICNSS_QMI=y
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index f9f173111f69..ce4a33bcc8f2 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -587,12 +587,17 @@ static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
if (cur_link_info->target_link_speed != PCI_EXP_LNKSTA_CLS_2_5GB) {
link_info.target_link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
link_info.target_link_width = cur_link_info->target_link_width;
- ret = mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, &link_info);
+
+ ret = msm_pcie_set_link_bandwidth(pci_dev,
+ link_info.target_link_speed,
+ link_info.target_link_width);
if (ret) {
MHI_CNTRL_ERR("Failed to switch Gen1 speed\n");
return -EBUSY;
}
+ /* no DDR votes when doing a drv suspend */
+ mhi_arch_set_bus_request(mhi_cntrl, 0);
bw_switched = true;
}
@@ -601,9 +606,7 @@ static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
pci_dev, NULL, mhi_cntrl->wake_set ?
MSM_PCIE_CONFIG_NO_L1SS_TO : 0);
- /*
- * we failed to suspend and scaled down pcie bw.. need to scale up again
- */
+ /* failed to suspend and scaled down pcie bw, need to scale up again */
if (ret && bw_switched) {
mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, cur_link_info);
return ret;
@@ -723,9 +726,14 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
case MHI_FAST_LINK_OFF:
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus,
pci_dev, NULL, 0);
- if (ret ||
- cur_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ if (ret)
+ break;
+
+ if (cur_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ mhi_arch_set_bus_request(mhi_cntrl,
+ cur_info->target_link_speed);
break;
+ }
/*
* BW request from device isn't for gen 1 link speed, we can
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index 21d7202ef5b2..a106ba209afe 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -565,8 +565,21 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
- MHI_CNTRL_ERR("Error loading firmware, ret:%d\n", ret);
- return;
+ if (!mhi_cntrl->fw_image_fallback) {
+ MHI_ERR("Error loading fw, ret:%d\n", ret);
+ return;
+ }
+
+ /* re-try with fall back fw image */
+ ret = request_firmware(&firmware, mhi_cntrl->fw_image_fallback,
+ mhi_cntrl->dev);
+ if (ret) {
+ MHI_ERR("Error loading fw_fb, ret:%d\n", ret);
+ return;
+ }
+
+ mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+ MHI_CB_FW_FALLBACK_IMG);
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
@@ -663,3 +676,10 @@ error_read:
error_alloc_fw_table:
release_firmware(firmware);
}
+
+void mhi_perform_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+}
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 32d62856f8c5..3c3938800bbe 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -346,6 +346,9 @@ void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
}
spin_unlock(&mhi_tsync->lock);
+ if (mhi_tsync->db_response_pending)
+ complete(&mhi_tsync->db_completion);
+
kfree(mhi_cntrl->mhi_tsync);
mhi_cntrl->mhi_tsync = NULL;
mutex_unlock(&mhi_cntrl->tsync_mutex);
@@ -520,6 +523,12 @@ static int mhi_init_debugfs_mhi_vote_open(struct inode *inode, struct file *fp)
return single_open(fp, mhi_debugfs_mhi_vote_show, inode->i_private);
}
+static int mhi_init_debugfs_mhi_regdump_open(struct inode *inode,
+ struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_mhi_regdump_show, inode->i_private);
+}
+
static const struct file_operations debugfs_state_ops = {
.open = mhi_init_debugfs_mhi_states_open,
.release = single_release,
@@ -544,9 +553,18 @@ static const struct file_operations debugfs_vote_ops = {
.read = seq_read,
};
+static const struct file_operations debugfs_regdump_ops = {
+ .open = mhi_init_debugfs_mhi_regdump_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
mhi_debugfs_trigger_reset, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_soc_reset_fops, NULL,
+ mhi_debugfs_trigger_soc_reset, "%llu\n");
+
void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
{
struct dentry *dentry;
@@ -573,6 +591,11 @@ void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
&debugfs_vote_ops);
debugfs_create_file_unsafe("reset", 0444, dentry, mhi_cntrl,
&debugfs_trigger_reset_fops);
+ debugfs_create_file_unsafe("regdump", 0444, dentry, mhi_cntrl,
+ &debugfs_regdump_ops);
+ debugfs_create_file_unsafe("soc_reset", 0444, dentry, mhi_cntrl,
+ &debugfs_trigger_soc_reset_fops);
+
mhi_cntrl->dentry = dentry;
}
@@ -1923,7 +1946,8 @@ static int mhi_driver_remove(struct device *dev)
MHI_CH_STATE_DISABLED,
MHI_CH_STATE_DISABLED
};
- int dir;
+ int dir, ret;
+ bool interrupted = false;
/* control device has no work to do */
if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
@@ -1931,11 +1955,11 @@ static int mhi_driver_remove(struct device *dev)
MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name);
- /* reset both channels */
+ /* move both channels to suspended state and disallow processing */
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
- if (!mhi_chan)
+ if (!mhi_chan || mhi_chan->offload_ch)
continue;
/* wake all threads waiting for completion */
@@ -1944,15 +1968,45 @@ static int mhi_driver_remove(struct device *dev)
complete_all(&mhi_chan->completion);
write_unlock_irq(&mhi_chan->lock);
- /* move channel state to disable, no more processing */
mutex_lock(&mhi_chan->mutex);
write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ }
+ write_unlock_irq(&mhi_chan->lock);
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ /* wait for each channel to close and reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan || mhi_chan->offload_ch)
+ continue;
+
+ /* unbind request from userspace, wait for channel reset */
+ if (!(mhi_cntrl->power_down ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) &&
+ ch_state[dir] != MHI_CH_STATE_DISABLED && !interrupted) {
+ MHI_ERR("Channel %s busy, wait for it to be reset\n",
+ mhi_dev->chan_name);
+ ret = wait_event_interruptible(mhi_cntrl->state_event,
+ mhi_chan->ch_state == MHI_CH_STATE_DISABLED ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state));
+ if (unlikely(ret))
+ interrupted = true;
+ }
+
+ /* update channel state as an error can exit above wait */
+ mutex_lock(&mhi_chan->mutex);
+
+ write_lock_irq(&mhi_chan->lock);
ch_state[dir] = mhi_chan->ch_state;
- mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
write_unlock_irq(&mhi_chan->lock);
- /* reset the channel */
- if (!mhi_chan->offload_ch)
+ /* reset channel if it was left enabled */
+ if (ch_state[dir] != MHI_CH_STATE_DISABLED)
mhi_reset_chan(mhi_cntrl, mhi_chan);
mutex_unlock(&mhi_chan->mutex);
@@ -1970,7 +2024,7 @@ static int mhi_driver_remove(struct device *dev)
mutex_lock(&mhi_chan->mutex);
- if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+ if (ch_state[dir] != MHI_CH_STATE_DISABLED &&
!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index c2b99e802158..2adf0e7435e3 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -727,8 +727,10 @@ struct mhi_timesync {
void __iomem *time_reg;
u32 int_sequence;
u64 local_time;
+ u64 remote_time;
bool db_support;
bool db_response_pending;
+ struct completion db_completion;
spinlock_t lock; /* list protection */
struct list_head head;
};
@@ -754,11 +756,13 @@ extern struct mhi_bus mhi_bus;
struct mhi_controller *find_mhi_controller_by_name(const char *name);
/* debug fs related functions */
+int mhi_debugfs_mhi_regdump_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_vote_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d);
int mhi_debugfs_trigger_reset(void *data, u64 val);
+int mhi_debugfs_trigger_soc_reset(void *data, u64 val);
void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_init_debugfs(struct mhi_controller *mhi_cntrl);
@@ -965,6 +969,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl);
void mhi_force_reg_write(struct mhi_controller *mhi_cntrl);
+void mhi_perform_soc_reset(struct mhi_controller *mhi_cntrl);
/* isr handlers */
irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 24d8ceeaff6f..611f601cac62 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1402,6 +1402,10 @@ int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
if (unlikely(mhi_tsync->int_sequence != sequence)) {
MHI_ASSERT(1, "Unexpected response:0x%llx Expected:0x%llx\n",
sequence, mhi_tsync->int_sequence);
+
+ mhi_device_put(mhi_cntrl->mhi_dev,
+ MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+
mutex_unlock(&mhi_cntrl->tsync_mutex);
goto exit_tsync_process;
}
@@ -1427,6 +1431,11 @@ int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
} while (true);
mhi_tsync->db_response_pending = false;
+ mhi_tsync->remote_time = remote_time;
+ complete(&mhi_tsync->db_completion);
+
+ mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+
mutex_unlock(&mhi_cntrl->tsync_mutex);
exit_tsync_process:
@@ -1484,13 +1493,10 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
read_unlock_bh(&mhi_cntrl->pm_lock);
spin_unlock_bh(&mhi_event->lock);
- atomic_inc(&mhi_cntrl->pending_pkts);
ret = mhi_device_get_sync(mhi_cntrl->mhi_dev,
MHI_VOTE_DEVICE | MHI_VOTE_BUS);
- if (ret) {
- atomic_dec(&mhi_cntrl->pending_pkts);
+ if (ret)
goto exit_bw_scale_process;
- }
mutex_lock(&mhi_cntrl->pm_mutex);
@@ -1508,7 +1514,6 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
- atomic_dec(&mhi_cntrl->pending_pkts);
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -2050,19 +2055,22 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
{
int ret;
bool in_mission_mode = false;
+ bool notify = false;
MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan);
/* no more processing events for this channel */
mutex_lock(&mhi_chan->mutex);
write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan);
write_unlock_irq(&mhi_chan->lock);
mutex_unlock(&mhi_chan->mutex);
return;
}
-
+ if (mhi_chan->ch_state == MHI_CH_STATE_SUSPENDED)
+ notify = true;
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
write_unlock_irq(&mhi_chan->lock);
@@ -2103,11 +2111,73 @@ error_invalid_state:
if (!mhi_chan->offload_ch) {
mhi_reset_chan(mhi_cntrl, mhi_chan);
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ /* notify waiters to proceed with unbinding channel */
+ if (notify)
+ wake_up_all(&mhi_cntrl->state_event);
}
MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan);
mutex_unlock(&mhi_chan->mutex);
}
+int mhi_debugfs_mhi_regdump_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ enum mhi_dev_state state;
+ enum mhi_ee ee;
+ int i, ret;
+ u32 val;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void __iomem *base;
+ } debug_reg[] = {
+ { "MHI_CNTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return -EIO;
+
+ seq_printf(m, "host pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+
+ seq_printf(m, "device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
+ for (i = 0; debug_reg[i].name; i++) {
+ if (!debug_reg[i].base)
+ continue;
+ ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
+ debug_reg[i].offset, &val);
+ seq_printf(m, "reg:%s val:0x%x, ret:%d\n", debug_reg[i].name,
+ val, ret);
+ }
+
+ return 0;
+}
+
int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
@@ -2319,7 +2389,8 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
mhi_chan->chan, cmd == MHI_CMD_START_CHAN ? "START" : "STOP");
/* if channel is not active state state do not allow to state change */
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
ret = -EINVAL;
MHI_LOG("channel:%d is not in active state, ch_state%d\n",
mhi_chan->chan, mhi_chan->ch_state);
@@ -2502,13 +2573,37 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+ u64 local_time;
int ret;
- mutex_lock(&mhi_cntrl->tsync_mutex);
/* not all devices support time features */
- if (!mhi_tsync) {
- ret = -EIO;
- goto err_unlock;
+ if (!mhi_tsync)
+ return -EINVAL;
+
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_ERR("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ mutex_lock(&mhi_cntrl->tsync_mutex);
+
+ /* return times from last async request completion */
+ if (mhi_tsync->db_response_pending) {
+ local_time = mhi_tsync->local_time;
+ mutex_unlock(&mhi_cntrl->tsync_mutex);
+
+ ret = wait_for_completion_timeout(&mhi_tsync->db_completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || !ret) {
+ MHI_ERR("Pending DB request did not complete, abort\n");
+ return -EAGAIN;
+ }
+
+ *t_host = local_time;
+ *t_dev = mhi_tsync->remote_time;
+
+ return 0;
}
/* bring to M0 state */
@@ -2581,14 +2676,13 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
int ret = 0;
/* not all devices support all time features */
+ if (!mhi_tsync || !mhi_tsync->db_support)
+ return -EINVAL;
+
mutex_lock(&mhi_cntrl->tsync_mutex);
- if (!mhi_tsync || !mhi_tsync->db_support) {
- ret = -EIO;
- goto error_unlock;
- }
- /* tsync db can only be rung in M0 state */
- ret = __mhi_device_get_sync(mhi_cntrl);
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev,
+ MHI_VOTE_DEVICE | MHI_VOTE_BUS);
if (ret)
goto error_unlock;
@@ -2656,21 +2750,21 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
MHI_VERB("time DB request with seq:0x%llx\n", mhi_tsync->int_sequence);
mhi_tsync->db_response_pending = true;
+ init_completion(&mhi_tsync->db_completion);
skip_tsync_db:
spin_lock(&mhi_tsync->lock);
list_add_tail(&tsync_node->node, &mhi_tsync->head);
spin_unlock(&mhi_tsync->lock);
- ret = 0;
+ mutex_unlock(&mhi_cntrl->tsync_mutex);
+
+ return 0;
error_invalid_state:
- if (ret)
- kfree(tsync_node);
+ kfree(tsync_node);
error_no_mem:
- read_lock_bh(&mhi_cntrl->pm_lock);
- mhi_cntrl->wake_put(mhi_cntrl, false);
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
error_unlock:
mutex_unlock(&mhi_cntrl->tsync_mutex);
return ret;
@@ -2690,7 +2784,7 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
struct {
const char *name;
int offset;
- void *base;
+ void __iomem *base;
} debug_reg[] = {
{ "MHI_CNTRL", MHICTRL, mhi_base},
{ "MHI_STATUS", MHISTATUS, mhi_base},
@@ -2720,6 +2814,8 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
TO_MHI_STATE_STR(state));
for (i = 0; debug_reg[i].name; i++) {
+ if (!debug_reg[i].base)
+ continue;
ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
debug_reg[i].offset, &val);
MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val,
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index a4e63bc9456b..61bcbfd61754 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -716,6 +716,17 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_cntrl->pm_mutex);
}
+int mhi_debugfs_trigger_soc_reset(void *data, u64 val)
+{
+ struct mhi_controller *mhi_cntrl = data;
+
+ MHI_LOG("Trigger MHI SOC Reset\n");
+
+ mhi_perform_soc_reset(mhi_cntrl);
+
+ return 0;
+}
+
int mhi_debugfs_trigger_reset(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
@@ -1229,6 +1240,7 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
int ret;
enum MHI_PM_STATE new_state;
struct mhi_chan *itr, *tmp;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
read_lock_bh(&mhi_cntrl->pm_lock);
if (mhi_cntrl->pm_state == MHI_PM_DISABLE) {
@@ -1243,7 +1255,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
read_unlock_bh(&mhi_cntrl->pm_lock);
/* do a quick check to see if any pending votes to keep us busy */
- if (atomic_read(&mhi_cntrl->pending_pkts)) {
+ if (atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_dev->bus_vote)) {
MHI_VERB("Busy, aborting M3\n");
return -EBUSY;
}
@@ -1262,7 +1275,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
* Check the votes once more to see if we should abort
* suspend.
*/
- if (atomic_read(&mhi_cntrl->pending_pkts)) {
+ if (atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_dev->bus_vote)) {
MHI_VERB("Busy, aborting M3\n");
ret = -EBUSY;
goto error_suspend;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index f6ca64cfbc74..90a98163b841 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -3143,7 +3143,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
__func__, current->comm);
err = EBADR;
- goto bail;
+ return err;
}
mutex_lock(&fl->internal_map_mutex);
@@ -3192,6 +3192,11 @@ bail:
return err;
}
+/*
+ * fastrpc_internal_munmap_fd can only be used for buffers
+ * mapped with persist attributes. This can only be called
+ * once for any persist buffer
+ */
static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
struct fastrpc_ioctl_munmap_fd *ud)
{
@@ -3200,14 +3205,15 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
VERIFY(err, (fl && ud));
if (err)
- goto bail;
+ return err;
VERIFY(err, fl->dsp_proc_init == 1);
if (err) {
pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
__func__, current->comm);
err = EBADR;
- goto bail;
+ return err;
}
+ mutex_lock(&fl->internal_map_mutex);
mutex_lock(&fl->map_mutex);
if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
pr_err("adsprpc: mapping not found to unmap fd 0x%x, va 0x%llx, len 0x%x\n",
@@ -3217,10 +3223,13 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
mutex_unlock(&fl->map_mutex);
goto bail;
}
- if (map)
+ if (map && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
+ map->attr = map->attr & (~FASTRPC_ATTR_KEEP_MAP);
fastrpc_mmap_free(map, 0);
+ }
mutex_unlock(&fl->map_mutex);
bail:
+ mutex_unlock(&fl->internal_map_mutex);
return err;
}
@@ -3239,7 +3248,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
__func__, current->comm);
err = EBADR;
- goto bail;
+ return err;
}
mutex_lock(&fl->internal_map_mutex);
if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index e16a9e5baad6..94ab96168b41 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -146,6 +146,9 @@ static int gdsc_is_enabled(struct regulator_dev *rdev)
if (!sc->toggle_logic)
return !sc->resets_asserted;
+ if (sc->skip_disable_before_enable)
+ return false;
+
if (sc->parent_regulator) {
/*
* The parent regulator for the GDSC is required to be on to
@@ -209,6 +212,9 @@ static int gdsc_enable(struct regulator_dev *rdev)
uint32_t regval, hw_ctrl_regval = 0x0;
int i, ret = 0;
+ if (sc->skip_disable_before_enable)
+ return 0;
+
if (sc->parent_regulator) {
ret = regulator_set_voltage(sc->parent_regulator,
RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
@@ -367,7 +373,6 @@ static int gdsc_enable(struct regulator_dev *rdev)
clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
sc->is_gdsc_enabled = true;
- sc->skip_disable_before_enable = false;
end:
if (ret && sc->bus_handle) {
msm_bus_scale_client_update_request(sc->bus_handle, 0);
@@ -386,16 +391,6 @@ static int gdsc_disable(struct regulator_dev *rdev)
uint32_t regval;
int i, ret = 0;
- /*
- * Protect GDSC against late_init disabling when the GDSC is enabled
- * by an entity outside external to HLOS.
- */
- if (sc->skip_disable_before_enable) {
- dev_dbg(&rdev->dev, "Skip Disabling: %s\n", sc->rdesc.name);
- sc->skip_disable_before_enable = false;
- return 0;
- }
-
if (sc->force_root_en)
clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
diff --git a/drivers/clk/qcom/videocc-lagoon.c b/drivers/clk/qcom/videocc-lagoon.c
index 32838a6f67d6..0f49c37f8756 100644
--- a/drivers/clk/qcom/videocc-lagoon.c
+++ b/drivers/clk/qcom/videocc-lagoon.c
@@ -127,7 +127,6 @@ static struct clk_alpha_pll_postdiv video_pll0_out_even = {
};
static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index b5481aba2d0b..87f65201bcb4 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -24,13 +24,18 @@
#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
#define GT_IRQ_STATUS BIT(2)
-#define MAX_FN_SIZE 12
+#define MAX_FN_SIZE 20
#define LIMITS_POLLING_DELAY_MS 10
#define CYCLE_CNTR_OFFSET(c, m, acc_count) \
(acc_count ? ((c - cpumask_first(m) + 1) * 4) : 0)
enum {
+ CPUFREQ_HW_LOW_TEMP_LEVEL,
+ CPUFREQ_HW_HIGH_TEMP_LEVEL,
+};
+
+enum {
REG_ENABLE,
REG_FREQ_LUT_TABLE,
REG_VOLT_LUT_TABLE,
@@ -48,17 +53,29 @@ static unsigned int lut_row_size = LUT_ROW_SIZE;
static unsigned int lut_max_entries = LUT_MAX_ENTRIES;
static bool accumulative_counter;
+struct skipped_freq {
+ bool skip;
+ u32 freq;
+ u32 cc;
+ u32 high_temp_index;
+ u32 low_temp_index;
+ u32 final_index;
+ spinlock_t lock;
+};
+
struct cpufreq_qcom {
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
cpumask_t related_cpus;
unsigned int max_cores;
+ unsigned int lut_max_entries;
unsigned long xo_rate;
unsigned long cpu_hw_rate;
unsigned long dcvsh_freq_limit;
struct delayed_work freq_poll_work;
struct mutex dcvsh_lock;
struct device_attribute freq_limit_attr;
+ struct skipped_freq skip_data;
int dcvsh_irq;
char dcvsh_irq_name[MAX_FN_SIZE];
bool is_irq_enabled;
@@ -71,6 +88,13 @@ struct cpufreq_counter {
spinlock_t lock;
};
+struct cpufreq_cooling_cdev {
+ int cpu_id;
+ bool cpu_cooling_state;
+ struct thermal_cooling_device *cdev;
+ struct device_node *np;
+};
+
static const u16 cpufreq_qcom_std_offsets[REG_ARRAY_SIZE] = {
[REG_ENABLE] = 0x0,
[REG_FREQ_LUT_TABLE] = 0x110,
@@ -228,8 +252,17 @@ qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_qcom *c = policy->driver_data;
+ unsigned long flags;
+
+ if (c->skip_data.skip && index == c->skip_data.high_temp_index) {
+ spin_lock_irqsave(&c->skip_data.lock, flags);
+ writel_relaxed(c->skip_data.final_index,
+ c->reg_bases[REG_PERF_STATE]);
+ spin_unlock_irqrestore(&c->skip_data.lock, flags);
+ } else {
+ writel_relaxed(index, c->reg_bases[REG_PERF_STATE]);
+ }
- writel_relaxed(index, c->reg_bases[REG_PERF_STATE]);
arch_set_freq_scale(policy->related_cpus,
policy->freq_table[index].frequency,
policy->cpuinfo.max_freq);
@@ -250,7 +283,7 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
c = policy->driver_data;
index = readl_relaxed(c->reg_bases[REG_PERF_STATE]);
- index = min(index, lut_max_entries - 1);
+ index = min(index, c->lut_max_entries - 1);
return policy->freq_table[index].frequency;
}
@@ -390,6 +423,7 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
if (!c->table)
return -ENOMEM;
+ spin_lock_init(&c->skip_data.lock);
base_freq = c->reg_bases[REG_FREQ_LUT_TABLE];
base_volt = c->reg_bases[REG_VOLT_LUT_TABLE];
@@ -414,8 +448,17 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
i, c->table[i].frequency, core_count);
if (core_count != c->max_cores) {
- cur_freq = CPUFREQ_ENTRY_INVALID;
- c->table[i].flags = CPUFREQ_BOOST_FREQ;
+ if (core_count == (c->max_cores - 1)) {
+ c->skip_data.skip = true;
+ c->skip_data.high_temp_index = i;
+ c->skip_data.freq = cur_freq;
+ c->skip_data.cc = core_count;
+ c->skip_data.final_index = i + 1;
+ c->skip_data.low_temp_index = i + 1;
+ } else {
+ cur_freq = CPUFREQ_ENTRY_INVALID;
+ c->table[i].flags = CPUFREQ_BOOST_FREQ;
+ }
}
/*
@@ -423,13 +466,17 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
* end of table.
*/
if (i > 0 && c->table[i - 1].frequency ==
- c->table[i].frequency && prev_cc == core_count) {
- struct cpufreq_frequency_table *prev = &c->table[i - 1];
-
- if (prev_freq == CPUFREQ_ENTRY_INVALID)
- prev->flags = CPUFREQ_BOOST_FREQ;
+ c->table[i].frequency) {
+ if (prev_cc == core_count) {
+ struct cpufreq_frequency_table *prev =
+ &c->table[i - 1];
+
+ if (prev_freq == CPUFREQ_ENTRY_INVALID)
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ }
break;
}
+
prev_cc = core_count;
prev_freq = cur_freq;
@@ -442,8 +489,17 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
}
}
+ c->lut_max_entries = i;
c->table[i].frequency = CPUFREQ_TABLE_END;
+ if (c->skip_data.skip) {
+ pr_debug("%s Skip: Index[%u], Frequency[%u], Core Count %u, Final Index %u Actual Index %u\n",
+ __func__, c->skip_data.high_temp_index,
+ c->skip_data.freq, c->skip_data.cc,
+ c->skip_data.final_index,
+ c->skip_data.low_temp_index);
+ }
+
return 0;
}
@@ -603,6 +659,128 @@ static int qcom_resources_init(struct platform_device *pdev)
return 0;
}
+static int cpufreq_hw_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpufreq_cooling_cdev *cpu_cdev = cdev->devdata;
+ struct cpufreq_policy *policy;
+ struct cpufreq_qcom *c;
+ unsigned long flags;
+
+
+ if (cpu_cdev->cpu_id == -1)
+ return -ENODEV;
+
+ if (state > CPUFREQ_HW_HIGH_TEMP_LEVEL)
+ return -EINVAL;
+
+ if (cpu_cdev->cpu_cooling_state == state)
+ return 0;
+
+ policy = cpufreq_cpu_get_raw(cpu_cdev->cpu_id);
+ if (!policy)
+ return 0;
+
+ c = policy->driver_data;
+ cpu_cdev->cpu_cooling_state = state;
+
+ if (state == CPUFREQ_HW_HIGH_TEMP_LEVEL) {
+ spin_lock_irqsave(&c->skip_data.lock, flags);
+ c->skip_data.final_index = c->skip_data.high_temp_index;
+ spin_unlock_irqrestore(&c->skip_data.lock, flags);
+ } else {
+ spin_lock_irqsave(&c->skip_data.lock, flags);
+ c->skip_data.final_index = c->skip_data.low_temp_index;
+ spin_unlock_irqrestore(&c->skip_data.lock, flags);
+ }
+
+ if (policy->cur != c->skip_data.freq)
+ return 0;
+
+ return qcom_cpufreq_hw_target_index(policy,
+ c->skip_data.high_temp_index);
+}
+
+static int cpufreq_hw_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpufreq_cooling_cdev *cpu_cdev = cdev->devdata;
+
+ *state = (cpu_cdev->cpu_cooling_state) ?
+ CPUFREQ_HW_HIGH_TEMP_LEVEL : CPUFREQ_HW_LOW_TEMP_LEVEL;
+
+ return 0;
+}
+
+static int cpufreq_hw_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = CPUFREQ_HW_HIGH_TEMP_LEVEL;
+ return 0;
+}
+
+static struct thermal_cooling_device_ops cpufreq_hw_cooling_ops = {
+ .get_max_state = cpufreq_hw_get_max_state,
+ .get_cur_state = cpufreq_hw_get_cur_state,
+ .set_cur_state = cpufreq_hw_set_cur_state,
+};
+
+static int cpufreq_hw_register_cooling_device(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node, *cpu_np, *phandle;
+ struct cpufreq_cooling_cdev *cpu_cdev = NULL;
+ struct device *cpu_dev;
+ struct cpufreq_policy *policy;
+ struct cpufreq_qcom *c;
+ char cdev_name[MAX_FN_SIZE] = "";
+ int cpu;
+
+ for_each_available_child_of_node(np, cpu_np) {
+ cpu_cdev = devm_kzalloc(&pdev->dev, sizeof(*cpu_cdev),
+ GFP_KERNEL);
+ if (!cpu_cdev)
+ return -ENOMEM;
+ cpu_cdev->cpu_id = -1;
+ cpu_cdev->cpu_cooling_state = false;
+ cpu_cdev->cdev = NULL;
+ cpu_cdev->np = cpu_np;
+
+ phandle = of_parse_phandle(cpu_np, "qcom,cooling-cpu", 0);
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ continue;
+ c = policy->driver_data;
+ if (!c->skip_data.skip)
+ continue;
+ cpu_dev = get_cpu_device(cpu);
+ if (cpu_dev && cpu_dev->of_node == phandle) {
+ cpu_cdev->cpu_id = cpu;
+ snprintf(cdev_name, sizeof(cdev_name),
+ "cpufreq-hw-%d", cpu);
+ cpu_cdev->cdev =
+ thermal_of_cooling_device_register(
+ cpu_cdev->np, cdev_name,
+ cpu_cdev,
+ &cpufreq_hw_cooling_ops);
+ if (IS_ERR(cpu_cdev->cdev)) {
+ pr_err("Cooling register failed for %s, ret: %d\n",
+ cdev_name,
+ PTR_ERR(cpu_cdev->cdev));
+ c->skip_data.final_index =
+ c->skip_data.high_temp_index;
+ break;
+ }
+ pr_info("CPUFREQ-HW cooling device %d %s\n",
+ cpu, cdev_name);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
struct cpu_cycle_counter_cb cycle_counter_cb = {
@@ -635,6 +813,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ cpufreq_hw_register_cooling_device(pdev);
+
return 0;
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 22cbca7133fc..5947176879ef 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -279,6 +279,7 @@ static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
[MSM_GPI_QUP_NOTIFY] = "NOTIFY",
[MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
[MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
+ [MSM_GPI_QUP_FW_ERROR] = "UNHANDLED ERROR",
[MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
[MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
[MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
@@ -2238,6 +2239,10 @@ int gpi_terminate_all(struct dma_chan *chan)
if (ret) {
GPII_ERR(gpii, gpii_chan->chid,
"Error resetting channel ret:%d\n", ret);
+ if (!gpii->reg_table_dump) {
+ gpi_dump_debug_reg(gpii);
+ gpii->reg_table_dump = true;
+ }
goto terminate_exit;
}
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index d537084da357..b01286e7248c 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -145,7 +145,7 @@ static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
if (kgsl_state_is_awake(device)) {
adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
- if (status == 0) {
+ if (!(status & 0x1)) {
adreno_set_preempt_state(adreno_dev,
ADRENO_PREEMPT_COMPLETE);
@@ -155,7 +155,7 @@ static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
}
dev_err(device->dev,
- "Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
+ "Preemption Fault: cur=%d R/W=0x%x/0x%x, next=%d R/W=0x%x/0x%x\n",
adreno_dev->cur_rb->id,
adreno_get_rptr(adreno_dev->cur_rb),
adreno_dev->cur_rb->wptr,
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 2fe694f67638..b0342c703905 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/msm-bus.h>
@@ -908,17 +908,34 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
* The problem is that IB size from the register is the unprocessed size
* of the buffer not the original size, so if we didn't catch this
* buffer being directly used in the RB, then we might not be able to
- * dump the whole thing. Print a warning message so we can try to
+ * dump the whole thing. Try to dump the maximum possible size from the
+ * IB1 base address till the end of memdesc size so that we don't miss
+ * what we are interested in. Print a warning message so we can try to
* figure how often this really happens.
*/
if (-ENOENT == find_object(snapshot->ib1base, snapshot->process) &&
snapshot->ib1size) {
- kgsl_snapshot_push_object(device, snapshot->process,
- snapshot->ib1base, snapshot->ib1size);
- dev_err(device->dev,
- "CP_IB1_BASE not found in the ringbuffer.Dumping %x dwords of the buffer\n",
- snapshot->ib1size);
+ struct kgsl_mem_entry *entry;
+ u64 ibsize;
+
+ entry = kgsl_sharedmem_find(snapshot->process,
+ snapshot->ib1base);
+ if (entry == NULL) {
+ dev_err(device->dev,
+ "Can't find a memory entry containing IB1BASE %16llx\n",
+ snapshot->ib1base);
+ } else {
+ ibsize = entry->memdesc.size -
+ (snapshot->ib1base - entry->memdesc.gpuaddr);
+ kgsl_mem_entry_put(entry);
+
+ kgsl_snapshot_push_object(device, snapshot->process,
+ snapshot->ib1base, ibsize >> 2);
+ dev_err(device->dev,
+ "CP_IB1_BASE is not found in the ringbuffer. Dumping %llx dwords of the buffer\n",
+ ibsize >> 2);
+ }
}
/*
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index e36522b3ab89..a9fe84909352 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -282,6 +282,7 @@ static int gmu_iommu_cb_probe(struct gmu_device *gmu,
struct platform_device *pdev = of_find_device_by_node(node);
struct device *dev;
int ret;
+ int no_stall = 1;
dev = &pdev->dev;
of_dma_configure(dev, node, true);
@@ -294,6 +295,14 @@ static int gmu_iommu_cb_probe(struct gmu_device *gmu,
return -ENODEV;
}
+ /*
+ * Disable stall on fault for the GMU context bank.
+ * This sets SCTLR.CFCFG = 0.
+ * Also note that, the smmu driver sets SCTLR.HUPCF = 0 by default.
+ */
+ iommu_domain_set_attr(ctx->domain,
+ DOMAIN_ATTR_FAULT_MODEL_NO_STALL, &no_stall);
+
ret = iommu_attach_device(ctx->domain, dev);
if (ret) {
dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
@@ -1338,14 +1347,17 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
device->gmu_core.reg_len = gmu->reg_len;
/* Initialize HFI and GMU interrupts */
- hfi->hfi_interrupt_num = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq",
- hfi_irq_handler, device);
-
- gmu->gmu_interrupt_num = kgsl_request_irq(gmu->pdev, "kgsl_gmu_irq",
- gmu_irq_handler, device);
+ ret = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq",
+ hfi_irq_handler, device);
+ if (ret < 0)
+ goto error;
+ hfi->hfi_interrupt_num = ret;
- if (hfi->hfi_interrupt_num < 0 || gmu->gmu_interrupt_num < 0)
+ ret = kgsl_request_irq(gmu->pdev, "kgsl_gmu_irq",
+ gmu_irq_handler, device);
+ if (ret < 0)
goto error;
+ gmu->gmu_interrupt_num = ret;
/* Don't enable GMU interrupts until GMU started */
/* We cannot use irq_disable because it writes registers */
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index ec5354857742..6ce56650a3c2 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -268,9 +268,9 @@ static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- u64 ao_pre_poll, ao_post_poll;
+ u64 ts1, ts2;
- ao_pre_poll = gmu_core_dev_read_ao_counter(device);
+ ts1 = gmu_core_dev_read_ao_counter(device);
while (time_is_after_jiffies(timeout)) {
adreno_read_gmureg(adreno_dev, offset_name, &val);
@@ -279,15 +279,16 @@ static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
usleep_range(10, 100);
}
- ao_post_poll = gmu_core_dev_read_ao_counter(device);
+ ts2 = gmu_core_dev_read_ao_counter(device);
/* Check one last time */
adreno_read_gmureg(adreno_dev, offset_name, &val);
if ((val & mask) == expected_val)
return 0;
- dev_err(&gmu->pdev->dev, "kgsl hfi poll timeout: always on: %lld ms\n",
- div_u64((ao_post_poll - ao_pre_poll) * 52, USEC_PER_SEC));
+ dev_err(&gmu->pdev->dev,
+ "Timed out waiting for HFI response. Wait start=%llx end=%llx\n",
+ ts1, ts2);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 0397177d8c6c..59917d223383 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1331,7 +1331,7 @@ done:
snapshot->ib2base);
gmu_only:
- complete_all(&snapshot->dump_gate);
BUG_ON(!snapshot->device->skip_ib_capture &
snapshot->device->force_panic);
+ complete_all(&snapshot->dump_gate);
}
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 38b5c88ae46a..a90b9e359571 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/file.h>
@@ -309,8 +309,7 @@ int kgsl_sync_timeline_create(struct kgsl_context *context)
}
kref_init(&ktimeline->kref);
- snprintf(ktimeline->name, sizeof(ktimeline->name),
- "%s_%d-%.15s(%d)-%.15s(%d)",
+ ktimeline->name = kasprintf(GFP_KERNEL, "%s_%d-%.15s(%d)-%.15s(%d)",
context->device->name, context->id,
current->group_leader->comm, current->group_leader->pid,
current->comm, current->pid);
@@ -354,7 +353,10 @@ static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *ktimeline,
void kgsl_sync_timeline_destroy(struct kgsl_context *context)
{
- kfree(context->ktimeline);
+ struct kgsl_sync_timeline *ktimeline = context->ktimeline;
+
+ kfree(ktimeline->name);
+ kfree(ktimeline);
}
static void kgsl_sync_timeline_release(struct kref *kref)
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 989658d58eca..43209b1532cb 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012-2014,2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2018-2020 The Linux Foundation. All rights reserved.
*/
#ifndef __KGSL_SYNC_H
#define __KGSL_SYNC_H
@@ -21,7 +21,7 @@
*/
struct kgsl_sync_timeline {
struct kref kref;
- char name[32];
+ char *name;
u64 fence_context;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index f90553bf2069..b71b457b8d87 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -360,6 +360,7 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
case MSM_GPI_QUP_MAX_EVENT:
/* fall through to stall impacted channel */
case MSM_GPI_QUP_CH_ERROR:
+ case MSM_GPI_QUP_FW_ERROR:
case MSM_GPI_QUP_PENDING_EVENT:
case MSM_GPI_QUP_EOT_DESC_MISMATCH:
break;
@@ -377,9 +378,9 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
}
if (cb_str->cb_event != MSM_GPI_QUP_NOTIFY)
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
- "GSI QN err:0x%x, status:0x%x, err:%d slv_addr: 0x%x R/W: %d\n",
+ "GSI QN err:0x%x, status:0x%x, err:%d\n",
cb_str->error_log.error_code, m_stat,
- cb_str->cb_event, gi2c->cur->addr, gi2c->cur->flags);
+ cb_str->cb_event);
}
static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
@@ -512,6 +513,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
struct msm_gpi_tre *go_t = &gi2c->go_t;
struct device *rx_dev = gi2c->wrapper_dev;
struct device *tx_dev = gi2c->wrapper_dev;
+ reinit_completion(&gi2c->xfer);
gi2c->cur = &msgs[i];
@@ -731,7 +733,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
int i, ret = 0, timeout = 0;
gi2c->err = 0;
- reinit_completion(&gi2c->xfer);
/* Client to respect system suspend */
if (!pm_runtime_enabled(gi2c->dev)) {
@@ -873,6 +874,8 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
geni_abort_m_cmd(gi2c->base);
}
}
+ gi2c->cur_wr = 0;
+ gi2c->cur_rd = 0;
if (mode == SE_DMA) {
if (gi2c->err) {
@@ -905,8 +908,6 @@ geni_i2c_txn_ret:
pm_runtime_mark_last_busy(gi2c->dev);
pm_runtime_put_autosuspend(gi2c->dev);
- gi2c->cur_wr = 0;
- gi2c->cur_rd = 0;
gi2c->cur = NULL;
gi2c->err = 0;
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 0d2816fa4036..5ea693b1d3c5 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
/* -------------------------------------------------------------------------
@@ -367,8 +367,6 @@ static int ipc_queue_write(struct npu_device *npu_dev,
/* Update qhdr_write_idx */
queue.qhdr_write_idx = new_write_idx;
- *is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
-
/* Update Write pointer -- queue.qhdr_write_idx */
exit:
/* Update TX request -- queue.qhdr_tx_req */
@@ -379,6 +377,13 @@ exit:
(size_t)&(queue.qhdr_write_idx) - (size_t)&queue))),
&queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx));
+ /* check if irq is required after write_idx is updated */
+ MEMR(npu_dev, (void *)((size_t)(offset + (uint32_t)(
+ (size_t)&(queue.qhdr_rx_req) - (size_t)&queue))),
+ (uint8_t *)&queue.qhdr_rx_req,
+ sizeof(queue.qhdr_rx_req));
+ *is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
+
return status;
}
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index eb467c025e2e..cd6795cb0bcb 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "synx: " fmt
@@ -1327,7 +1327,7 @@ static void synx_object_cleanup(struct synx_client *client)
struct synx_table_row *row =
synx_dev->synx_table + i;
- mutex_lock(&synx_dev->row_locks[row->index]);
+ mutex_lock(&synx_dev->row_locks[i]);
if (row->index) {
list_for_each_entry_safe(payload_info,
temp_payload_info,
@@ -1339,7 +1339,7 @@ static void synx_object_cleanup(struct synx_client *client)
}
}
}
- mutex_unlock(&synx_dev->row_locks[row->index]);
+ mutex_unlock(&synx_dev->row_locks[i]);
}
}
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b6d9d8300d6d..027eb3b562dc 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -162,6 +162,8 @@
#define INVALID_TUNING_PHASE -1
#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+#define sdhci_is_valid_gpio_testbus_trigger_int(_h) \
+ ((_h)->pdata->testbus_trigger_irq >= 0)
#define NUM_TUNING_PHASES 16
#define MAX_DRV_TYPES_SUPPORTED_HS200 4
@@ -1206,7 +1208,129 @@ static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
drv_type);
}
+#define MAX_TESTBUS 127
#define IPCAT_MINOR_MASK(val) ((val & 0x0fff0000) >> 0x10)
+#define TB_CONF_MASK 0x7f
+#define TB_TRIG_CONF 0xff80ffff
+#define TB_WRITE_STATUS BIT(8)
+
+/*
+ * This function needs to be used when getting mask and
+ * match pattern either from cmdline or sysfs
+ */
+void sdhci_msm_mm_dbg_configure(struct sdhci_host *host, u32 mask,
+ u32 match, u32 bit_shift, u32 testbus)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct platform_device *pdev = msm_host->pdev;
+ u32 val;
+ u32 enable_dbg_feature = 0;
+ int ret = 0;
+
+ if (testbus > MAX_TESTBUS) {
+ dev_err(&pdev->dev, "%s: testbus should be less than 128.\n",
+ __func__);
+ return;
+ }
+
+ /* Enable debug mode */
+ writel_relaxed(ENABLE_DBG,
+ host->ioaddr + SDCC_TESTBUS_CONFIG);
+ writel_relaxed(DUMMY,
+ host->ioaddr + SDCC_DEBUG_EN_DIS_REG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ SDCC_TESTBUS_CONFIG) | TESTBUS_EN),
+ host->ioaddr + SDCC_TESTBUS_CONFIG);
+
+ /* Enable particular feature */
+ enable_dbg_feature |= MM_TRIGGER_DISABLE;
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ SDCC_DEBUG_FEATURE_CFG_REG) | enable_dbg_feature),
+ host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+
+ /* Configure Mask & Match pattern */
+ writel_relaxed((mask << bit_shift),
+ host->ioaddr + SDCC_DEBUG_MASK_PATTERN_REG);
+ writel_relaxed((match << bit_shift),
+ host->ioaddr + SDCC_DEBUG_MATCH_PATTERN_REG);
+
+ /* Configure test bus for above mm */
+ writel_relaxed((testbus & TB_CONF_MASK), host->ioaddr +
+ SDCC_DEBUG_MM_TB_CFG_REG);
+ /* Initiate conf shifting */
+ writel_relaxed(BIT(8),
+ host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG);
+
+ /* Wait for test bus to be configured */
+ ret = readl_poll_timeout(host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG,
+ val, !(val & TB_WRITE_STATUS), 50, 1000);
+ if (ret == -ETIMEDOUT)
+ pr_err("%s: Unable to set mask & match\n",
+ mmc_hostname(host->mmc));
+
+ /* Direct test bus to GPIO */
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ SDCC_TESTBUS_CONFIG) & TB_TRIG_CONF)
+ | (testbus << 16)), host->ioaddr +
+ SDCC_TESTBUS_CONFIG);
+
+ /* Read back to ensure write went through */
+ readl_relaxed(host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+}
+
+/* Dummy func for Mask and Match show */
+static ssize_t show_mask_and_match(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ pr_info("%s: M&M show func\n", mmc_hostname(host->mmc));
+
+ return 0;
+}
+
+static ssize_t store_mask_and_match(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ unsigned long value;
+ char *token;
+ int i = 0;
+ u32 mask, match, bit_shift, testbus;
+
+ char *temp = (char *)buf;
+
+ if (!host)
+ return -EINVAL;
+
+ while ((token = strsep(&temp, " "))) {
+ kstrtoul(token, 0, &value);
+ if (i == 0)
+ mask = value;
+ else if (i == 1)
+ match = value;
+ else if (i == 2)
+ bit_shift = value;
+ else if (i == 3) {
+ testbus = value;
+ break;
+ }
+ i++;
+ }
+
+ pr_info("%s: M&M parameter passed are: %d %d %d %d\n",
+ mmc_hostname(host->mmc), mask, match, bit_shift, testbus);
+ pm_runtime_get_sync(dev);
+ sdhci_msm_mm_dbg_configure(host, mask, match, bit_shift, testbus);
+ pm_runtime_put_sync(dev);
+
+ pr_debug("%s: M&M debug enabled.\n", mmc_hostname(host->mmc));
+ return count;
+}
/* Enter sdcc debug mode */
void sdhci_msm_enter_dbg_mode(struct sdhci_host *host)
@@ -2887,6 +3011,16 @@ static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
msm_host->is_sdiowakeup_enabled = enable;
}
+static irqreturn_t sdhci_msm_testbus_trigger_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+
+ pr_info("%s: match happened against mask\n",
+ mmc_hostname(host->mmc));
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
@@ -5619,6 +5753,22 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
}
+ msm_host->pdata->testbus_trigger_irq = platform_get_irq_byname(pdev,
+ "tb_trig_irq");
+ if (sdhci_is_valid_gpio_testbus_trigger_int(msm_host)) {
+ dev_info(&pdev->dev, "%s: testbus_trigger_irq = %d\n", __func__,
+ msm_host->pdata->testbus_trigger_irq);
+ ret = request_irq(msm_host->pdata->testbus_trigger_irq,
+ sdhci_msm_testbus_trigger_irq,
+ IRQF_SHARED | IRQF_TRIGGER_RISING,
+ "sdhci-msm tb_trig", host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: request tb_trig IRQ %d: failed: %d\n",
+ __func__, msm_host->pdata->testbus_trigger_irq,
+ ret);
+ }
+ }
+
if (of_device_is_compatible(node, "qcom,sdhci-msm-cqe")) {
dev_dbg(&pdev->dev, "node with qcom,sdhci-msm-cqe\n");
ret = sdhci_msm_cqe_add_host(host, pdev);
@@ -5677,6 +5827,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
}
+ if (IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr +
+ SDCC_IP_CATALOG)) >= 2) {
+ msm_host->mask_and_match.show = show_mask_and_match;
+ msm_host->mask_and_match.store = store_mask_and_match;
+ sysfs_attr_init(&msm_host->mask_and_match.attr);
+ msm_host->mask_and_match.attr.name = "mask_and_match";
+ msm_host->mask_and_match.attr.mode = 0644;
+ ret = device_create_file(&pdev->dev,
+ &msm_host->mask_and_match);
+ if (ret) {
+ pr_err("%s: %s: failed creating M&M attr: %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ }
+ }
+
if (sdhci_msm_is_bootdevice(&pdev->dev))
mmc_flush_detect_work(host->mmc);
/* Successful initialization */
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 82b02bff0573..7d03b3d4fb1b 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -205,6 +205,7 @@ struct sdhci_msm_pltfm_data {
u32 *sup_clk_table;
unsigned char sup_clk_cnt;
int sdiowakeup_irq;
+ int testbus_trigger_irq;
struct sdhci_msm_pm_qos_data pm_qos_data;
u32 *bus_clk_table;
unsigned char bus_clk_cnt;
@@ -294,6 +295,7 @@ struct sdhci_msm_host {
struct completion pwr_irq_completion;
struct sdhci_msm_bus_vote msm_bus_vote;
struct device_attribute polling;
+ struct device_attribute mask_and_match;
u32 clk_rate; /* Keeps track of current clock rate that is set */
bool tuning_done;
bool calibration_done;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
index df92f4b3f49b..560e4ece3975 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* RMNET Data Generic Netlink
*
@@ -187,11 +187,8 @@ static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
tx_bytes_cur = node_p->tx_bytes;
if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
/* Dont send inactive pids to userspace */
- /* TODO: can remove from hash table probably */
- node_p->tx_bps = 0;
- node_p->timstamp_last_query =
- pid_bps_resp_ptr->timestamp;
- node_p->sched_boost_remaining_ms = 0;
+ hash_del(&node_p->list);
+ kfree(node_p);
continue;
}
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 4246f46f9598..ed5541e0dfa7 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1082,6 +1082,7 @@ static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
cnss_pr_err("Recovery is already in progress\n");
+ CNSS_ASSERT(0);
ret = -EINVAL;
goto out;
}
@@ -1134,7 +1135,8 @@ void cnss_schedule_recovery(struct device *dev,
struct cnss_recovery_data *data;
int gfp = GFP_KERNEL;
- cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
@@ -1743,6 +1745,26 @@ static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
ramdump_info->ramdump_pa);
}
+/**
+ * cnss_ignore_dump_data_reg_fail - Ignore Ramdump table register failure
+ * @ret: Error returned by msm_dump_data_register_nominidump
+ *
+ * If we don't have support for mem dump feature, ignore failure.
+ *
+ * Return: Same given error code if mem dump feature enabled, 0 otherwise
+ */
+#ifdef CONFIG_QCOM_MEMORY_DUMP_V2
+static int cnss_ignore_dump_data_reg_fail(int ret)
+{
+ return ret;
+}
+#else
+static int cnss_ignore_dump_data_reg_fail(int ret)
+{
+ return 0;
+}
+#endif
+
static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -1779,7 +1801,9 @@ static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
&dump_entry);
if (ret) {
- cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ ret = cnss_ignore_dump_data_reg_fail(ret);
+ cnss_pr_err("Failed to setup dump table, %s (%d)\n",
+ ret ? "Error" : "Ignoring", ret);
goto free_ramdump;
}
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 51c063629c4c..d9efc222cb75 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1064,16 +1064,20 @@ static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
ret = 0;
break;
case CNSS_MHI_SUSPEND:
+ mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
if (pci_priv->drv_connected_last)
ret = mhi_pm_fast_suspend(pci_priv->mhi_ctrl, true);
else
ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
+ mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
break;
case CNSS_MHI_RESUME:
+ mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
if (pci_priv->drv_connected_last)
ret = mhi_pm_fast_resume(pci_priv->mhi_ctrl, true);
else
ret = mhi_pm_resume(pci_priv->mhi_ctrl);
+ mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
break;
case CNSS_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
@@ -1881,7 +1885,7 @@ static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
struct ramdump_segment *ramdump_segs, *s;
int i, ret = 0;
- if (!info_v2->dump_data_valid ||
+ if (!info_v2->dump_data_valid || !dump_seg ||
dump_data->nentries == 0)
return 0;
@@ -3926,6 +3930,11 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
rddm_image = pci_priv->mhi_ctrl->rddm_image;
dump_data->nentries = 0;
+ if (!dump_seg) {
+ cnss_pr_warn("FW image dump collection not setup");
+ goto skip_dump;
+ }
+
cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
fw_image->entries);
@@ -3971,6 +3980,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
if (dump_data->nentries > 0)
plat_priv->ramdump_info_v2.dump_data_valid = true;
+skip_dump:
cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
complete(&plat_priv->rddm_complete);
}
@@ -3984,6 +3994,9 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
int i, j;
+ if (!dump_seg)
+ return;
+
fw_image = pci_priv->mhi_ctrl->fbc_image;
rddm_image = pci_priv->mhi_ctrl->rddm_image;
@@ -4216,6 +4229,29 @@ static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
return 0;
}
+static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
+ struct mhi_link_info *link_info)
+{
+ struct cnss_pci_data *pci_priv = mhi_ctrl->priv_data;
+ int ret = 0;
+
+ ret = msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
+ link_info->target_link_speed,
+ link_info->target_link_width);
+
+ if (ret)
+ return ret;
+
+ pci_priv->def_link_speed = link_info->target_link_speed;
+ pci_priv->def_link_width = link_info->target_link_width;
+
+ cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
+ link_info->target_link_speed,
+ link_info->target_link_width);
+
+ return 0;
+}
+
static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -4264,6 +4300,7 @@ static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
mhi_ctrl->status_cb = cnss_mhi_notify_status;
mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
+ mhi_ctrl->bw_scale = cnss_mhi_bw_scale;
mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
mhi_ctrl->sbl_size = SZ_512K;
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index a87725bf2eb7..8a119caac50d 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -43,6 +43,7 @@
#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER"
#define FCC_VOTER "FCC_VOTER"
#define MAIN_FCC_VOTER "MAIN_FCC_VOTER"
+#define PD_VOTER "PD_VOTER"
struct pl_data {
int pl_mode;
@@ -190,30 +191,57 @@ static int cp_get_parallel_mode(struct pl_data *chip, int mode)
return pval.intval;
}
-static int get_hvdcp3_icl_limit(struct pl_data *chip)
+static int get_adapter_icl_based_ilim(struct pl_data *chip)
{
- int main_icl, target_icl = -EINVAL;
+ int main_icl, adapter_icl = -EINVAL, rc = -EINVAL, final_icl = -EINVAL;
+ union power_supply_propval pval = {0, };
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PD_ACTIVE, &pval);
+ if (rc < 0)
+ pr_err("Failed to read PD_ACTIVE status rc=%d\n",
+ rc);
+ /* Check for QC 3, 3.5 and PPS adapters, return if its none of them */
if (chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3 &&
- chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3P5)
- return target_icl;
+ chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3P5 &&
+ pval.intval != POWER_SUPPLY_PD_PPS_ACTIVE)
+ return final_icl;
+
+ /*
+ * For HVDCP3/HVDCP_3P5 adapters, limit max. ILIM as:
+ * HVDCP3_ICL: Maximum ICL of HVDCP3 adapter(from DT
+ * configuration).
+ *
+ * For PPS adapters, limit max. ILIM to
+ * MIN(qc4_max_icl, PD_CURRENT_MAX)
+ */
+ if (pval.intval == POWER_SUPPLY_PD_PPS_ACTIVE) {
+ adapter_icl = min_t(int, chip->chg_param->qc4_max_icl_ua,
+ get_client_vote_locked(chip->usb_icl_votable,
+ PD_VOTER));
+ if (adapter_icl <= 0)
+ adapter_icl = chip->chg_param->qc4_max_icl_ua;
+ } else {
+ adapter_icl = chip->chg_param->hvdcp3_max_icl_ua;
+ }
/*
- * For HVDCP3 adapters, limit max. ILIM as follows:
- * HVDCP3_ICL: Maximum ICL of HVDCP3 adapter(from DT configuration)
* For Parallel input configurations:
- * VBUS: target_icl = HVDCP3_ICL - main_ICL
- * VMID: target_icl = HVDCP3_ICL
+ * VBUS: final_icl = adapter_icl - main_ICL
+ * VMID: final_icl = adapter_icl
*/
- target_icl = chip->chg_param->hvdcp3_max_icl_ua;
+ final_icl = adapter_icl;
if (cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE)
== POWER_SUPPLY_PL_USBIN_USBIN) {
main_icl = get_effective_result_locked(chip->usb_icl_votable);
- if ((main_icl >= 0) && (main_icl < target_icl))
- target_icl -= main_icl;
+ if ((main_icl >= 0) && (main_icl < adapter_icl))
+ final_icl = adapter_icl - main_icl;
}
- return target_icl;
+ pr_debug("charger_type=%d final_icl=%d adapter_icl=%d main_icl=%d\n",
+ chip->charger_type, final_icl, adapter_icl, main_icl);
+
+ return final_icl;
}
/*
@@ -244,7 +272,7 @@ static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
== POWER_SUPPLY_PL_OUTPUT_VPH)
return;
- target_icl = get_hvdcp3_icl_limit(chip);
+ target_icl = get_adapter_icl_based_ilim(chip);
ilim = (target_icl > 0) ? min(ilim, target_icl) : ilim;
rc = power_supply_get_property(chip->cp_master_psy,
@@ -742,7 +770,7 @@ static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
if (!chip->cp_ilim_votable)
chip->cp_ilim_votable = find_votable("CP_ILIM");
- target_icl = get_hvdcp3_icl_limit(chip) * 2;
+ target_icl = get_adapter_icl_based_ilim(chip) * 2;
total_fcc_ua -= chip->main_fcc_ua;
/*
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
index b25afd6ce0ff..36d91d23ef98 100644
--- a/drivers/power/supply/qcom/battery.h
+++ b/drivers/power/supply/qcom/battery.h
@@ -12,6 +12,7 @@ struct charger_param {
u32 smb_version;
u32 hvdcp3_max_icl_ua;
u32 forced_main_fcc;
+ u32 qc4_max_icl_ua;
};
int qcom_batt_init(struct charger_param *param);
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index ead483964315..c17c61e382a2 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "ALG: %s: " fmt, __func__
@@ -714,7 +714,7 @@ void cap_learning_abort(struct cap_learning *cl)
pr_debug("Aborting cap_learning\n");
cl->active = false;
cl->init_cap_uah = 0;
- mutex_lock(&cl->lock);
+ mutex_unlock(&cl->lock);
}
/**
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 4e29fb65741c..91ce355e9d81 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -66,6 +66,7 @@ struct qg_dt {
bool esr_disable;
bool esr_discharge_enable;
bool qg_ext_sense;
+ bool use_cp_iin_sns;
bool use_s7_ocv;
bool qg_sleep_config;
bool qg_fast_chg_cfg;
@@ -121,6 +122,7 @@ struct qpnp_qg {
struct power_supply *usb_psy;
struct power_supply *dc_psy;
struct power_supply *parallel_psy;
+ struct power_supply *cp_psy;
struct qg_esr_data esr_data[QG_MAX_ESR_COUNT];
/* status variable */
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index 0f09ef5040b7..294bacfea1dd 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -307,7 +307,7 @@ bool is_input_present(struct qpnp_qg *chip)
return is_usb_present(chip) || is_dc_present(chip);
}
-static bool is_parallel_available(struct qpnp_qg *chip)
+bool is_parallel_available(struct qpnp_qg *chip)
{
if (chip->parallel_psy)
return true;
@@ -319,6 +319,18 @@ static bool is_parallel_available(struct qpnp_qg *chip)
return true;
}
+bool is_cp_available(struct qpnp_qg *chip)
+{
+ if (chip->cp_psy)
+ return true;
+
+ chip->cp_psy = power_supply_get_by_name("charge_pump_master");
+ if (!chip->cp_psy)
+ return false;
+
+ return true;
+}
+
bool is_parallel_enabled(struct qpnp_qg *chip)
{
union power_supply_propval pval = {0, };
@@ -326,6 +338,9 @@ bool is_parallel_enabled(struct qpnp_qg *chip)
if (is_parallel_available(chip)) {
power_supply_get_property(chip->parallel_psy,
POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+ } else if (is_cp_available(chip)) {
+ power_supply_get_property(chip->cp_psy,
+ POWER_SUPPLY_PROP_CP_ENABLE, &pval);
}
return pval.intval ? true : false;
diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h
index 7a1892b0e627..ae9ef81f9ed9 100644
--- a/drivers/power/supply/qcom/qg-util.h
+++ b/drivers/power/supply/qcom/qg-util.h
@@ -19,6 +19,8 @@ bool is_usb_present(struct qpnp_qg *chip);
bool is_dc_present(struct qpnp_qg *chip);
bool is_input_present(struct qpnp_qg *chip);
bool is_parallel_enabled(struct qpnp_qg *chip);
+bool is_cp_available(struct qpnp_qg *chip);
+bool is_parallel_available(struct qpnp_qg *chip);
int qg_write_monotonic_soc(struct qpnp_qg *chip, int msoc);
int qg_get_battery_temp(struct qpnp_qg *chip, int *batt_temp);
int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua);
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index ccc38f8a223b..e61ff4b8823a 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -2415,13 +2415,15 @@ static int qg_parallel_status_update(struct qpnp_qg *chip)
"Parallel status changed Enabled=%d\n", parallel_enabled);
mutex_lock(&chip->data_lock);
-
/*
- * Parallel charger uses the same external sense, hence do not
- * enable SMB sensing if PMI632 is configured for external sense.
+ * dt.qg_ext_sense = Uses external rsense, if defined do not
+ * enable SMB sensing (for non-CP parallel charger).
+ * dt.cp_iin_sns = Uses CP IIN_SNS, enable SMB sensing (for CP charger).
*/
- if (!chip->dt.qg_ext_sense)
- update_smb = true;
+ if (is_cp_available(chip))
+ update_smb = chip->dt.use_cp_iin_sns ? true : false;
+ else if (is_parallel_available(chip))
+ update_smb = chip->dt.qg_ext_sense ? false : true;
rc = process_rt_fifo_data(chip, update_smb);
if (rc < 0)
@@ -2672,7 +2674,8 @@ static int qg_notifier_cb(struct notifier_block *nb,
if ((strcmp(psy->desc->name, "battery") == 0)
|| (strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "usb") == 0)
- || (strcmp(psy->desc->name, "dc") == 0)) {
+ || (strcmp(psy->desc->name, "dc") == 0)
+ || (strcmp(psy->desc->name, "charge_pump_master") == 0)) {
/*
* We cannot vote for awake votable here as that takes
* a mutex lock and this is executed in an atomic context.
@@ -4263,6 +4266,9 @@ static int qg_parse_dt(struct qpnp_qg *chip)
chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
+ chip->dt.use_cp_iin_sns = of_property_read_bool(node,
+ "qcom,use-cp-iin-sns");
+
chip->dt.use_s7_ocv = of_property_read_bool(node, "qcom,qg-use-s7-ocv");
rc = of_property_read_u32(node, "qcom,min-sleep-time-secs", &temp);
@@ -4350,6 +4356,7 @@ static int process_suspend(struct qpnp_qg *chip)
return rc;
}
sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK;
+ sleep_fifo_length++;
if (chip->dt.qg_sleep_config) {
qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Forcing S2_SLEEP\n");
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index f41bcb19eb19..a6cf64b234f6 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -432,6 +432,7 @@ static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
#define MICRO_P1A 100000
#define MICRO_1PA 1000000
#define MICRO_3PA 3000000
+#define MICRO_4PA 4000000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
#define DEFAULT_WD_BARK_TIME 64
#define DEFAULT_WD_SNARL_TIME_8S 0x07
@@ -599,6 +600,12 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
if (chg->chg_param.hvdcp3_max_icl_ua <= 0)
chg->chg_param.hvdcp3_max_icl_ua = MICRO_3PA;
+ /* Used only in Adapter CV mode of operation */
+ of_property_read_u32(node, "qcom,qc4-max-icl-ua",
+ &chg->chg_param.qc4_max_icl_ua);
+ if (chg->chg_param.qc4_max_icl_ua <= 0)
+ chg->chg_param.qc4_max_icl_ua = MICRO_4PA;
+
return 0;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 78a9b9256415..18ae18ef0b4d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8501,8 +8501,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- /* Enable WB only for UFS-3.1 OR if desc len >= 0x59 */
+ /* Enable WB only for UFS-3.1 or UFS-2.2 OR if desc len >= 0x59 */
if ((dev_desc->wspecversion >= 0x310) ||
+ (dev_desc->wspecversion == 0x220) ||
(dev_desc->wmanufacturerid == UFS_VENDOR_TOSHIBA &&
dev_desc->wspecversion >= 0x300 &&
hba->desc_size.dev_desc >= 0x59)) {
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index ada4be813924..d8e2fd368006 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -1580,7 +1580,8 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
if (drvdata->ram_size <= *ppos)
return 0;
- if ((*ppos + len) > drvdata->ram_size)
+ if ((*ppos + len) < len
+ || (*ppos + len) > drvdata->ram_size)
len = (drvdata->ram_size - *ppos);
buf = kzalloc(len, GFP_KERNEL);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index d806f562978b..3fb84756cb66 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -846,6 +846,11 @@ static int icnss_driver_event_server_arrive(void *data)
clear_bit(ICNSS_FW_DOWN, &penv->state);
icnss_ignore_fw_timeout(false);
+ if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
+ icnss_pr_err("QMI Server already in Connected State\n");
+ ICNSS_ASSERT(0);
+ }
+
ret = icnss_connect_to_fw_server(penv, data);
if (ret)
goto fail;
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index b869497d911c..ae57a27ab499 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -448,6 +448,8 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
set_bit(ICNSS_WLFW_EXISTS, &priv->state);
clear_bit(ICNSS_FW_DOWN, &priv->state);
+ clear_bit(ICNSS_FW_READY, &priv->state);
+
icnss_ignore_fw_timeout(false);
if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
index 5403a0c1e90a..d4418baaa1ab 100644
--- a/drivers/soc/qcom/rpmh_master_stat.c
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
@@ -126,7 +126,6 @@ static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
{
ssize_t length;
int i = 0;
- size_t size = 0;
struct msm_rpmh_master_stats *record = NULL;
mutex_lock(&rpmh_stats_mutex);
@@ -141,7 +140,7 @@ static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
for (i = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
record = (struct msm_rpmh_master_stats *) qcom_smem_get(
rpmh_masters[i].pid,
- rpmh_masters[i].smem_id, &size);
+ rpmh_masters[i].smem_id, NULL);
if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
length += msm_rpmh_master_stats_print_data(
buf + length, PAGE_SIZE - length,
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index c667518f090b..4e525203be58 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -413,11 +413,13 @@ err_free_sg2:
buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
if (vmid > 0)
- ion_hyp_unassign_sg(table, &vmid, 1, true, false);
+ if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
+ goto err_free_table_sync;
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg),
get_order(sg->length));
+err_free_table_sync:
if (nents_sync)
sg_free_table(&table_sync);
err_free_sg:
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index c4099614f859..d79e32fd291f 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
@@ -273,7 +273,7 @@ static void tsens_therm_fwk_notify(struct work_struct *work)
}
TSENS_DBG(tmdev, "Calling trip_temp for sensor %d\n",
i);
- of_thermal_handle_trip_temp(tmdev->sensor[i].tzd, temp);
+ of_thermal_handle_trip(tmdev->sensor[i].tzd);
}
}
if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
@@ -284,7 +284,7 @@ static void tsens_therm_fwk_notify(struct work_struct *work)
return;
}
TSENS_DBG(tmdev, "Calling trip_temp for sensor %d\n", i);
- of_thermal_handle_trip_temp(tmdev->min_temp.tzd, temp);
+ of_thermal_handle_trip(tmdev->min_temp.tzd);
}
}
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 941f7f437843..d04ea031aa43 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -83,12 +83,70 @@ static void msm_tsens_convert_temp(int last_temp, int *temp)
*temp = last_temp * TSENS_TM_SCALE_DECI_MILLIDEG;
}
+static int __tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+ void __iomem *srot_addr;
+ void __iomem *sensor_int_mask_addr;
+ unsigned int srot_val, crit_mask, crit_val;
+ void __iomem *int_mask_addr;
+
+ srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+ srot_val = readl_relaxed(srot_addr);
+ if (!(srot_val & TSENS_EN)) {
+ pr_err("TSENS device is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (tmdev->ctrl_data->cycle_monitor) {
+ sensor_int_mask_addr =
+ TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+ crit_mask = readl_relaxed(sensor_int_mask_addr);
+ crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
+ if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
+ writel_relaxed((crit_mask | crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ else
+ writel_relaxed((crit_mask & ~crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ /*Update critical cycle monitoring*/
+ mb();
+ }
+
+ if (tmdev->ctrl_data->wd_bark) {
+ sensor_int_mask_addr =
+ TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+ crit_mask = readl_relaxed(sensor_int_mask_addr);
+ crit_val = TSENS_TM_CRITICAL_WD_BARK;
+ if (tmdev->ctrl_data->wd_bark_mask)
+ writel_relaxed((crit_mask | crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ else
+ writel_relaxed((crit_mask & ~crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ /*Update watchdog monitoring*/
+ mb();
+ }
+
+ int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
+ writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
+
+ writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+ TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+ TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+
+ return 0;
+}
+
static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
{
struct tsens_device *tmdev = NULL, *tmdev_itr;
unsigned int code, ret, tsens_ret;
void __iomem *sensor_addr, *trdy;
- int last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
+ int rc = 0, last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
static atomic_t in_tsens_reinit;
if (!sensor)
@@ -172,6 +230,13 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
/* Notify thermal fwk */
list_for_each_entry(tmdev_itr,
&tsens_device_list, list) {
+ rc = __tsens2xxx_hw_init(tmdev_itr);
+ if (rc) {
+ pr_err(
+ "%s: Failed to re-initialize TSENS controller\n",
+ __func__);
+ BUG();
+ }
queue_work(tmdev_itr->tsens_reinit_work,
&tmdev_itr->therm_fwk_notify);
}
@@ -713,58 +778,11 @@ static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
static int tsens2xxx_hw_init(struct tsens_device *tmdev)
{
- void __iomem *srot_addr;
- void __iomem *sensor_int_mask_addr;
- unsigned int srot_val, crit_mask, crit_val;
- void __iomem *int_mask_addr;
-
- srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
- srot_val = readl_relaxed(srot_addr);
- if (!(srot_val & TSENS_EN)) {
- pr_err("TSENS device is not enabled\n");
- return -ENODEV;
- }
-
- if (tmdev->ctrl_data->cycle_monitor) {
- sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
- crit_mask = readl_relaxed(sensor_int_mask_addr);
- crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
- if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
- writel_relaxed((crit_mask | crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- else
- writel_relaxed((crit_mask & ~crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- /*Update critical cycle monitoring*/
- mb();
- }
-
- if (tmdev->ctrl_data->wd_bark) {
- sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
- crit_mask = readl_relaxed(sensor_int_mask_addr);
- crit_val = TSENS_TM_CRITICAL_WD_BARK;
- if (tmdev->ctrl_data->wd_bark_mask)
- writel_relaxed((crit_mask | crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- else
- writel_relaxed((crit_mask & ~crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- /*Update watchdog monitoring*/
- mb();
- }
-
- int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
- writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
+ int rc = 0;
- writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
- TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
- TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+ rc = __tsens2xxx_hw_init(tmdev);
+ if (rc)
+ return rc;
spin_lock_init(&tmdev->tsens_crit_lock);
spin_lock_init(&tmdev->tsens_upp_low_lock);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 0d4c0e6b2616..2995ef8f92b8 100644..100755
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -10,6 +10,7 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -118,6 +119,26 @@
#define IPC_LOG_TX_RX_PAGES (10)
#define DATA_BYTES_PER_LINE (32)
+#define M_IRQ_BITS (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN |\
+ M_CMD_CANCEL_EN | M_CMD_ABORT_EN)
+#define S_IRQ_BITS (S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN |\
+ S_CMD_CANCEL_EN | S_CMD_ABORT_EN)
+#define DMA_TX_IRQ_BITS (TX_RESET_DONE | TX_DMA_DONE |\
+ TX_GENI_CANCEL_IRQ)
+#define DMA_RX_IRQ_BITS (RX_EOT | RX_GENI_CANCEL_IRQ |\
+ RX_RESET_DONE | UART_DMA_RX_ERRS |\
+ UART_DMA_RX_PARITY_ERR | UART_DMA_RX_BREAK |\
+ RX_DMA_DONE)
+
+/* Required for polling for 100 msecs */
+#define POLL_WAIT_TIMEOUT_MSEC 100
+
+/*
+ * Number of iterrations required while polling
+ * where each iterration has a delay of 100 usecs
+ */
+#define POLL_ITERATIONS 1000
+
#define IPC_LOG_MSG(ctx, x...) do { \
if (ctx) \
ipc_log_string(ctx, x); \
@@ -136,7 +157,7 @@ struct msm_geni_serial_ver_info {
struct msm_geni_serial_port {
struct uart_port uport;
- char name[20];
+ const char *name;
unsigned int tx_fifo_depth;
unsigned int tx_fifo_width;
unsigned int rx_fifo_depth;
@@ -151,7 +172,8 @@ struct msm_geni_serial_port {
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx);
+ bool drop_rx,
+ unsigned long *flags);
struct device *wrapper_dev;
struct se_geni_rsc serial_rsc;
dma_addr_t tx_dma;
@@ -176,6 +198,12 @@ struct msm_geni_serial_port {
bool startup_in_progress;
bool is_console;
bool rumi_platform;
+ bool m_cmd_done;
+ bool s_cmd_done;
+ bool m_cmd;
+ bool s_cmd;
+ struct completion m_cmd_timeout;
+ struct completion s_cmd_timeout;
};
static const struct uart_ops msm_geni_serial_pops;
@@ -185,12 +213,12 @@ static int handle_rx_console(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx);
+ bool drop_rx, unsigned long *flags);
static int handle_rx_hs(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx);
+ bool drop_rx, unsigned long *flags);
static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
static int msm_geni_serial_power_on(struct uart_port *uport);
static void msm_geni_serial_power_off(struct uart_port *uport);
@@ -209,6 +237,160 @@ static int uart_line_id;
static struct msm_geni_serial_port msm_geni_console_port;
static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
+static void msm_geni_serial_handle_isr(struct uart_port *uport,
+ unsigned long *flags);
+
+/*
+ * The below API is required to check if uport->lock (spinlock)
+ * is taken by the serial layer or not. If the lock is not taken
+ * then we can rely on the isr to be fired and if the lock is taken
+ * by the serial layer then we need to poll for the interrupts.
+ *
+ * Returns true(1) if spinlock is already taken by framework (serial layer)
+ * Return false(0) if spinlock is not taken by framework.
+ */
+static int msm_geni_serial_spinlocked(struct uart_port *uport)
+{
+ unsigned long flags;
+ bool locked;
+
+ locked = spin_trylock_irqsave(&uport->lock, flags);
+ if (locked)
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ return !locked;
+}
+
+/*
+ * We are enabling the interrupts once the polling operations
+ * is completed.
+ */
+static void msm_geni_serial_enable_interrupts(struct uart_port *uport)
+{
+ unsigned int geni_m_irq_en, geni_s_irq_en;
+ unsigned int dma_m_irq_en, dma_s_irq_en;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_EN);
+ if (port->xfer_mode == SE_DMA) {
+ dma_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_DMA_TX_IRQ_EN);
+ dma_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_DMA_RX_IRQ_EN);
+ }
+
+ geni_m_irq_en |= M_IRQ_BITS;
+ geni_s_irq_en |= S_IRQ_BITS;
+ if (port->xfer_mode == SE_DMA) {
+ dma_m_irq_en |= DMA_TX_IRQ_BITS;
+ dma_s_irq_en |= DMA_RX_IRQ_BITS;
+ }
+
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+ if (port->xfer_mode == SE_DMA) {
+ geni_write_reg_nolog(dma_m_irq_en, uport->membase,
+ SE_DMA_TX_IRQ_EN);
+ geni_write_reg_nolog(dma_s_irq_en, uport->membase,
+ SE_DMA_RX_IRQ_EN);
+ }
+}
+
+/* Try disabling interrupts in order to do polling in an atomic contexts. */
+static bool msm_serial_try_disable_interrupts(struct uart_port *uport)
+{
+ unsigned int geni_m_irq_en, geni_s_irq_en;
+ unsigned int dma_m_irq_en, dma_s_irq_en;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ /*
+ * We don't need to disable interrupts if spinlock is not taken
+ * by framework as we can rely on ISR.
+ */
+ if (!msm_geni_serial_spinlocked(uport))
+ return false;
+
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_S_IRQ_EN);
+ if (port->xfer_mode == SE_DMA) {
+ dma_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_DMA_TX_IRQ_EN);
+ dma_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_DMA_RX_IRQ_EN);
+ }
+
+ geni_m_irq_en &= ~M_IRQ_BITS;
+ geni_s_irq_en &= ~S_IRQ_BITS;
+ if (port->xfer_mode == SE_DMA) {
+ dma_m_irq_en &= ~DMA_TX_IRQ_BITS;
+ dma_s_irq_en &= ~DMA_RX_IRQ_BITS;
+ }
+
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+ if (port->xfer_mode == SE_DMA) {
+ geni_write_reg_nolog(dma_m_irq_en, uport->membase,
+ SE_DMA_TX_IRQ_EN);
+ geni_write_reg_nolog(dma_s_irq_en, uport->membase,
+ SE_DMA_RX_IRQ_EN);
+ }
+
+ return true;
+}
+
+/*
+ * We need to poll for interrupt if we are in an atomic context
+ * as serial framework might be taking spinlocks and depend on the isr
+ * in a non-atomic context. This API decides wheather to poll for
+ * interrupt or depend on the isr based on in_atomic() call.
+ */
+bool geni_wait_for_cmd_done(struct uart_port *uport, bool is_irq_masked)
+{
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ unsigned long timeout = POLL_ITERATIONS;
+ unsigned long ret;
+ unsigned long flags = 0;
+
+ /*
+ * We need to do polling if spinlock is taken
+ * by framework as we cannot rely on ISR.
+ */
+ if (is_irq_masked) {
+ /*
+ * Polling is done for 1000 iterrations with
+ * 10 usecs interval which in total accumulates
+ * to 10 msecs
+ */
+ if (msm_port->m_cmd) {
+ while (!msm_port->m_cmd_done && timeout > 0) {
+ msm_geni_serial_handle_isr(uport, &flags);
+ timeout--;
+ udelay(100);
+ }
+ } else if (msm_port->s_cmd) {
+ while (!msm_port->s_cmd_done && timeout > 0) {
+ msm_geni_serial_handle_isr(uport, &flags);
+ timeout--;
+ udelay(100);
+ }
+ }
+ } else {
+ /* Waiting for 10 milli second for interrupt to be fired */
+ if (msm_port->m_cmd)
+ ret = wait_for_completion_timeout
+ (&msm_port->m_cmd_timeout,
+ msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC));
+ else if (msm_port->s_cmd)
+ ret = wait_for_completion_timeout
+ (&msm_port->s_cmd_timeout,
+ msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC));
+ }
+
+ return ret ? 0 : 1;
+}
static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
{
@@ -528,7 +710,6 @@ static int msm_geni_serial_power_on(struct uart_port *uport)
} else {
pm_runtime_get_noresume(uport->dev);
pm_runtime_set_active(uport->dev);
- enable_irq(uport->irq);
}
pm_runtime_enable(uport->dev);
if (lock)
@@ -573,7 +754,6 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport,
unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS;
unsigned long total_iter = 1000;
-
if (uport->private_data && !uart_console(uport)) {
port = GET_DEV_PORT(uport);
baud = (port->cur_baud ? port->cur_baud : 115200);
@@ -614,58 +794,32 @@ static void msm_geni_serial_setup_tx(struct uart_port *uport,
mb();
}
-static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport)
+static void msm_geni_serial_poll_tx_done(struct uart_port *uport)
{
int done = 0;
- unsigned int irq_clear = M_CMD_DONE_EN;
+ unsigned int irq_clear = 0;
done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
- geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase,
- SE_GENI_M_CMD_CTRL_REG);
- irq_clear |= M_CMD_ABORT_EN;
- msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN, true);
+ /*
+ * Failure IPC logs are not added as this API is
+ * used by early console and it doesn't have log handle.
+ */
+ geni_write_reg(M_GENI_CMD_CANCEL, uport->membase,
+ SE_GENI_M_CMD_CTRL_REG);
+ done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true);
+ if (!done) {
+ geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase,
+ SE_GENI_M_CMD_CTRL_REG);
+ msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ }
}
- geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
-}
-
-static void msm_geni_serial_abort_rx(struct uart_port *uport)
-{
- unsigned int irq_clear = S_CMD_DONE_EN;
- struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-
- geni_abort_s_cmd(uport->membase);
- /* Ensure this goes through before polling. */
- mb();
- irq_clear |= S_CMD_ABORT_EN;
- msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
- S_GENI_CMD_ABORT, false);
- geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR);
- /* FORCE_DEFAULT makes RFR default high, hence set manually Low */
- msm_geni_serial_set_manual_flow(true, port);
- geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG);
-}
-
-static void msm_geni_serial_complete_rx_eot(struct uart_port *uport)
-{
- int poll_done = 0, tries = 0;
- struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-
- do {
- poll_done = msm_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
- RX_EOT, true);
- tries++;
- } while (!poll_done && tries < 5);
- if (!poll_done)
- IPC_LOG_MSG(port->ipc_log_misc,
- "%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__,
- geni_read_reg_nolog(uport->membase, SE_GENI_STATUS),
- geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0));
- else
- geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR);
+ irq_clear = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_STATUS);
+ geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -721,7 +875,9 @@ static void msm_geni_serial_poll_put_char(struct uart_port *uport,
* Ensure FIFO write goes through before polling for status but.
*/
mb();
- msm_geni_serial_poll_cancel_tx(uport);
+ msm_serial_try_disable_interrupts(uport);
+ msm_geni_serial_poll_tx_done(uport);
+ msm_geni_serial_enable_interrupts(uport);
}
#endif
@@ -757,6 +913,7 @@ __msm_geni_serial_console_write(struct uart_port *uport, const char *s,
SE_GENI_TX_WATERMARK_REG);
msm_geni_serial_setup_tx(uport, bytes_to_send);
i = 0;
+
while (i < count) {
u32 chars_to_write = 0;
u32 avail_fifo_bytes = (fifo_depth - tx_wm);
@@ -781,7 +938,9 @@ __msm_geni_serial_console_write(struct uart_port *uport, const char *s,
mb();
i += chars_to_write;
}
- msm_geni_serial_poll_cancel_tx(uport);
+ msm_serial_try_disable_interrupts(uport);
+ msm_geni_serial_poll_tx_done(uport);
+ msm_geni_serial_enable_interrupts(uport);
}
static void msm_geni_serial_console_write(struct console *co, const char *s,
@@ -792,6 +951,8 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
bool locked = true;
unsigned long flags;
unsigned int geni_status;
+ bool timeout;
+ bool is_irq_masked;
int irq_en;
/* Max 1 port supported as of now */
@@ -810,24 +971,45 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
/* Cancel the current write to log the fault */
- if (!locked) {
+ if ((geni_status & M_GENI_CMD_ACTIVE) && !locked) {
+ port->m_cmd_done = false;
+ port->m_cmd = true;
+ reinit_completion(&port->m_cmd_timeout);
+ is_irq_masked = msm_serial_try_disable_interrupts(uport);
geni_cancel_m_cmd(uport->membase);
- if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN, true)) {
+
+ /*
+ * console should be in polling mode. Hence directly pass true
+ * as argument for wait_for_cmd_done here to handle cancel tx
+ * in polling mode.
+ */
+ timeout = geni_wait_for_cmd_done(uport, true);
+ if (timeout) {
+ IPC_LOG_MSG(port->console_log,
+ "%s: tx_cancel failed 0x%x\n",
+ __func__, geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS));
+
+ reinit_completion(&port->m_cmd_timeout);
geni_abort_m_cmd(uport->membase);
- msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN, true);
- geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
+ timeout = geni_wait_for_cmd_done(uport, true);
+ if (timeout)
+ IPC_LOG_MSG(port->console_log,
+ "%s: tx abort failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS));
}
- writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
+
+ msm_geni_serial_enable_interrupts(uport);
+ port->m_cmd = false;
} else if ((geni_status & M_GENI_CMD_ACTIVE) &&
!port->cur_tx_remaining) {
/* It seems we can interrupt existing transfers unless all data
* has been sent, in which case we need to look for done first.
*/
- msm_geni_serial_poll_cancel_tx(uport);
+ msm_serial_try_disable_interrupts(uport);
+ msm_geni_serial_poll_tx_done(uport);
+ msm_geni_serial_enable_interrupts(uport);
/* Enable WM interrupt for every new console write op */
if (uart_circ_chars_pending(&uport->state->xmit)) {
@@ -851,12 +1033,13 @@ static int handle_rx_console(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx)
+ bool drop_rx, unsigned long *flags)
{
int i, c;
unsigned char *rx_char;
struct tty_port *tport;
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ bool locked;
tport = &uport->state->port;
for (i = 0; i < rx_fifo_wc; i++) {
@@ -890,9 +1073,14 @@ static int handle_rx_console(struct uart_port *uport,
* release the port lock before calling tty_flip_buffer_push()
* to avoid deadlock scenarios.
*/
- spin_unlock(&uport->lock);
- tty_flip_buffer_push(tport);
- spin_lock(&uport->lock);
+ locked = msm_geni_serial_spinlocked(uport);
+ if (locked) {
+ spin_unlock_irqrestore(&uport->lock, *flags);
+ tty_flip_buffer_push(tport);
+ spin_lock_irqsave(&uport->lock, *flags);
+ } else {
+ tty_flip_buffer_push(tport);
+ }
}
return 0;
}
@@ -901,7 +1089,7 @@ static int handle_rx_console(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx)
+ bool drop_rx, unsigned long *flags)
{
return -EPERM;
}
@@ -913,8 +1101,8 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport)
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
struct circ_buf *xmit = &uport->state->xmit;
unsigned int xmit_size;
- u32 geni_status;
- bool done = false;
+ unsigned int dma_dbg;
+ bool timeout, is_irq_masked;
int ret = 0;
xmit_size = uart_circ_chars_pending(xmit);
@@ -932,41 +1120,82 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport)
msm_geni_serial_setup_tx(uport, xmit_size);
ret = geni_se_tx_dma_prep(msm_port->wrapper_dev, uport->membase,
&xmit->buf[xmit->tail], xmit_size, &msm_port->tx_dma);
+
if (!ret) {
msm_port->xmit_size = xmit_size;
- return ret;
- }
-
- IPC_LOG_MSG(msm_port->ipc_log_misc,
- "%s: TX DMA map Fail %d\n", __func__, ret);
- geni_write_reg_nolog(0, uport->membase,
- SE_UART_TX_TRANS_LEN);
- geni_cancel_m_cmd(uport->membase);
- if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN, true)) {
- geni_status = geni_read_reg_nolog(uport->membase,
- SE_GENI_STATUS);
+ } else {
IPC_LOG_MSG(msm_port->ipc_log_misc,
- "%s: TX Cancel Fail 0x%x\n",
- __func__, geni_status);
- geni_abort_m_cmd(uport->membase);
- done = msm_geni_serial_poll_bit(uport,
- SE_GENI_M_IRQ_STATUS, M_CMD_ABORT_EN, true);
- if (!done) {
- geni_status =
- geni_read_reg_nolog(uport->membase,
- SE_GENI_STATUS);
+ "%s: TX DMA map Fail %d\n", __func__, ret);
+
+ geni_write_reg_nolog(0, uport->membase, SE_UART_TX_TRANS_LEN);
+ msm_port->m_cmd_done = false;
+ msm_port->m_cmd = true;
+ reinit_completion(&msm_port->m_cmd_timeout);
+
+ /*
+ * Try disabling interrupts before giving the
+ * cancel command as this might be in an atomic context.
+ */
+ is_irq_masked = msm_serial_try_disable_interrupts(uport);
+ geni_cancel_m_cmd(uport->membase);
+
+ timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
+ if (timeout) {
+ IPC_LOG_MSG(msm_port->console_log,
+ "%s: tx_cancel fail 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
+
IPC_LOG_MSG(msm_port->ipc_log_misc,
- "%s: TX Abort fail 0x%x\n",
- __func__, geni_status);
+ "%s: tx_cancel failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
+
+ msm_port->m_cmd_done = false;
+ reinit_completion(&msm_port->m_cmd_timeout);
+ /* Give abort command as cancel command failed */
+ geni_abort_m_cmd(uport->membase);
+
+ timeout = geni_wait_for_cmd_done(uport,
+ is_irq_masked);
+ if (timeout) {
+ IPC_LOG_MSG(msm_port->console_log,
+ "%s: tx abort failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS));
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s: tx abort failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS));
+ }
}
- geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
+
+ if (msm_port->xfer_mode == SE_DMA) {
+ dma_dbg = geni_read_reg(uport->membase,
+ SE_DMA_DEBUG_REG0);
+ if (dma_dbg & DMA_TX_ACTIVE) {
+ msm_port->m_cmd_done = false;
+ reinit_completion(&msm_port->m_cmd_timeout);
+ geni_write_reg_nolog(1, uport->membase,
+ SE_DMA_TX_FSM_RST);
+
+ timeout = geni_wait_for_cmd_done(uport,
+ is_irq_masked);
+ if (timeout)
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s: tx fsm reset failed\n", __func__);
+ }
+
+ if (msm_port->tx_dma) {
+ geni_se_tx_dma_unprep(msm_port->wrapper_dev,
+ msm_port->tx_dma, msm_port->xmit_size);
+ msm_port->tx_dma = (dma_addr_t)NULL;
+ }
+ }
+ msm_port->xmit_size = 0;
+ /* Enable the interrupts once the cancel operation is done. */
+ msm_geni_serial_enable_interrupts(uport);
+ msm_port->m_cmd = false;
}
- geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
- msm_port->tx_dma = (dma_addr_t)NULL;
- msm_port->xmit_size = 0;
+
return ret;
}
@@ -1029,50 +1258,14 @@ exit_start_tx:
msm_geni_serial_power_off(uport);
}
-static void msm_geni_serial_tx_fsm_rst(struct uart_port *uport)
-{
- unsigned int tx_irq_en;
- int done = 0;
- int tries = 0;
-
- tx_irq_en = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_EN);
- geni_write_reg_nolog(0, uport->membase, SE_DMA_TX_IRQ_EN_SET);
- geni_write_reg_nolog(1, uport->membase, SE_DMA_TX_FSM_RST);
- do {
- done = msm_geni_serial_poll_bit(uport, SE_DMA_TX_IRQ_STAT,
- TX_RESET_DONE, true);
- tries++;
- } while (!done && tries < 5);
- geni_write_reg_nolog(TX_DMA_DONE | TX_RESET_DONE, uport->membase,
- SE_DMA_TX_IRQ_CLR);
- geni_write_reg_nolog(tx_irq_en, uport->membase, SE_DMA_TX_IRQ_EN_SET);
-}
-
static void stop_tx_sequencer(struct uart_port *uport)
{
- unsigned int geni_m_irq_en;
unsigned int geni_status;
+ bool timeout, is_irq_masked;
+ unsigned int dma_dbg;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- bool done = false;
- geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en &= ~M_CMD_DONE_EN;
- if (port->xfer_mode == FIFO_MODE) {
- geni_m_irq_en &= ~M_TX_FIFO_WATERMARK_EN;
- geni_write_reg_nolog(0, uport->membase,
- SE_GENI_TX_WATERMARK_REG);
- } else if (port->xfer_mode == SE_DMA) {
- if (port->tx_dma) {
- msm_geni_serial_tx_fsm_rst(uport);
- geni_se_tx_dma_unprep(port->wrapper_dev, port->tx_dma,
- port->xmit_size);
- port->tx_dma = (dma_addr_t)NULL;
- }
- }
- port->xmit_size = 0;
- geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
- geni_status = geni_read_reg_nolog(uport->membase,
- SE_GENI_STATUS);
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
/* Possible stop tx is called multiple times. */
if (!(geni_status & M_GENI_CMD_ACTIVE))
return;
@@ -1080,28 +1273,65 @@ static void stop_tx_sequencer(struct uart_port *uport)
IPC_LOG_MSG(port->ipc_log_misc,
"%s: Start GENI: 0x%x\n", __func__, geni_status);
+ port->m_cmd_done = false;
+ port->m_cmd = true;
+ reinit_completion(&port->m_cmd_timeout);
+ /*
+ * Try to mask the interrupts before giving the
+ * cancel command as this might be in an atomic context
+ * from framework driver.
+ */
+ is_irq_masked = msm_serial_try_disable_interrupts(uport);
geni_cancel_m_cmd(uport->membase);
- if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN, true)) {
- geni_status = geni_read_reg_nolog(uport->membase,
- SE_GENI_STATUS);
- IPC_LOG_MSG(port->ipc_log_misc,
- "%s: TX Cancel Fail 0x%x\n", __func__, geni_status);
+
+ timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
+ if (timeout) {
+ IPC_LOG_MSG(port->console_log, "%s: tx_cancel failed 0x%x\n",
+ __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
+ IPC_LOG_MSG(port->ipc_log_misc, "%s: tx_cancel failed 0x%x\n",
+ __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
+
+ port->m_cmd_done = false;
+ reinit_completion(&port->m_cmd_timeout);
geni_abort_m_cmd(uport->membase);
- done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN, true);
- if (!done) {
- geni_status = geni_read_reg_nolog(uport->membase,
- SE_GENI_STATUS);
+
+ timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
+ if (timeout) {
+ IPC_LOG_MSG(port->console_log,
+ "%s: tx abort failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
IPC_LOG_MSG(port->ipc_log_misc,
- "%s TX Abort fail 0x%x\n",
- __func__, geni_status);
+ "%s: tx abort failed 0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
}
- geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
}
- geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
+
+ if (port->xfer_mode == SE_DMA) {
+ dma_dbg = geni_read_reg(uport->membase, SE_DMA_DEBUG_REG0);
+ if (dma_dbg & DMA_TX_ACTIVE) {
+ port->m_cmd_done = false;
+ reinit_completion(&port->m_cmd_timeout);
+ geni_write_reg_nolog(1, uport->membase,
+ SE_DMA_TX_FSM_RST);
+
+ timeout = geni_wait_for_cmd_done(uport,
+ is_irq_masked);
+ if (timeout)
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s: tx fsm reset failed\n", __func__);
+ }
+
+ if (port->tx_dma) {
+ geni_se_tx_dma_unprep(port->wrapper_dev,
+ port->tx_dma, port->xmit_size);
+ port->tx_dma = (dma_addr_t)NULL;
+ }
+ }
+ /* Unmask the interrupts once the cancel operation is done. */
+ msm_geni_serial_enable_interrupts(uport);
+ port->m_cmd = false;
+ port->xmit_size = 0;
+
/*
* If we end up having to cancel an on-going Tx for non-console usecase
* then it means there was some unsent data in the Tx FIFO, consequently
@@ -1113,6 +1343,7 @@ static void stop_tx_sequencer(struct uart_port *uport)
IPC_LOG_MSG(port->ipc_log_misc, "%s:Removing vote\n", __func__);
msm_geni_serial_power_off(uport);
}
+
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
IPC_LOG_MSG(port->ipc_log_misc, "%s: End GENI:0x%x\n",
__func__, geni_status);
@@ -1133,8 +1364,6 @@ static void msm_geni_serial_stop_tx(struct uart_port *uport)
static void start_rx_sequencer(struct uart_port *uport)
{
- unsigned int geni_s_irq_en;
- unsigned int geni_m_irq_en;
unsigned int geni_status;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
u32 geni_se_param = UART_PARAM_RFR_OPEN;
@@ -1157,29 +1386,14 @@ static void start_rx_sequencer(struct uart_port *uport)
}
/* Start RX with the RFR_OPEN to keep RFR in always ready state */
+ msm_geni_serial_enable_interrupts(uport);
geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param);
- if (port->xfer_mode == FIFO_MODE) {
- geni_s_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_M_IRQ_EN);
-
- geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
- geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
-
- geni_write_reg_nolog(geni_s_irq_en, uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_write_reg_nolog(geni_m_irq_en, uport->membase,
- SE_GENI_M_IRQ_EN);
- } else if (port->xfer_mode == SE_DMA) {
+ if (port->xfer_mode == SE_DMA)
geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE,
&port->rx_dma);
- }
- /*
- * Ensure the writes to the secondary sequencer and interrupt enables
- * go through.
- */
+
+ /* Ensure that the above writes go through */
mb();
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x, dma_dbg:0x%x\n", __func__,
@@ -1253,61 +1467,83 @@ static void msm_geni_serial_set_manual_flow(bool enable,
static void stop_rx_sequencer(struct uart_port *uport)
{
- unsigned int geni_s_irq_en;
- unsigned int geni_m_irq_en;
unsigned int geni_status;
+ bool timeout, is_irq_masked;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- u32 irq_clear = S_CMD_CANCEL_EN;
- bool done;
+ unsigned long flags = 0;
- if (port->xfer_mode == FIFO_MODE) {
- geni_s_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_M_IRQ_EN);
- geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
- geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
-
- geni_write_reg_nolog(geni_s_irq_en, uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_write_reg_nolog(geni_m_irq_en, uport->membase,
- SE_GENI_M_IRQ_EN);
- }
+ IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
/* Possible stop rx is called multiple times. */
- if (!(geni_status & S_GENI_CMD_ACTIVE))
+ if (!(geni_status & S_GENI_CMD_ACTIVE)) {
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s: RX is Inactive, geni_sts: 0x%x\n",
+ __func__, geni_status);
goto exit_rx_seq;
+ }
+
+ port->s_cmd_done = false;
+ port->s_cmd = true;
+ reinit_completion(&port->s_cmd_timeout);
IPC_LOG_MSG(port->ipc_log_misc, "%s: Start 0x%x\n",
__func__, geni_status);
+ /*
+ * Try disabling interrupts before giving the
+ * cancel command as this might be in an atomic context.
+ */
+ is_irq_masked = msm_serial_try_disable_interrupts(uport);
geni_cancel_s_cmd(uport->membase);
+
/*
* Ensure that the cancel goes through before polling for the
* cancel control bit.
*/
mb();
- if (!uart_console(uport))
- msm_geni_serial_complete_rx_eot(uport);
+ timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
+ if (timeout) {
+ bool is_rx_active;
+ geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ /*
+ * Possible that stop_rx is called from system resume context
+ * for console usecase. In early resume, irq remains disabled
+		 * in the system. Call msm_geni_serial_handle_isr to clear
+ * the interrupts.
+ */
+ is_rx_active = geni_status & S_GENI_CMD_ACTIVE;
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s cancel failed is_rx_active:%d 0x%x\n",
+ __func__, is_rx_active, geni_status);
+ IPC_LOG_MSG(port->console_log,
+ "%s cancel failed is_rx_active:%d 0x%x\n",
+ __func__, is_rx_active, geni_status);
+ if (uart_console(uport) && !is_rx_active) {
+ msm_geni_serial_handle_isr(uport, &flags);
+ goto exit_rx_seq;
+ }
+ port->s_cmd_done = false;
+ reinit_completion(&port->s_cmd_timeout);
+ geni_abort_s_cmd(uport->membase);
+ /* Ensure this goes through before polling. */
+ mb();
- done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
- S_GENI_CMD_CANCEL, false);
- if (done) {
- geni_write_reg_nolog(irq_clear, uport->membase,
- SE_GENI_S_IRQ_CLEAR);
- goto exit_rx_seq;
- } else {
- IPC_LOG_MSG(port->ipc_log_misc, "%s Cancel fail 0x%x\n",
- __func__, geni_status);
+ timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
+ if (timeout) {
+ geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s abort fail 0x%x\n", __func__, geni_status);
+ IPC_LOG_MSG(port->console_log,
+ "%s abort fail 0x%x\n", __func__, geni_status);
+ }
}
+ /* Enable the interrupts once the cancel operation is done. */
+ msm_geni_serial_enable_interrupts(uport);
+ port->s_cmd = false;
- geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
- if ((geni_status & S_GENI_CMD_ACTIVE)) {
- IPC_LOG_MSG(port->ipc_log_misc, "%s:Abort Rx, GENI:0x%x\n",
- __func__, geni_status);
- msm_geni_serial_abort_rx(uport);
- }
exit_rx_seq:
if (port->xfer_mode == SE_DMA && port->rx_dma)
msm_geni_serial_rx_fsm_rst(uport);
@@ -1333,7 +1569,7 @@ static int handle_rx_hs(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last,
- bool drop_rx)
+ bool drop_rx, unsigned long *flags)
{
unsigned char *rx_char;
struct tty_port *tport;
@@ -1365,7 +1601,8 @@ static int handle_rx_hs(struct uart_port *uport,
return ret;
}
-static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx)
+static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx,
+ unsigned long *flags)
{
int ret = 0;
unsigned int rx_fifo_status;
@@ -1384,7 +1621,7 @@ static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx)
rx_last = rx_fifo_status & RX_LAST;
if (rx_fifo_wc)
ret = port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
- rx_last, drop_rx);
+ rx_last, drop_rx, flags);
return ret;
}
@@ -1533,8 +1770,6 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx)
dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0,
rx_bytes);
exit_handle_dma_rx:
- geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE,
- &msm_port->rx_dma);
return ret;
}
@@ -1568,32 +1803,28 @@ static int msm_geni_serial_handle_dma_tx(struct uart_port *uport)
return 0;
}
-static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
+static void msm_geni_serial_handle_isr(struct uart_port *uport,
+ unsigned long *flags)
{
unsigned int m_irq_status;
unsigned int s_irq_status;
unsigned int dma;
unsigned int dma_tx_status;
unsigned int dma_rx_status;
- struct uart_port *uport = dev;
unsigned int m_irq_en;
unsigned int geni_status;
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
struct tty_port *tport = &uport->state->port;
bool drop_rx = false;
+ bool s_cmd_done = false;
+ bool m_cmd_done = false;
- spin_lock(&uport->lock);
if (uart_console(uport) && uport->suspended) {
IPC_LOG_MSG(msm_port->console_log,
"%s. Console in suspend state\n", __func__);
goto exit_geni_serial_isr;
}
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
- dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
- IPC_LOG_MSG(msm_port->ipc_log_misc,
- "%s.Device is suspended.\n", __func__);
- goto exit_geni_serial_isr;
- }
+
m_irq_status = geni_read_reg_nolog(uport->membase,
SE_GENI_M_IRQ_STATUS);
s_irq_status = geni_read_reg_nolog(uport->membase,
@@ -1601,16 +1832,13 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
if (uart_console(uport))
IPC_LOG_MSG(msm_port->console_log,
"%s. sirq 0x%x mirq:0x%x\n", __func__, s_irq_status,
- m_irq_status);
- m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
- dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
- dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT);
- dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT);
- geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
-
- geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
- geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+ m_irq_status);
+ geni_write_reg_nolog(m_irq_status, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg_nolog(s_irq_status, uport->membase,
+ SE_GENI_S_IRQ_CLEAR);
+ m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
WARN_ON(1);
goto exit_geni_serial_isr;
@@ -1624,49 +1852,79 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
__func__, s_irq_status, uport->icount.buf_overrun);
}
+ dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
if (!dma) {
+ geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+
if ((m_irq_status & m_irq_en) &
(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
msm_geni_serial_handle_tx(uport,
m_irq_status & M_CMD_DONE_EN,
geni_status & M_GENI_CMD_ACTIVE);
- if ((s_irq_status & S_GP_IRQ_0_EN) ||
- (s_irq_status & S_GP_IRQ_1_EN)) {
+ if (m_irq_status & (M_CMD_CANCEL_EN | M_CMD_ABORT_EN))
+ m_cmd_done = true;
+
+ if (s_irq_status & (S_GP_IRQ_0_EN | S_GP_IRQ_1_EN)) {
if (s_irq_status & S_GP_IRQ_0_EN)
uport->icount.parity++;
IPC_LOG_MSG(msm_port->ipc_log_misc,
"%s.sirq 0x%x parity:%d\n",
__func__, s_irq_status, uport->icount.parity);
drop_rx = true;
- } else if ((s_irq_status & S_GP_IRQ_2_EN) ||
- (s_irq_status & S_GP_IRQ_3_EN)) {
+ } else if (s_irq_status & (S_GP_IRQ_2_EN | S_GP_IRQ_3_EN)) {
uport->icount.brk++;
IPC_LOG_MSG(msm_port->ipc_log_misc,
"%s.sirq 0x%x break:%d\n",
__func__, s_irq_status, uport->icount.brk);
}
+ /*
+ * In case of stop_rx handling there is a chance
+ * for RX data can come in parallel. set drop_rx to
+ * avoid data push to framework from handle_rx_console()
+ * API for stop_rx case.
+ */
+ if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN)) {
+ s_cmd_done = true;
+ drop_rx = true;
+ }
- if ((s_irq_status & S_RX_FIFO_WATERMARK_EN) ||
- (s_irq_status & S_RX_FIFO_LAST_EN))
- msm_geni_serial_handle_rx(uport, drop_rx);
+ if (s_irq_status & (S_RX_FIFO_WATERMARK_EN |
+ S_RX_FIFO_LAST_EN))
+ msm_geni_serial_handle_rx(uport, drop_rx, flags);
} else {
+ dma_tx_status = geni_read_reg_nolog(uport->membase,
+ SE_DMA_TX_IRQ_STAT);
+ dma_rx_status = geni_read_reg_nolog(uport->membase,
+ SE_DMA_RX_IRQ_STAT);
+
if (dma_tx_status) {
+
geni_write_reg_nolog(dma_tx_status, uport->membase,
- SE_DMA_TX_IRQ_CLR);
+ SE_DMA_TX_IRQ_CLR);
+
if (dma_tx_status & TX_DMA_DONE)
msm_geni_serial_handle_dma_tx(uport);
+
+ if (dma_tx_status & (TX_RESET_DONE |
+ TX_GENI_CANCEL_IRQ))
+ m_cmd_done = true;
+
+ if (m_irq_status & (M_CMD_CANCEL_EN | M_CMD_ABORT_EN))
+ m_cmd_done = true;
}
if (dma_rx_status) {
geni_write_reg_nolog(dma_rx_status, uport->membase,
- SE_DMA_RX_IRQ_CLR);
+ SE_DMA_RX_IRQ_CLR);
+
if (dma_rx_status & RX_RESET_DONE) {
IPC_LOG_MSG(msm_port->ipc_log_misc,
"%s.Reset done. 0x%x.\n",
__func__, dma_rx_status);
goto exit_geni_serial_isr;
}
+
if (dma_rx_status & UART_DMA_RX_ERRS) {
if (dma_rx_status & UART_DMA_RX_PARITY_ERR)
uport->icount.parity++;
@@ -1682,13 +1940,53 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
__func__, dma_rx_status,
uport->icount.brk);
}
- if (dma_rx_status & RX_DMA_DONE)
- msm_geni_serial_handle_dma_rx(uport, drop_rx);
+
+ if (dma_rx_status & RX_EOT ||
+ dma_rx_status & RX_DMA_DONE) {
+ msm_geni_serial_handle_dma_rx(uport,
+ drop_rx);
+ if (!(dma_rx_status & RX_GENI_CANCEL_IRQ)) {
+ geni_se_rx_dma_start(uport->membase,
+ DMA_RX_BUF_SIZE, &msm_port->rx_dma);
+ }
+ }
+
+ if (dma_rx_status & RX_SBE) {
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s.Rx Errors. 0x%x\n",
+ __func__, dma_rx_status);
+ WARN_ON(1);
+ }
+
+ if (dma_rx_status & (RX_EOT | RX_GENI_CANCEL_IRQ |
+ RX_DMA_DONE))
+ s_cmd_done = true;
+
+ if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN))
+ s_cmd_done = true;
}
}
exit_geni_serial_isr:
- spin_unlock(&uport->lock);
+ if (m_cmd_done) {
+ msm_port->m_cmd_done = true;
+ complete(&msm_port->m_cmd_timeout);
+ }
+
+ if (s_cmd_done) {
+ msm_port->s_cmd_done = true;
+ complete(&msm_port->s_cmd_timeout);
+ }
+}
+
+static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
+{
+ struct uart_port *uport = dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ msm_geni_serial_handle_isr(uport, &flags);
+ spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
@@ -1775,14 +2073,12 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
/* Stop the console before stopping the current tx */
if (uart_console(uport)) {
console_stop(uport->cons);
+ disable_irq(uport->irq);
} else {
msm_geni_serial_power_on(uport);
wait_for_transfers_inflight(uport);
}
- disable_irq(uport->irq);
- free_irq(uport->irq, uport);
-
if (!uart_console(uport)) {
if (msm_port->ioctl_count) {
int i;
@@ -1858,7 +2154,9 @@ static int msm_geni_serial_port_setup(struct uart_port *uport)
* it else we could end up in data loss scenarios.
*/
msm_port->xfer_mode = FIFO_MODE;
- msm_geni_serial_poll_cancel_tx(uport);
+ msm_serial_try_disable_interrupts(uport);
+ msm_geni_serial_poll_tx_done(uport);
+ msm_geni_serial_enable_interrupts(uport);
se_get_packing_config(8, 1, false, &cfg0, &cfg1);
geni_write_reg_nolog(cfg0, uport->membase,
SE_GENI_TX_PACKING_CFG0);
@@ -1870,6 +2168,7 @@ static int msm_geni_serial_port_setup(struct uart_port *uport)
geni_write_reg_nolog(cfg1, uport->membase,
SE_GENI_RX_PACKING_CFG1);
}
+
ret = geni_se_init(uport->membase, msm_port->rx_wm, msm_port->rx_rfr);
if (ret) {
dev_err(uport->dev, "%s: Fail\n", __func__);
@@ -1904,8 +2203,6 @@ static int msm_geni_serial_startup(struct uart_port *uport)
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
IPC_LOG_MSG(msm_port->ipc_log_misc, "%s:\n", __func__);
- scnprintf(msm_port->name, sizeof(msm_port->name), "msm_serial_geni%d",
- uport->line);
msm_port->startup_in_progress = true;
@@ -1934,13 +2231,16 @@ static int msm_geni_serial_startup(struct uart_port *uport)
* before returning to the framework.
*/
mb();
- ret = request_irq(uport->irq, msm_geni_serial_isr, IRQF_TRIGGER_HIGH,
- msm_port->name, uport);
- if (unlikely(ret)) {
- dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n",
- __func__, ret);
- goto exit_startup;
- }
+
+	/* Console usecase requires irq to be in enable state after early
+	 * console switch from probe to handle RX data. Hence enable IRQ
+	 * from startup and disable it from shutdown APIs for console case.
+	 * For BT HSUART usecase, IRQ will be enabled from runtime_resume()
+	 * and disabled in runtime_suspend to avoid spurious interrupts
+	 * after suspend.
+	 */
+ if (uart_console(uport))
+ enable_irq(uport->irq);
if (msm_port->wakeup_irq > 0) {
ret = request_irq(msm_port->wakeup_irq, msm_geni_wakeup_isr,
@@ -2054,7 +2354,6 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
unsigned long ser_clk_cfg = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
unsigned long clk_rate;
- unsigned long flags;
unsigned long desired_rate;
unsigned int clk_idx;
int uart_sampling;
@@ -2076,16 +2375,9 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
__func__, ret);
return;
}
- disable_irq(uport->irq);
msm_geni_serial_set_manual_flow(false, port);
}
- /* Take a spinlock else stop_rx causes a race with an ISR due to Cancel
- * and FSM_RESET. This also has a potential race with the dma_map/unmap
- * operations of ISR.
- */
- spin_lock_irqsave(&uport->lock, flags);
msm_geni_serial_stop_rx(uport);
- spin_unlock_irqrestore(&uport->lock, flags);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->cur_baud = baud;
@@ -2205,10 +2497,9 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
IPC_LOG_MSG(port->ipc_log_misc, "BitsChar%d stop bit%d\n",
bits_per_char, stop_bit_len);
exit_set_termios:
- if (!uart_console(uport)) {
+ if (!uart_console(uport))
msm_geni_serial_set_manual_flow(true, port);
- enable_irq(uport->irq);
- }
+
msm_geni_serial_start_rx(uport);
if (!uart_console(uport))
msm_geni_serial_power_off(uport);
@@ -2276,9 +2567,9 @@ static ssize_t xfer_mode_store(struct device *dev,
return size;
msm_geni_serial_power_on(uport);
- spin_lock_irqsave(&uport->lock, flags);
msm_geni_serial_stop_tx(uport);
msm_geni_serial_stop_rx(uport);
+ spin_lock_irqsave(&uport->lock, flags);
port->xfer_mode = xfer_mode;
geni_se_select_mode(uport->membase, port->xfer_mode);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -2362,6 +2653,45 @@ msm_geni_serial_early_console_write(struct console *con, const char *s,
__msm_geni_serial_console_write(&dev->port, s, n);
}
+static void msm_geni_serial_cancel_rx(struct uart_port *uport)
+{
+ int done = 0;
+ int i = 0;
+ unsigned int irq_status;
+ u32 rx_fifo_status;
+ u32 rx_fifo_wc;
+
+ geni_cancel_s_cmd(uport->membase);
+ /* Ensure this goes through before polling. */
+ mb();
+
+ done = msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+ S_CMD_CANCEL_EN, true);
+ if (!done) {
+ geni_abort_s_cmd(uport->membase);
+ /* Ensure this goes through before polling. */
+ mb();
+ msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+ S_CMD_ABORT_EN, false);
+ } else if (msm_geni_serial_poll_bit(uport,
+ SE_GENI_S_IRQ_STATUS, S_RX_FIFO_LAST_EN, true)) {
+ rx_fifo_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_RX_FIFO_STATUS);
+ rx_fifo_wc = rx_fifo_status & RX_FIFO_WC_MSK;
+ for (i = 0; i < rx_fifo_wc; i++)
+ geni_read_reg_nolog(uport->membase,
+ SE_GENI_RX_FIFOn);
+ }
+
+ irq_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_STATUS);
+ geni_write_reg_nolog(irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+
+ if (!done)
+ geni_write_reg(FORCE_DEFAULT, uport->membase,
+ GENI_FORCE_DEFAULT_REG);
+}
+
static int __init
msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
const char *opt)
@@ -2408,6 +2738,7 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
stop_bit = 0;
rx_stale = 0x18;
clk_div = get_clk_div_rate(baud, &clk_rate);
+
if (clk_div <= 0) {
ret = -EINVAL;
goto exit_geni_serial_earlyconsetup;
@@ -2420,10 +2751,17 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
/*
- * Make an unconditional cancel on the main sequencer to reset
- * it else we could end up in data loss scenarios.
+ * Here we need to poll for command done which indicates that
+ * the previous tx transfer is done. And if the command done interrupt
+ * is not getting set, then we need to cancel the command.
*/
- msm_geni_serial_poll_cancel_tx(uport);
+ msm_geni_serial_poll_tx_done(uport);
+
+ /*
+ * Here cancel rx is done in polling mode as there is
+ * no isr support during early console time.
+ */
+ msm_geni_serial_cancel_rx(uport);
/* Only for earlyconsole */
if (IS_ENABLED(CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING)) {
@@ -2442,8 +2780,14 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
SE_UART_TX_TRANS_CFG);
geni_write_reg_nolog(tx_parity_cfg, uport->membase,
SE_UART_TX_PARITY_CFG);
+ geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+ SE_UART_RX_TRANS_CFG);
+ geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+ SE_UART_RX_PARITY_CFG);
geni_write_reg_nolog(bits_per_char, uport->membase,
SE_UART_TX_WORD_LEN);
+ geni_write_reg_nolog(bits_per_char, uport->membase,
+ SE_UART_RX_WORD_LEN);
geni_write_reg_nolog(stop_bit, uport->membase, SE_UART_TX_STOP_BIT_LEN);
geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
@@ -2657,6 +3001,8 @@ static int msm_geni_serial_get_ver_info(struct uart_port *uport)
__func__, msm_port->ver_info.hw_major_ver,
msm_port->ver_info.hw_minor_ver,
msm_port->ver_info.hw_step_ver);
+
+ msm_geni_serial_enable_interrupts(uport);
exit_ver_info:
if (!msm_port->is_console)
se_geni_clks_off(&msm_port->serial_rsc);
@@ -2857,6 +3203,9 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
uport->fifosize =
((dev_port->tx_fifo_depth * dev_port->tx_fifo_width) >> 3);
+	/* Completion objects used to signal cancel cmd completion */
+ init_completion(&dev_port->m_cmd_timeout);
+ init_completion(&dev_port->s_cmd_timeout);
uport->irq = platform_get_irq(pdev, 0);
if (uport->irq < 0) {
@@ -2865,6 +3214,17 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
goto exit_geni_serial_probe;
}
+ dev_port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
+ "msm_serial_geni%d", uport->line);
+ irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, uport->irq, msm_geni_serial_isr,
+ IRQF_TRIGGER_HIGH, dev_port->name, uport);
+ if (ret) {
+ dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n",
+ __func__, ret);
+ goto exit_geni_serial_probe;
+ }
+
uport->private_data = (void *)drv;
platform_set_drvdata(pdev, dev_port);
if (is_console) {
@@ -2887,6 +3247,7 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
line, uport->fifosize, is_console);
+
device_create_file(uport->dev, &dev_attr_loopback);
device_create_file(uport->dev, &dev_attr_xfer_mode);
device_create_file(uport->dev, &dev_attr_ver_info);
@@ -2931,21 +3292,24 @@ static int msm_geni_serial_runtime_suspend(struct device *dev)
wait_for_transfers_inflight(&port->uport);
/*
- * Disable Interrupt
* Manual RFR On.
* Stop Rx.
+ * Disable Interrupt
* Resources off
*/
- disable_irq(port->uport.irq);
stop_rx_sequencer(&port->uport);
geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS);
+
if ((geni_status & M_GENI_CMD_ACTIVE))
stop_tx_sequencer(&port->uport);
+
+ disable_irq(port->uport.irq);
ret = se_geni_resources_off(&port->serial_rsc);
if (ret) {
dev_err(dev, "%s: Error ret %d\n", __func__, ret);
goto exit_runtime_suspend;
}
+
if (port->wakeup_irq > 0) {
port->edge_count = 0;
enable_irq(port->wakeup_irq);
@@ -2985,12 +3349,9 @@ static int msm_geni_serial_runtime_resume(struct device *dev)
start_rx_sequencer(&port->uport);
/* Ensure that the Rx is running before enabling interrupts */
mb();
- /*
- * Do not enable irq before interrupt registration which happens
- * at port open time.
- */
- if (pm_runtime_enabled(dev) && port->xfer_mode != INVALID)
- enable_irq(port->uport.irq);
+ /* Enable interrupt */
+ enable_irq(port->uport.irq);
+
IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__);
exit_runtime_resume:
return ret;
@@ -3035,7 +3396,6 @@ static int msm_geni_serial_sys_resume_noirq(struct device *dev)
console_suspend_enabled && uport->suspended) {
uart_resume_port((struct uart_driver *)uport->private_data,
uport);
- disable_irq(uport->irq);
}
return 0;
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ff9eb5322b21..71596216ea18 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1124,6 +1124,7 @@ struct dwc3_scratchpad_array {
* @bh_completion_time: time taken for taklet completion
* @bh_handled_evt_cnt: no. of events handled by tasklet per interrupt
* @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
+ * @last_run_stop: timestamp denoting the last run_stop update
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1358,6 +1359,7 @@ struct dwc3 {
u32 gen2_tx_de_emph1;
u32 gen2_tx_de_emph2;
u32 gen2_tx_de_emph3;
+ ktime_t last_run_stop;
};
#define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e2fc27dec4b8..de3a9326ac1a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2123,9 +2123,17 @@ done:
/* phy sync delay as per data book */
msleep(50);
+ /*
+ * Soft reset clears the block on the doorbell,
+ * set it back to prevent unwanted writes to the doorbell.
+ */
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_CLEAR_DB, 0);
+
return 0;
}
+#define MIN_RUN_STOP_DELAY_MS 50
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg, reg1;
@@ -2234,6 +2242,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int ret;
+ ktime_t diff;
is_on = !!is_on;
dwc->softconnect = is_on;
@@ -2252,6 +2261,15 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dbg_event(0xFF, "Pullup gsync",
atomic_read(&dwc->dev->power.usage_count));
+ diff = ktime_sub(ktime_get(), dwc->last_run_stop);
+ if (ktime_to_ms(diff) < MIN_RUN_STOP_DELAY_MS) {
+ dbg_event(0xFF, "waitBefRun_Stop",
+ MIN_RUN_STOP_DELAY_MS - ktime_to_ms(diff));
+ msleep(MIN_RUN_STOP_DELAY_MS - ktime_to_ms(diff));
+ }
+
+ dwc->last_run_stop = ktime_get();
+
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
@@ -2274,6 +2292,9 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
/* prevent pending bh to run later */
flush_work(&dwc->bh_work);
+ if (is_on)
+ dwc3_device_core_soft_reset(dwc);
+
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->ep0state != EP0_SETUP_PHASE)
dbg_event(0xFF, "EP0 is not in SETUP phase\n", 0);
@@ -2316,8 +2337,14 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ /*
+ * Enable SUSPENDEVENT(BIT:6) for version 230A and above
+ * else enable USB Link change event (BIT:3) for older version
+ */
if (dwc->revision < DWC3_REVISION_230A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
+ else
+ reg |= DWC3_DEVTEN_EOPFEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -3371,13 +3398,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
- /* Enable SUSPENDEVENT(BIT:6) for version 230A and above */
- if (dwc->revision >= DWC3_REVISION_230A) {
- reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
- reg |= DWC3_DEVTEN_EOPFEN;
- dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
- }
-
/* Reset the retry on erratic error event count */
dwc->retries_on_error = 0;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index f916f8753540..7f9827c42621 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -139,6 +139,7 @@ struct ffs_epfile {
struct ffs_data *ffs;
struct ffs_ep *ep; /* P: ffs->eps_lock */
+ atomic_t opened;
struct dentry *dentry;
@@ -205,7 +206,7 @@ struct ffs_epfile {
unsigned char in; /* P: ffs->eps_lock */
unsigned char isoc; /* P: ffs->eps_lock */
- unsigned char _pad;
+ bool invalid;
};
struct ffs_buffer {
@@ -956,6 +957,16 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
+ /*
+ * epfile->invalid is set when EPs are disabled. Userspace
+ * might have stale threads continuing to do I/O and may be
+ * unaware of that especially if we block here. Instead return
+ * an error immediately here and don't allow any more I/O
+ * until the epfile is reopened.
+ */
+ if (epfile->invalid)
+ return -ENODEV;
+
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
@@ -1117,6 +1128,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
+ io_data->req = NULL;
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
@@ -1151,15 +1163,16 @@ ffs_epfile_open(struct inode *inode, struct file *file)
ENTER();
- ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
- epfile->ffs->state, epfile->ffs->setup_state,
- epfile->ffs->flags);
+ ffs_log("%s: state %d setup_state %d flag %lu opened %u",
+ epfile->name, epfile->ffs->state, epfile->ffs->setup_state,
+ epfile->ffs->flags, atomic_read(&epfile->opened));
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
+ atomic_inc(&epfile->opened);
return 0;
}
@@ -1299,9 +1312,12 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
__ffs_epfile_read_buffer_free(epfile);
- ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
- epfile->ffs->state, epfile->ffs->setup_state,
- epfile->ffs->flags);
+ ffs_log("%s: state %d setup_state %d flag %lu opened %u",
+ epfile->name, epfile->ffs->state, epfile->ffs->setup_state,
+ epfile->ffs->flags, atomic_read(&epfile->opened));
+
+ if (atomic_dec_and_test(&epfile->opened))
+ epfile->invalid = false;
ffs_data_closed(epfile->ffs);
@@ -1331,6 +1347,10 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
+ /* don't allow any I/O until file is reopened */
+ if (epfile->invalid)
+ return -ENODEV;
+
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
@@ -1992,6 +2012,8 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ffs_epfiles_destroy(epfiles, i - 1);
return -ENOMEM;
}
+
+ atomic_set(&epfile->opened, 0);
}
ffs->epfiles = epfiles;
@@ -2039,6 +2061,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
++ep;
if (epfile) {
+ epfile->invalid = true; /* until file is reopened */
epfile->ep = NULL;
__ffs_epfile_read_buffer_free(epfile);
++epfile;
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index bb146d7534ca..d414a8a6a894 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -30,30 +30,38 @@
#define POST_VCO_DIV3_5_0_CLK 19
#define CPHY_PCLK_SRC_MUX_0_CLK 20
#define CPHY_PCLK_SRC_0_CLK 21
+#define SHADOW_CPHY_BYTECLK_SRC_0_CLK 22
+#define SHADOW_POST_VCO_DIV3_5_0_CLK 23
+#define SHADOW_CPHY_PCLK_SRC_MUX_0_CLK 24
+#define SHADOW_CPHY_PCLK_SRC_0_CLK 25
-#define VCO_CLK_1 22
-#define PLL_OUT_DIV_1_CLK 23
-#define BITCLK_SRC_1_CLK 24
-#define BYTECLK_SRC_1_CLK 25
-#define POST_BIT_DIV_1_CLK 26
-#define POST_VCO_DIV_1_CLK 27
-#define BYTECLK_MUX_1_CLK 28
-#define PCLK_SRC_MUX_1_CLK 29
-#define PCLK_SRC_1_CLK 30
-#define PCLK_MUX_1_CLK 31
-#define SHADOW_VCO_CLK_1 32
-#define SHADOW_PLL_OUT_DIV_1_CLK 33
-#define SHADOW_BITCLK_SRC_1_CLK 34
-#define SHADOW_BYTECLK_SRC_1_CLK 35
-#define SHADOW_POST_BIT_DIV_1_CLK 36
-#define SHADOW_POST_VCO_DIV_1_CLK 37
-#define SHADOW_PCLK_SRC_MUX_1_CLK 38
-#define SHADOW_PCLK_SRC_1_CLK 39
+#define VCO_CLK_1 26
+#define PLL_OUT_DIV_1_CLK 27
+#define BITCLK_SRC_1_CLK 28
+#define BYTECLK_SRC_1_CLK 29
+#define POST_BIT_DIV_1_CLK 30
+#define POST_VCO_DIV_1_CLK 31
+#define BYTECLK_MUX_1_CLK 32
+#define PCLK_SRC_MUX_1_CLK 33
+#define PCLK_SRC_1_CLK 34
+#define PCLK_MUX_1_CLK 35
+#define SHADOW_VCO_CLK_1 36
+#define SHADOW_PLL_OUT_DIV_1_CLK 37
+#define SHADOW_BITCLK_SRC_1_CLK 38
+#define SHADOW_BYTECLK_SRC_1_CLK 39
+#define SHADOW_POST_BIT_DIV_1_CLK 40
+#define SHADOW_POST_VCO_DIV_1_CLK 41
+#define SHADOW_PCLK_SRC_MUX_1_CLK 42
+#define SHADOW_PCLK_SRC_1_CLK 43
/* CPHY clocks for DSI-1 PLL */
-#define CPHY_BYTECLK_SRC_1_CLK 40
-#define POST_VCO_DIV3_5_1_CLK 41
-#define CPHY_PCLK_SRC_MUX_1_CLK 42
-#define CPHY_PCLK_SRC_1_CLK 43
+#define CPHY_BYTECLK_SRC_1_CLK 44
+#define POST_VCO_DIV3_5_1_CLK 45
+#define CPHY_PCLK_SRC_MUX_1_CLK 46
+#define CPHY_PCLK_SRC_1_CLK 47
+#define SHADOW_CPHY_BYTECLK_SRC_1_CLK 48
+#define SHADOW_POST_VCO_DIV3_5_1_CLK 49
+#define SHADOW_CPHY_PCLK_SRC_MUX_1_CLK 50
+#define SHADOW_CPHY_PCLK_SRC_1_CLK 51
/* DP PLL clocks */
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 7ce6a5273c51..547beaf9fb02 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -38,6 +38,7 @@ enum MHI_CB {
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
+ MHI_CB_FW_FALLBACK_IMG,
};
/**
@@ -282,6 +283,7 @@ struct mhi_controller {
/* fw images */
const char *fw_image;
+ const char *fw_image_fallback;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 1fd68e38cf2f..3e2888af87c3 100644..100755
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -330,6 +330,7 @@ struct se_geni_rsc {
#define TX_EOT (BIT(1))
#define TX_SBE (BIT(2))
#define TX_RESET_DONE (BIT(3))
+#define TX_GENI_CANCEL_IRQ (BIT(14))
/* SE_DMA_RX_IRQ_STAT Register fields */
#define RX_DMA_DONE (BIT(0))
@@ -338,9 +339,15 @@ struct se_geni_rsc {
#define RX_RESET_DONE (BIT(3))
#define RX_FLUSH_DONE (BIT(4))
#define RX_GENI_GP_IRQ (GENMASK(10, 5))
-#define RX_GENI_CANCEL_IRQ (BIT(11))
+#define RX_GENI_CANCEL_IRQ (BIT(14))
#define RX_GENI_GP_IRQ_EXT (GENMASK(13, 12))
+/* DMA DEBUG Register fields */
+#define DMA_TX_ACTIVE (BIT(0))
+#define DMA_RX_ACTIVE (BIT(1))
+#define DMA_TX_STATE (GENMASK(7, 4))
+#define DMA_RX_STATE (GENMASK(11, 8))
+
#define DEFAULT_BUS_WIDTH (4)
#define DEFAULT_SE_CLK (19200000)
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f6050074dbb5..7369a223aa7d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1003,6 +1003,9 @@ enum v4l2_mpeg_vidc_video_roi_type {
V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE = 2,
};
+#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 133)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 727a1b42768f..d530da00b9af 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -755,6 +755,28 @@ struct snd_soc_component *soc_find_component(
EXPORT_SYMBOL(soc_find_component);
/**
+ * soc_find_component_locked: soc_find_component with client lock acquired
+ *
+ * @of_node: of_node of the component to query.
+ * @name: name of the component to query.
+ *
+ * function to find out if a component is already registered with ASoC core.
+ *
+ * Returns component handle for success, else NULL error.
+ */
+struct snd_soc_component *soc_find_component_locked(
+ const struct device_node *of_node, const char *name)
+{
+ struct snd_soc_component *component = NULL;
+
+ mutex_lock(&client_mutex);
+ component = soc_find_component(of_node, name);
+ mutex_unlock(&client_mutex);
+ return component;
+}
+EXPORT_SYMBOL(soc_find_component_locked);
+
+/**
* snd_soc_find_dai - Find a registered DAI
*
* @dlc: name of the DAI or the DAI driver and optional component info to match
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5b38bcae2691..6297d720a498 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -894,10 +894,13 @@ static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
ret = wait_event_interruptible_timeout(dev->disconnect_wq,
!atomic_read(&dev->in_use),
msecs_to_jiffies(DEV_RELEASE_WAIT_TIMEOUT));
- if (!ret)
+ if (!ret) {
uaudio_err("timeout while waiting for dev_release\n");
- else if (ret < 0)
+ atomic_set(&dev->in_use, 0);
+ } else if (ret < 0) {
uaudio_err("failed with ret %d\n", ret);
+ atomic_set(&dev->in_use, 0);
+ }
mutex_lock(&chip->dev_lock);
}
@@ -1156,22 +1159,20 @@ static void handle_uaudio_stream_req(struct qmi_handle *handle,
mutex_unlock(&chip->dev_lock);
response:
- if (!req_msg->enable && ret != -EINVAL) {
- if (ret != -ENODEV) {
- if (info_idx >= 0) {
- mutex_lock(&chip->dev_lock);
- info = &uadev[pcm_card_num].info[info_idx];
- uaudio_dev_intf_cleanup(
- uadev[pcm_card_num].udev,
- info);
- uaudio_dbg("release resources: intf# %d card# %d\n",
- subs->interface, pcm_card_num);
- mutex_unlock(&chip->dev_lock);
- }
+ if (!req_msg->enable && (ret != -EINVAL || ret != -ENODEV)) {
+ mutex_lock(&chip->dev_lock);
+ if (info_idx >= 0) {
+ info = &uadev[pcm_card_num].info[info_idx];
+ uaudio_dev_intf_cleanup(
+ uadev[pcm_card_num].udev,
+ info);
+ uaudio_dbg("release resources: intf# %d card# %d\n",
+ subs->interface, pcm_card_num);
}
if (atomic_read(&uadev[pcm_card_num].in_use))
kref_put(&uadev[pcm_card_num].kref,
uaudio_dev_release);
+ mutex_unlock(&chip->dev_lock);
}
resp.usb_token = req_msg->usb_token;