Diffstat (limited to 'drivers/staging/ccree/ssi_request_mgr.c')
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.c  144
1 file changed, 72 insertions(+), 72 deletions(-)
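
In summary, the patch below is a coding-style cleanup: it replaces the driver-local HwDesc_s typedef with an explicit struct cc_hw_desc tag, swaps the C99 uint32_t/uint8_t spellings for the kernel-native u32/u8 fixed-width types, and strips trailing whitespace. A minimal before/after sketch of the pattern (identifiers taken from the hunks below; the struct layout itself is not part of this diff):

    /* before: C99 stdint types and a hidden-struct typedef */
    uint32_t req_queue_head;
    uint8_t *dummy_comp_buff;
    HwDesc_s compl_desc;

    /* after: kernel fixed-width types and an explicit struct tag */
    u32 req_queue_head;
    u8 *dummy_comp_buff;
    struct cc_hw_desc compl_desc;
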
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 8611adf3bb2e..48c2450d65c6 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@@ -42,8 +42,8 @@
#define MONITOR_CNTR_BIT 0
/**
- * Monitor descriptor.
- * Used to measure CC performance.
+ * Monitor descriptor.
+ * Used to measure CC performance.
*/
#define INIT_CC_MONITOR_DESC(desc_p) \
do { \
@@ -51,7 +51,7 @@ do { \
HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
} while (0)
-/**
+/**
* Try adding monitor descriptor BEFORE enqueuing sequence.
*/
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
@@ -65,14 +65,14 @@ do { \
} while (0)
/**
- * If CC_CYCLE_DESC_HEAD was successfully added:
- * 1. Add memory barrier descriptor to ensure last AXI transaction.
+ * If CC_CYCLE_DESC_HEAD was successfully added:
+ * 1. Add memory barrier descriptor to ensure last AXI transaction.
* 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
*/
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
do { \
if ((is_monitored) == true) { \
- HwDesc_s barrier_desc; \
+ struct cc_hw_desc barrier_desc; \
HW_DESC_INIT(&barrier_desc); \
HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
@@ -82,12 +82,12 @@ do { \
} while (0)
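
These multi-statement helpers are wrapped in do { ... } while (0) so each invocation expands as a single C statement. A minimal sketch of the caller shape the idiom keeps legal (handle_unmonitored() is a hypothetical stand-in, not part of the driver):

    if (is_monitored)
            CC_CYCLE_DESC_TAIL(cc_base, &desc, is_monitored);
    else
            handle_unmonitored();   /* with a bare { ... } block instead of
                                     * do/while(0), the macro's trailing
                                     * semicolon would orphan this else */
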
/**
- * Try reading CC monitor counter value upon sequence complete.
+ * Try reading CC monitor counter value upon sequence complete.
* Can only succeed if the lock_p is taken by the owner of the given request.
*/
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
do { \
- uint32_t elapsed_cycles; \
+ u32 elapsed_cycles; \
if ((is_monitored) == true) { \
elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
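
Only one in-flight request may own the single hardware cycle counter, so the monitor macros guard it with a bit lock (the volatile unsigned long monitor_lock field in the handle struct below). A sketch of the ownership pattern using standard kernel bitops; the acquire side here is an assumption inferred from the clear_bit() in the hunk above:

    if (!test_and_set_bit(MONITOR_CNTR_BIT, &monitor_lock)) {
            /* we own the counter: arm the monitor descriptors */
            is_monitored = true;
    }
    /* ... on completion, the owner reads the counter and releases it: */
    clear_bit(MONITOR_CNTR_BIT, &monitor_lock);
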
@@ -111,15 +111,15 @@ struct ssi_request_mgr_handle {
unsigned int min_free_hw_slots;
unsigned int max_used_sw_slots;
struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
- uint32_t req_queue_head;
- uint32_t req_queue_tail;
- uint32_t axi_completed;
- uint32_t q_free_slots;
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
spinlock_t hw_lock;
- HwDesc_s compl_desc;
- uint8_t *dummy_comp_buff;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
dma_addr_t dummy_comp_buff_dma;
- HwDesc_s monitor_desc;
+ struct cc_hw_desc monitor_desc;
volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
struct workqueue_struct *workq;
@@ -147,7 +147,7 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
if (req_mgr_h->dummy_comp_buff_dma != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
dma_free_coherent(&drvdata->plat_dev->dev,
- sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
+ sizeof(u32), req_mgr_h->dummy_comp_buff,
req_mgr_h->dummy_comp_buff_dma);
}
@@ -170,7 +170,7 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
int request_mgr_init(struct ssi_drvdata *drvdata)
{
#ifdef CC_CYCLE_COUNT
- HwDesc_s monitor_desc[2];
+ struct cc_hw_desc monitor_desc[2];
struct ssi_crypto_req monitor_req = {0};
#endif
struct ssi_request_mgr_handle *req_mgr_h;
@@ -213,22 +213,22 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
/* Allocate DMA word for "dummy" completion descriptor use */
req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
- sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+ sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
if (!req_mgr_h->dummy_comp_buff) {
SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
- "buffer\n", sizeof(uint32_t));
+ "buffer\n", sizeof(u32));
rc = -ENOMEM;
goto req_mgr_init_err;
}
SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
- sizeof(uint32_t));
+ sizeof(u32));
/* Init. "dummy" completion descriptor */
HW_DESC_INIT(&req_mgr_h->compl_desc);
- HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
+ HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(u32));
HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
req_mgr_h->dummy_comp_buff_dma,
- sizeof(uint32_t), NS_BIT, 1);
+ sizeof(u32), NS_BIT, 1);
HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
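
The "dummy" completion buffer initialized above is a single DMA-coherent u32: the final BYPASS descriptor writes a constant into it, and that AXI write doubles as the completion signal. A self-contained sketch of the allocation half, using the standard DMA API (the device pointer and error path are assumptions):

    dma_addr_t dma;
    u32 *word;

    word = dma_alloc_coherent(dev, sizeof(u32), &dma, GFP_KERNEL);
    if (!word)
            return -ENOMEM;
    /* ... aim the completion descriptor's DOUT at 'dma' ... */
    dma_free_coherent(dev, sizeof(u32), word, dma);   /* on teardown */
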
@@ -259,7 +259,7 @@ req_mgr_init_err:
static inline void enqueue_seq(
void __iomem *cc_base,
- HwDesc_s seq[], unsigned int seq_len)
+ struct cc_hw_desc seq[], unsigned int seq_len)
{
int i;
@@ -279,10 +279,10 @@ static inline void enqueue_seq(
}
/*!
- * Completion will take place if and only if user requested completion
- * by setting "is_dout = 0" in send_request().
- *
- * \param dev
+ * Completion will take place if and only if user requested completion
+ * by setting "is_dout = 0" in send_request().
+ *
+ * \param dev
* \param dx_compl_h The completion event to signal
*/
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
@@ -298,14 +298,14 @@ static inline int request_mgr_queues_status_check(
unsigned int total_seq_len)
{
unsigned long poll_queue;
-
- /* SW queue is checked only once as it will not
- be chaned during the poll becasue the spinlock_bh
+
+ /* SW queue is checked only once as it will not
+ be chaned during the poll becasue the spinlock_bh
is held by the thread */
if (unlikely(((req_mgr_h->req_queue_head + 1) &
- (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ (MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
- SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
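
The full test above works because MAX_REQUEST_QUEUE_SIZE is a power of two: wrap-around becomes a mask instead of a modulo, and one slot is sacrificed so that "full" and "empty" stay distinguishable. Distilled (QUEUE_FULL is a hypothetical helper, not part of the driver):

    /* head == tail                      -> queue empty
     * ((head + 1) & (size - 1)) == tail -> queue full, one slot kept free */
    #define QUEUE_FULL(head, tail, size) \
            ((((head) + 1) & ((size) - 1)) == (tail))
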
@@ -315,11 +315,11 @@ static inline int request_mgr_queues_status_check(
}
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue =0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue ++) {
- req_mgr_h->q_free_slots =
+ req_mgr_h->q_free_slots =
CC_HAL_READ_REGISTER(
CC_REG_OFFSET(CRY_KERNEL,
DSCRPTR_QUEUE_CONTENT));
- if (unlikely(req_mgr_h->q_free_slots <
+ if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
}
@@ -329,12 +329,12 @@ static inline int request_mgr_queues_status_check(
return 0;
}
- SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
- "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
req_mgr_h->req_queue_head,
MAX_REQUEST_QUEUE_SIZE,
req_mgr_h->q_free_slots,
@@ -344,27 +344,27 @@ static inline int request_mgr_queues_status_check(
/*!
* Enqueue caller request to crypto hardware.
- *
- * \param drvdata
+ *
+ * \param drvdata
* \param ssi_req The request to enqueue
* \param desc The crypto sequence
* \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
+ * \param is_dout If "true": completion is handled by the caller
* If "false": this function adds a dummy descriptor completion
* and waits upon completion signal.
- *
+ *
* \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
*/
int send_request(
struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
- HwDesc_s *desc, unsigned int len, bool is_dout)
+ struct cc_hw_desc *desc, unsigned int len, bool is_dout)
{
void __iomem *cc_base = drvdata->cc_base;
struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
unsigned int iv_seq_len = 0;
unsigned int total_seq_len = len; /*initial sequence length*/
- HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+ struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
int rc;
unsigned int max_required_seq_len = (total_seq_len +
((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
@@ -384,7 +384,7 @@ int send_request(
spin_lock_bh(&req_mgr_h->hw_lock);
/* Check if there is enough place in the SW/HW queues
- in case iv gen add the max size and in case of no dout add 1
+ in case iv gen add the max size and in case of no dout add 1
for the internal completion descriptor */
rc = request_mgr_queues_status_check(req_mgr_h,
cc_base,
@@ -396,7 +396,7 @@ int send_request(
spin_unlock_bh(&req_mgr_h->hw_lock);
if (rc != -EAGAIN) {
- /* Any error other than HW queue full
+ /* Any error other than HW queue full
(SW queue is full) */
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
@@ -440,12 +440,12 @@ int send_request(
total_seq_len += iv_seq_len;
}
-
+
used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE-1));
if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
req_mgr_h->max_used_sw_slots = used_sw_slots;
}
-
+
CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
&req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);
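
A hypothetical caller sketch for the contract documented above: with is_dout=false, send_request() appends the internal completion descriptor and blocks until the sequence finishes, returning 0 on success; with is_dout=true it returns -EINPROGRESS and completion is the caller's responsibility.

    struct cc_hw_desc seq[3];
    struct ssi_crypto_req req = {0};
    int rc;

    /* ... populate seq[] with the crypto sequence ... */
    rc = send_request(drvdata, &req, seq, ARRAY_SIZE(seq), false);
    if (rc != 0)    /* synchronous path: 0 means already completed */
            return rc;
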
@@ -494,15 +494,15 @@ int send_request(
* Enqueue caller request to crypto hardware during init process.
* assume this function is not called in middle of a flow,
* since we set QUEUE_LAST_IND flag in the last descriptor.
- *
- * \param drvdata
+ *
+ * \param drvdata
* \param desc The crypto sequence
* \param len The crypto sequence length
- *
+ *
* \return int Returns "0" upon success
*/
int send_request_init(
- struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len)
+ struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
{
void __iomem *cc_base = drvdata->cc_base;
struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -529,7 +529,7 @@ int send_request_init(
void complete_request(struct ssi_drvdata *drvdata)
{
- struct ssi_request_mgr_handle *request_mgr_handle =
+ struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#ifdef COMP_IN_WQ
queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
@@ -552,7 +552,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_req *ssi_req;
struct platform_device *plat_dev = drvdata->plat_dev;
- struct ssi_request_mgr_handle * request_mgr_handle =
+ struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
int rc = 0;
@@ -580,7 +580,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
#ifdef COMPLETION_DELAY
/* Delay */
{
- uint32_t axi_err;
+ u32 axi_err;
int i;
SSI_LOG_INFO("Delay\n");
for (i=0;i<1000000;i++) {
@@ -611,10 +611,10 @@ static void comp_handler(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
void __iomem *cc_base = drvdata->cc_base;
- struct ssi_request_mgr_handle * request_mgr_handle =
+ struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
- uint32_t irq;
+ u32 irq;
DECL_CYCLE_COUNT_RESOURCES;
@@ -625,38 +625,38 @@ static void comp_handler(unsigned long devarg)
if (irq & SSI_COMP_IRQ_MASK) {
/* To avoid the interrupt from firing as we unmask it, we clear it now */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
-
+
/* Avoid race with above clear: Test completion counter once more */
- request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
-
+
/* ISR-to-Tasklet latency */
if (request_mgr_handle->axi_completed) {
/* Only if actually reflects ISR-to-completion-handling latency, i.e.,
not duplicate as a result of interrupt after AXIM_MON_ERR clear, before end of loop */
END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
}
-
+
while (request_mgr_handle->axi_completed) {
do {
proc_completions(drvdata);
/* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0.
The following assignment was changed to = (previously was +=) to conform KW restrictions. */
- request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
} while (request_mgr_handle->axi_completed > 0);
-
+
/* To avoid the interrupt from firing as we unmask it, we clear it now */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
-
+
/* Avoid race with above clear: Test completion counter once more */
- request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
}
-
+
}
/* after verifing that there is nothing to do, Unmask AXI completion interrupt */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
CC_HAL_READ_REGISTER(
CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
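
The clear/re-read sequence in the handler above is the usual pattern for not losing completions across an interrupt acknowledge: ack first, so a completion landing afterwards re-raises the IRQ, then re-sample the counter so anything that raced the ack is handled on this pass. Distilled from the hunk:

    /* 1. ack now, so a later completion re-raises the IRQ */
    CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
    /* 2. re-read the counter to catch completions that raced the ack */
    axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
                                    CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
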
@@ -684,12 +684,12 @@ only verify that the queue can be suspended.
*/
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
- struct ssi_request_mgr_handle * request_mgr_handle =
+ struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
-
+
/* lock the send_request */
spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
+ if (request_mgr_handle->req_queue_head !=
request_mgr_handle->req_queue_tail) {
spin_unlock_bh(&request_mgr_handle->hw_lock);
return -EBUSY;
@@ -702,7 +702,7 @@ int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
- struct ssi_request_mgr_handle * request_mgr_handle =
+ struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
return request_mgr_handle->is_runtime_suspended;