author    Stephen Rothwell <sfr@canb.auug.org.au>  2017-07-24 13:43:53 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2017-07-24 13:43:53 +1000
commit    fcb562096b909cad5f8f080973530bdfbebdc9c1 (patch)
tree      662b00a232d77adb43885671af43c340b4e69523
parent    1d6a0b1033c586a2dca1ddd5096630cc6870dc74 (diff)
parent    a29e24907208a7e05e9bd3f990d3fc5ea49667de (diff)
Merge remote-tracking branch 'staging/staging-next'
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/android/ion/ion.h | 12
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c | 5
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c | 2
-rw-r--r--  drivers/staging/ccree/Kconfig | 9
-rw-r--r--  drivers/staging/ccree/Makefile | 2
-rw-r--r--  drivers/staging/ccree/cc_hw_queue_defs.h | 3
-rw-r--r--  drivers/staging/ccree/ssi_aead.c | 193
-rw-r--r--  drivers/staging/ccree/ssi_aead.h | 12
-rw-r--r--  drivers/staging/ccree/ssi_buffer_mgr.c | 433
-rw-r--r--  drivers/staging/ccree/ssi_cipher.c | 125
-rw-r--r--  drivers/staging/ccree/ssi_driver.c | 59
-rw-r--r--  drivers/staging/ccree/ssi_driver.h | 1
-rw-r--r--  drivers/staging/ccree/ssi_fips.c | 119
-rw-r--r--  drivers/staging/ccree/ssi_fips.h | 58
-rw-r--r--  drivers/staging/ccree/ssi_fips_data.h | 306
-rw-r--r--  drivers/staging/ccree/ssi_fips_ext.c | 92
-rw-r--r--  drivers/staging/ccree/ssi_fips_ll.c | 1649
-rw-r--r--  drivers/staging/ccree/ssi_fips_local.c | 357
-rw-r--r--  drivers/staging/ccree/ssi_fips_local.h | 67
-rw-r--r--  drivers/staging/ccree/ssi_hash.c | 150
-rw-r--r--  drivers/staging/ccree/ssi_ivgen.c | 5
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.c | 48
-rw-r--r--  drivers/staging/ccree/ssi_sram_mgr.c | 6
-rw-r--r--  drivers/staging/ccree/ssi_sysfs.c | 80
-rw-r--r--  drivers/staging/comedi/comedi_buf.c | 2
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 2
-rw-r--r--  drivers/staging/greybus/arche-platform.c | 11
-rw-r--r--  drivers/staging/greybus/tools/loopback_test.c | 13
-rw-r--r--  drivers/staging/gs_fpgaboot/io.c | 4
-rw-r--r--  drivers/staging/ks7010/ks_wlan_net.c | 2
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lib-lnet.h | 3
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lib-types.h | 46
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lnetst.h | 129
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/socklnd.h | 3
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h | 11
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c | 20
-rw-r--r--  drivers/staging/lustre/lnet/selftest/conrpc.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/fld/fld_cache.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 9
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_dev.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/import.c | 2
-rw-r--r--  drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts | 53
-rw-r--r--  drivers/staging/pi433/Documentation/devicetree/pi433.txt | 62
-rw-r--r--  drivers/staging/pi433/Documentation/pi433.txt | 274
-rw-r--r--  drivers/staging/pi433/Kconfig | 16
-rw-r--r--  drivers/staging/pi433/Makefile | 3
-rw-r--r--  drivers/staging/pi433/TODO | 5
-rw-r--r--  drivers/staging/pi433/pi433_if.c | 1314
-rw-r--r--  drivers/staging/pi433/pi433_if.h | 152
-rw-r--r--  drivers/staging/pi433/rf69.c | 982
-rw-r--r--  drivers/staging/pi433/rf69.h | 82
-rw-r--r--  drivers/staging/pi433/rf69_enum.h | 201
-rw-r--r--  drivers/staging/pi433/rf69_registers.h | 489
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 78
-rw-r--r--  drivers/staging/rtl8192u/r8192U_core.c | 23
-rw-r--r--  drivers/staging/rtl8192u/r8192U_hw.h | 11
-rw-r--r--  drivers/staging/rtl8712/mlme_linux.c | 4
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl8712_efuse.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl8712_xmit.c | 8
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_btcoex.c | 2
-rw-r--r--  drivers/staging/typec/tcpm.c | 3
-rw-r--r--  drivers/staging/unisys/include/channel.h | 338
-rw-r--r--  drivers/staging/unisys/include/iochannel.h | 524
-rw-r--r--  drivers/staging/unisys/include/visorbus.h | 6
-rw-r--r--  drivers/staging/unisys/visorbus/controlvmchannel.h | 706
-rw-r--r--  drivers/staging/unisys/visorbus/vbuschannel.h | 93
-rw-r--r--  drivers/staging/unisys/visorbus/visorbus_main.c | 369
-rw-r--r--  drivers/staging/unisys/visorbus/visorbus_private.h | 4
-rw-r--r--  drivers/staging/unisys/visorbus/visorchannel.c | 129
-rw-r--r--  drivers/staging/unisys/visorbus/visorchipset.c | 106
-rw-r--r--  drivers/staging/unisys/visorbus/vmcallinterface.h | 57
-rw-r--r--  drivers/staging/unisys/visorhba/visorhba_main.c | 402
-rw-r--r--  drivers/staging/unisys/visorinput/ultrainputreport.h | 65
-rw-r--r--  drivers/staging/unisys/visorinput/visorinput.c | 84
-rw-r--r--  drivers/staging/unisys/visornic/visornic_main.c | 213
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c | 2
-rw-r--r--  drivers/staging/vt6655/card.c | 6
-rw-r--r--  drivers/staging/vt6655/mac.c | 2
-rw-r--r--  drivers/staging/vt6656/device.h | 2
-rw-r--r--  drivers/staging/vt6656/firmware.c | 2
-rw-r--r--  drivers/staging/vt6656/key.h | 2
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 5
-rw-r--r--  drivers/staging/vt6656/power.c | 6
-rw-r--r--  drivers/staging/vt6656/rf.c | 6
-rw-r--r--  drivers/staging/vt6656/usbpipe.c | 4
-rw-r--r--  drivers/staging/wilc1000/host_interface.c | 4
-rw-r--r--  drivers/staging/wilc1000/linux_wlan.c | 3
-rw-r--r--  drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 65
-rw-r--r--  drivers/staging/wilc1000/wilc_wfi_netdevice.h | 4
-rw-r--r--  drivers/staging/wlan-ng/hfa384x.h | 12
-rw-r--r--  drivers/staging/wlan-ng/prism2sta.c | 17
94 files changed, 6213 insertions(+), 4857 deletions(-)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index ef28a1cb64ae..e97d72e3bc40 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,4 +112,6 @@ source "drivers/staging/typec/Kconfig"
source "drivers/staging/vboxvideo/Kconfig"
+source "drivers/staging/pi433/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2918580bdb9e..993ed0c1556c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
+obj-$(CONFIG_PI433) += pi433/
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index fa9ed81ab972..621e5f7ceacb 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -135,7 +135,7 @@ struct ion_heap_ops {
/**
* heap flags - flags between the heaps and core ion code
*/
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+#define ION_HEAP_FLAG_DEFER_FREE BIT(0)
/**
* private flags - flags internal to ion
@@ -146,7 +146,7 @@ struct ion_heap_ops {
* any buffer storage that came from the system allocator will be
* returned to the system allocator.
*/
-#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
/**
* struct ion_heap - represents a heap in the system
@@ -226,8 +226,8 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
int ion_alloc(size_t len,
- unsigned int heap_id_mask,
- unsigned int flags);
+ unsigned int heap_id_mask,
+ unsigned int flags);
/**
* ion_heap_init_shrinker
@@ -291,7 +291,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
* flag.
*/
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
- size_t size);
+ size_t size);
/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
@@ -352,7 +352,7 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
* returns the number of items freed in pages
*/
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan);
+ int nr_to_scan);
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index a0949bc0dcf4..dd5545d9990a 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -31,7 +31,6 @@ struct ion_cma_heap {
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len,
@@ -46,7 +45,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (!pages)
return -ENOMEM;
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
goto err;
@@ -106,7 +105,7 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
return &cma_heap->heap;
}
-int __ion_add_cma_heaps(struct cma *cma, void *data)
+static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
struct ion_heap *heap;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 5964bf21fd80..4dc5d7a589c2 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -98,7 +98,6 @@ static void free_buffer_page(struct ion_system_heap *heap,
ion_page_pool_free(pool, page);
}
-
static struct page *alloc_largest_available(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long size,
@@ -256,7 +255,6 @@ static struct ion_heap_ops system_heap_ops = {
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
-
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index 36a87c686a2a..0b3092ba2fcb 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -23,12 +23,3 @@ config CRYPTO_DEV_CCREE
Choose this if you wish to use hardware acceleration of
cryptographic operations on the system REE.
If unsure say Y.
-
-config CCREE_FIPS_SUPPORT
- bool "Turn on CryptoCell 7XX REE FIPS mode support"
- depends on CRYPTO_DEV_CCREE
- default n
- help
- Say 'Y' to enable support for FIPS compliant mode by the
- CCREE driver.
- If unsure say N.
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index 318c2b39acf6..ae702f3b5369 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
-ccree-$(CCREE_FIPS_SUPPORT) += ssi_fips.o ssi_fips_ll.o ssi_fips_ext.o ssi_fips_local.o
+ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index e6b8cea3f88d..2ae0f655e7a0 100644
--- a/drivers/staging/ccree/cc_hw_queue_defs.h
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -27,7 +27,8 @@
******************************************************************************/
#define HW_DESC_SIZE_WORDS 6
-#define HW_QUEUE_SLOTS_MAX 15 /* Max. available slots in HW queue */
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX 15
#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 1fc0b05ea0d5..ea29b8a1a71d 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -36,7 +36,6 @@
#include "ssi_hash.h"
#include "ssi_sysfs.h"
#include "ssi_sram_mgr.h"
-#include "ssi_fips_local.h"
#define template_aead template_u.aead
@@ -57,22 +56,26 @@ struct ssi_aead_handle {
struct list_head aead_list;
};
+struct cc_hmac_s {
+ u8 *padded_authkey;
+ u8 *ipad_opad; /* IPAD, OPAD*/
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
struct ssi_aead_ctx {
struct ssi_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
union {
- struct {
- u8 *padded_authkey;
- u8 *ipad_opad; /* IPAD, OPAD*/
- dma_addr_t padded_authkey_dma_addr;
- dma_addr_t ipad_opad_dma_addr;
- } hmac;
- struct {
- u8 *xcbc_keys; /* K1,K2,K3 */
- dma_addr_t xcbc_keys_dma_addr;
- } xcbc;
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
} auth_state;
unsigned int enc_keylen;
unsigned int auth_keylen;
@@ -93,46 +96,50 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
+ crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
dev = &ctx->drvdata->plat_dev->dev;
/* Unmap enckey buffer */
if (ctx->enckey) {
dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
- SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->enckey_dma_addr);
+ SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
- if (ctx->auth_state.xcbc.xcbc_keys) {
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
- ctx->auth_state.xcbc.xcbc_keys,
- ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
}
- SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
- ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
- ctx->auth_state.xcbc.xcbc_keys = NULL;
+ SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
- if (ctx->auth_state.hmac.ipad_opad) {
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
- ctx->auth_state.hmac.ipad_opad,
- ctx->auth_state.hmac.ipad_opad_dma_addr);
- SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
- ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
- ctx->auth_state.hmac.ipad_opad = NULL;
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
}
- if (ctx->auth_state.hmac.padded_authkey) {
+ if (hmac->padded_authkey) {
dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
- ctx->auth_state.hmac.padded_authkey,
- ctx->auth_state.hmac.padded_authkey_dma_addr);
- SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
- ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
- ctx->auth_state.hmac.padded_authkey = NULL;
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
}
}
}
@@ -144,9 +151,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));
-
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
ctx->cipher_mode = ssi_alg->cipher_mode;
@@ -168,31 +173,42 @@ static int ssi_aead_init(struct crypto_aead *tfm)
/* Set default authlen value */
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
/* (and temporary for user key - up to 256b) */
- ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
- CC_AES_128_BIT_KEY_SIZE * 3,
- &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.xcbc.xcbc_keys) {
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
goto init_failed;
}
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
/* Allocate dma-coherent buffer for IPAD + OPAD */
- ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
- 2 * MAX_HMAC_DIGEST_SIZE,
- &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.ipad_opad) {
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
goto init_failed;
}
+
SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
- ctx->auth_state.hmac.ipad_opad);
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
- ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
- MAX_HMAC_BLOCK_SIZE,
- &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.padded_authkey) {
+ if (!hmac->padded_authkey) {
SSI_LOG_ERR("failed to allocate padded_authkey\n");
goto init_failed;
}
@@ -236,8 +252,8 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
- areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+ areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
+ areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv) {
@@ -292,12 +308,13 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
{
- unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
int idx = 0;
int i;
@@ -325,7 +342,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], hash_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
@@ -334,7 +351,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Perform HASH update */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- ctx->auth_state.hmac.padded_authkey_dma_addr,
+ hmac->padded_authkey_dma_addr,
SHA256_BLOCK_SIZE, NS_BIT);
set_cipher_mode(&desc[idx], hash_mode);
set_xor_active(&desc[idx]);
@@ -345,8 +362,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_dlli(&desc[idx],
- (ctx->auth_state.hmac.ipad_opad_dma_addr +
- digest_ofs), digest_size, NS_BIT, 0);
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
@@ -538,7 +555,6 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
@@ -654,7 +670,6 @@ static int ssi_aead_setauthsize(
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Unsupported auth. sizes */
if ((authsize == 0) ||
(authsize > crypto_aead_maxauthsize(authenc))) {
@@ -762,11 +777,11 @@ ssi_aead_process_authenc_data_desc(
{
struct scatterlist *cipher =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstSgl : areq_ctx->srcSgl;
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstOffset : areq_ctx->srcOffset;
+ areq_ctx->dst_offset : areq_ctx->src_offset;
SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -828,11 +843,11 @@ ssi_aead_process_cipher_data_desc(
SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- (sg_dma_address(areq_ctx->srcSgl) +
- areq_ctx->srcOffset), areq_ctx->cryptlen, NS_BIT);
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
set_dout_dlli(&desc[idx],
- (sg_dma_address(areq_ctx->dstSgl) +
- areq_ctx->dstOffset),
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
areq_ctx->cryptlen, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
@@ -1365,27 +1380,27 @@ data_size_err:
}
#if SSI_CC_HAS_AES_CCM
-static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
unsigned int len = 0;
- if (headerSize == 0)
+ if (header_size == 0)
return 0;
- if (headerSize < ((1UL << 16) - (1UL << 8))) {
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
len = 2;
- pA0Buff[0] = (headerSize >> 8) & 0xFF;
- pA0Buff[1] = headerSize & 0xFF;
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
} else {
len = 6;
- pA0Buff[0] = 0xFF;
- pA0Buff[1] = 0xFE;
- pA0Buff[2] = (headerSize >> 24) & 0xFF;
- pA0Buff[3] = (headerSize >> 16) & 0xFF;
- pA0Buff[4] = (headerSize >> 8) & 0xFF;
- pA0Buff[5] = headerSize & 0xFF;
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
}
return len;
@@ -1557,7 +1572,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
- if (2 > l || l > 8) {
+ if (l < 2 || l > 8) {
SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
@@ -1848,8 +1863,9 @@ static inline void ssi_aead_dump_gcm(
SSI_LOG_DEBUG("%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
+ SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+ ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+ req->assoclen, req_ctx->cryptlen);
if (ctx->enckey)
dump_byte_array("mac key", ctx->enckey, 16);
@@ -1864,7 +1880,7 @@ static inline void ssi_aead_dump_gcm(
dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
- dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
+ dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
if (req->src && req->cryptlen)
dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
@@ -1886,7 +1902,7 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize);
+ SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1903,16 +1919,16 @@ static int config_gcm_context(struct aead_request *req)
__be64 temp64;
temp64 = cpu_to_be64(req->assoclen * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
} else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
__be64 temp64;
temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
}
return 0;
@@ -1946,7 +1962,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
@@ -2198,7 +2213,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2216,7 +2231,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index 39cc633a3ffa..e85bcd917e7b 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -69,8 +69,8 @@ struct aead_req_ctx {
u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
struct {
- u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
- u8 lenC[GCM_BLOCK_LEN_SIZE];
+ u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ u8 len_c[GCM_BLOCK_LEN_SIZE];
} gcm_len_block;
u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
@@ -94,10 +94,10 @@ struct aead_req_ctx {
struct ssi_mlli assoc;
struct ssi_mlli src;
struct ssi_mlli dst;
- struct scatterlist *srcSgl;
- struct scatterlist *dstSgl;
- unsigned int srcOffset;
- unsigned int dstOffset;
+ struct scatterlist *src_sgl;
+ struct scatterlist *dst_sgl;
+ unsigned int src_offset;
+ unsigned int dst_offset;
enum ssi_req_dma_buf_type assoc_buff_type;
enum ssi_req_dma_buf_type data_buff_type;
struct mlli_params mlli_params;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index b35871eeabd1..6579a54f9dc4 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -150,7 +150,7 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
- u32 new_nents;;
+ u32 new_nents;
/* Verify there is no memory overflow*/
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
@@ -162,8 +162,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
mlli_entry_p = mlli_entry_p + 2;
@@ -173,8 +173,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
*mlli_entry_pp = mlli_entry_p;
(*curr_nents)++;
@@ -182,8 +182,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
}
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
- u32 **mlli_entry_pp)
+ struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
@@ -192,16 +192,17 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
for ( ; (curr_sgl) && (sgl_data_len != 0);
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
- (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
- sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
+ (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+ sg_dma_len(curr_sgl) - sgl_offset :
+ sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
- &mlli_entry_p);
+ sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
if (rc != 0)
return rc;
- sglOffset = 0;
+ sgl_offset = 0;
}
*mlli_entry_pp = mlli_entry_p;
return 0;
@@ -221,7 +222,7 @@ static int ssi_buffer_mgr_generate_mlli(
/* Allocate memory from the pointed pool */
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
- &(mlli_params->mlli_dma_addr));
+ &mlli_params->mlli_dma_addr);
if (unlikely(!mlli_params->mlli_virt_addr)) {
SSI_LOG_ERR("dma_pool_alloc() failed\n");
rc = -ENOMEM;
@@ -249,7 +250,7 @@ static int ssi_buffer_mgr_generate_mlli(
/*Calculate the current MLLI table length for the
*length field in the descriptor
*/
- *(sg_data->mlli_nents[i]) +=
+ *sg_data->mlli_nents[i] +=
(total_nents - prev_total_nents);
prev_total_nents = total_nents;
}
@@ -259,9 +260,9 @@ static int ssi_buffer_mgr_generate_mlli(
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
SSI_LOG_DEBUG("MLLI params: "
- "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
+ "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr,
- (unsigned long long)mlli_params->mlli_dma_addr,
+ mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
build_mlli_exit:
@@ -275,9 +276,9 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
+ SSI_LOG_DEBUG("index=%u single_buff=%pad "
"buffer_len=0x%08X is_last=%d\n",
- index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
+ index, buffer_dma, buffer_len, is_last_entry);
sgl_data->nents[index] = 1;
sgl_data->entry[index].buffer_dma = buffer_dma;
sgl_data->offset[index] = 0;
@@ -302,7 +303,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
unsigned int index = sgl_data->num_of_buffers;
SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
- index, nents, sgl, data_len, is_last_table);
+ index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
@@ -317,7 +318,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
@@ -358,10 +359,10 @@ static int ssi_buffer_mgr_map_scatterlist(
SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped sg: dma_address=%pad "
"page=%p addr=%pK offset=%u "
"length=%u\n",
- (unsigned long long)sg_dma_address(sg),
+ sg_dma_address(sg),
sg_page(sg),
sg_virt(sg),
sg->offset, sg->length);
@@ -374,7 +375,7 @@ static int ssi_buffer_mgr_map_scatterlist(
if (*nents > max_sg_nents) {
*nents = 0;
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
+ *nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
@@ -408,10 +409,10 @@ static int ssi_buffer_mgr_map_scatterlist(
static inline int
ssi_aead_handle_config_buf(struct device *dev,
- struct aead_req_ctx *areq_ctx,
- u8 *config_data,
- struct buffer_array *sg_data,
- unsigned int assoclen)
+ struct aead_req_ctx *areq_ctx,
+ u8 *config_data,
+ struct buffer_array *sg_data,
+ unsigned int assoclen)
{
SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
/* create sg for the current buffer */
@@ -422,10 +423,10 @@ ssi_aead_handle_config_buf(struct device *dev,
"config buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
"page=%p addr=%pK "
"offset=%u length=%u\n",
- (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
areq_ctx->ccm_adata_sg.offset,
@@ -433,19 +434,18 @@ ssi_aead_handle_config_buf(struct device *dev,
/* prepare for case of MLLI */
if (assoclen > 0) {
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
- &areq_ctx->ccm_adata_sg,
- (AES_BLOCK_SIZE +
- areq_ctx->ccm_hdr_size), 0,
- false, NULL);
+ &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
}
return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
- struct ahash_req_ctx *areq_ctx,
- u8 *curr_buff,
- u32 curr_buff_cnt,
- struct buffer_array *sg_data)
+ struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff,
+ u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
{
SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
@@ -456,10 +456,10 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
"src buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
"page=%p addr=%pK "
"offset=%u length=%u\n",
- (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_address(areq_ctx->buff_sg),
sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg),
areq_ctx->buff_sg->offset,
@@ -469,7 +469,7 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
- curr_buff_cnt, 0, false, NULL);
+ curr_buff_cnt, 0, false, NULL);
return 0;
}
@@ -483,9 +483,9 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
- SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
- (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
- ivsize);
+ SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ req_ctx->gen_ctx.iv_dma_addr,
+ ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
@@ -498,16 +498,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
req_ctx->mlli_params.mlli_dma_addr);
}
- dma_unmap_sg(dev, src, req_ctx->in_nents,
- DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
- sg_virt(src));
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
if (src != dst) {
- dma_unmap_sg(dev, dst, req_ctx->out_nents,
- DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
- sg_virt(dst));
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
}
}
@@ -542,22 +538,24 @@ int ssi_buffer_mgr_map_blkcipher_request(
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
- req_ctx->gen_ctx.iv_dma_addr))) {
+ req_ctx->gen_ctx.iv_dma_addr))) {
SSI_LOG_ERR("Mapping iv %u B at va=%pK "
"for DMA failed\n", ivsize, info);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
- ivsize, info,
- (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info,
+ req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
}
/* Map the src SGL */
rc = ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto ablkcipher_exit;
@@ -570,8 +568,10 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
req_ctx->out_nents = 0;
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->in_nents, src,
- nbytes, 0, true, &req_ctx->in_mlli_nents);
+ req_ctx->in_nents,
+ src, nbytes, 0,
+ true,
+ &req_ctx->in_mlli_nents);
}
} else {
/* Map the dst sg */
@@ -588,13 +588,15 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->in_nents, src,
- nbytes, 0, true,
- &req_ctx->in_mlli_nents);
+ req_ctx->in_nents,
+ src, nbytes, 0,
+ true,
+ &req_ctx->in_mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->out_nents, dst,
- nbytes, 0, true,
- &req_ctx->out_mlli_nents);
+ req_ctx->out_nents,
+ dst, nbytes, 0,
+ true,
+ &req_ctx->out_mlli_nents);
}
}
@@ -606,7 +608,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
}
SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
return 0;
@@ -628,7 +630,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->mac_buf_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
- MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
}
#if SSI_CC_HAS_AES_GCM
@@ -645,12 +647,12 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
}
#endif
@@ -658,7 +660,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (areq_ctx->ccm_iv0_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
@@ -672,9 +674,9 @@ void ssi_buffer_mgr_unmap_aead_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
- (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
@@ -690,14 +692,17 @@ void ssi_buffer_mgr_unmap_aead_request(
dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
- sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ sg_virt(req->dst));
+ dma_unmap_sg(dev, req->dst,
+ ssi_buffer_mgr_get_sgl_nents(req->dst,
+ size_to_unmap,
+ &dummy,
+ &chained),
+ DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst))
- {
+ likely(req->src == req->dst)) {
u32 size_to_skip = req->assoclen;
if (areq_ctx->is_gcm4543)
@@ -753,11 +758,11 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
*is_icv_fragmented = true;
} else {
SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
+ MAX_ICV_NENTS_SUPPORTED);
nents = -1; /*unsupported*/
}
SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
+ (*is_icv_fragmented ? "true" : "false"), nents);
return nents;
}
@@ -782,14 +787,14 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
hw_iv_size, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
- hw_iv_size, req->iv);
+ hw_iv_size, req->iv);
rc = -ENOMEM;
goto chain_iv_exit;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
- hw_iv_size, req->iv,
- (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv,
+ areq_ctx->gen_ctx.iv_dma_addr);
if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
@@ -833,8 +838,8 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -868,10 +873,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (unlikely((mapped_nents + 1) >
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("CCM case.Too many fragments. "
- "Current %d max %d\n",
- (areq_ctx->assoc.nents + 1),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
rc = -ENOMEM;
goto chain_assoc_exit;
}
@@ -884,10 +888,10 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
if (unlikely((do_chain) ||
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+ (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
ssi_buffer_mgr_add_scatterlist_entry(
sg_data, areq_ctx->assoc.nents,
req->src, req->assoclen, 0, is_last,
@@ -911,26 +915,26 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else {
/*NON-INPLACE and ENCRYPT*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->dstSgl) +
+ areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dstSgl) +
+ areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
}
}
@@ -951,13 +955,18 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
- areq_ctx->src.nents, authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize,
+ *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
@@ -995,27 +1004,35 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
}
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
- &areq_ctx->dst.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
- areq_ctx->src.nents, authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ areq_ctx->dst.nents,
+ areq_ctx->dst_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->dst_offset,
+ is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize,
+ *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
@@ -1039,26 +1056,34 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
}
} else {
/*NON-INPLACE and ENCRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
- &areq_ctx->dst.mlli_nents);
+ areq_ctx->dst.nents,
+ areq_ctx->dst_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->dst_offset,
+ is_last_table,
+ &areq_ctx->dst.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
- areq_ctx->dst.nents, authsize, *dst_last_bytes,
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize,
+ *dst_last_bytes,
&areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
@@ -1068,10 +1093,10 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
if (likely(!areq_ctx->is_icv_fragmented)) {
/* Contig. ICV */
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
(*dst_last_bytes - authsize);
} else {
areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
@@ -1113,37 +1138,36 @@ static inline int ssi_buffer_mgr_aead_chain_data(
rc = -EINVAL;
goto chain_data_exit;
}
- areq_ctx->srcSgl = req->src;
- areq_ctx->dstSgl = req->dst;
+ areq_ctx->src_sgl = req->src;
+ areq_ctx->dst_sgl = req->dst;
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
- sg_index = areq_ctx->srcSgl->length;
+ sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->srcSgl->length;
- areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
+ offset -= areq_ctx->src_sgl->length;
+ areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
//if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->srcSgl) {
+ if (!areq_ctx->src_sgl) {
SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
- sg_index += areq_ctx->srcSgl->length;
+ sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
- if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
- {
+ if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->src.nents = src_mapped_nents;
- areq_ctx->srcOffset = offset;
+ areq_ctx->src_offset = offset;
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
@@ -1152,9 +1176,11 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
- DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
- &dst_mapped_nents);
+ DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dst_last_bytes,
+ &dst_mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto chain_data_exit;
@@ -1162,35 +1188,37 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
- sg_index = areq_ctx->dstSgl->length;
+ sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->dstSgl->length;
- areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
+ offset -= areq_ctx->dst_sgl->length;
+ areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
//if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dstSgl) {
+ if (!areq_ctx->dst_sgl) {
SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
- sg_index += areq_ctx->dstSgl->length;
+ sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
- if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
- {
+ if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
- areq_ctx->dstOffset = offset;
+ areq_ctx->dst_offset = offset;
if ((src_mapped_nents > 1) ||
(dst_mapped_nents > 1) ||
do_chain) {
areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
- rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes, &dst_last_bytes, is_last_table);
+ rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
+ sg_data,
+ &src_last_bytes,
+ &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
ssi_buffer_mgr_prepare_aead_data_dlli(
@@ -1202,7 +1230,7 @@ chain_data_exit:
}
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
- struct aead_request *req)
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
u32 curr_mlli_size = 0;
@@ -1274,8 +1302,7 @@ int ssi_buffer_mgr_map_aead_request(
if (drvdata->coherent &&
(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst))
- {
+ likely(req->src == req->dst)) {
u32 size_to_skip = req->assoclen;
if (is_gcm4543)
@@ -1300,7 +1327,7 @@ int ssi_buffer_mgr_map_aead_request(
areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
- MAX_MAC_SIZE, areq_ctx->mac_buf);
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1319,7 +1346,8 @@ int ssi_buffer_mgr_map_aead_request(
goto aead_map_failure;
}
if (ssi_aead_handle_config_buf(dev, areq_ctx,
- areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
+ areq_ctx->ccm_config, &sg_data,
+ req->assoclen) != 0) {
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1331,7 +1359,7 @@ int ssi_buffer_mgr_map_aead_request(
areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, areq_ctx->hkey);
+ AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1340,7 +1368,7 @@ int ssi_buffer_mgr_map_aead_request(
&areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1380,7 +1408,7 @@ int ssi_buffer_mgr_map_aead_request(
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
- size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
+ size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
@@ -1491,18 +1519,18 @@ int ssi_buffer_mgr_map_hash_request_final(
/* map the previous buffer */
if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ *curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
}
}
if (src && (nbytes > 0) && do_update) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes,
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents))){
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy,
+ &mapped_nents))){
goto unmap_curr_buff;
}
if (src && (mapped_nents == 1)
@@ -1522,19 +1550,18 @@ int ssi_buffer_mgr_map_hash_request_final(
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- areq_ctx->in_nents,
- src,
- nbytes, 0,
- true, &areq_ctx->mlli_nents);
+ areq_ctx->in_nents,
+ src, nbytes, 0, true,
+ &areq_ctx->mlli_nents);
if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ mlli_params) != 0)) {
goto fail_unmap_din;
}
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
@@ -1588,8 +1615,8 @@ int ssi_buffer_mgr_map_hash_request_update(
&curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
ssi_buffer_mgr_get_sgl_nents(src,
- nbytes,
- &dummy, NULL);
+ nbytes,
+ &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1612,15 +1639,15 @@ int ssi_buffer_mgr_map_hash_request_update(
(update_data_len - *curr_buff_cnt),
*next_buff_cnt);
ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
- (update_data_len - *curr_buff_cnt),
- nbytes, SSI_SG_TO_BUF);
+ (update_data_len - *curr_buff_cnt),
+ nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ *curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
}
/* change the buffer index for next operation */
@@ -1629,11 +1656,12 @@ int ssi_buffer_mgr_map_hash_request_update(
if (update_data_len > *curr_buff_cnt) {
if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents))){
+ (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy,
+ &mapped_nents))){
goto unmap_curr_buff;
}
if ((mapped_nents == 1)
@@ -1653,12 +1681,14 @@ int ssi_buffer_mgr_map_hash_request_update(
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- areq_ctx->in_nents,
- src,
- (update_data_len - *curr_buff_cnt), 0,
- true, &areq_ctx->mlli_nents);
+ areq_ctx->in_nents,
+ src,
+ (update_data_len - *curr_buff_cnt),
+ 0,
+ true,
+ &areq_ctx->mlli_nents);
if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ mlli_params) != 0)) {
goto fail_unmap_din;
}
}
@@ -1687,28 +1717,28 @@ void ssi_buffer_mgr_unmap_hash_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
- (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if ((src) && likely(areq_ctx->in_nents != 0)) {
- SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
- sg_virt(src),
- (unsigned long long)sg_dma_address(src),
- sg_dma_len(src));
+ SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src),
+ sg_dma_address(src),
+ sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len != 0) {
SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- " dma=0x%llX len 0x%X\n",
+ " dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
- (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
@@ -1725,8 +1755,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = &drvdata->plat_dev->dev;
- buff_mgr_handle = (struct buff_mgr_handle *)
- kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
+ buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
if (!buff_mgr_handle)
return -ENOMEM;
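Several of the hunks above switch dma_addr_t prints from "0x%llX" plus a cast over to the kernel's "%pad" specifier. For reference, a minimal sketch of the %pad convention follows; the struct and function names are illustrative only (not from this driver), and note that %pad is documented (printk-formats) to take a pointer to the dma_addr_t rather than its value.

/* Illustrative sketch only: %pad prints a dma_addr_t portably, but the
 * printk format documentation specifies it takes a POINTER to the
 * dma_addr_t, hence the '&' below.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct example_buf {		/* hypothetical container, not from the driver */
	void *vaddr;
	dma_addr_t dma;
	size_t len;
};

static void example_dump_buf(struct device *dev, struct example_buf *b)
{
	/* %pK for the virtual address, %pad for the DMA address */
	dev_dbg(dev, "buf: virt=%pK dma=%pad len=%zu\n",
		b->vaddr, &b->dma, b->len);
}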
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index cd2eafc04232..bfe9b1ccbf37 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
+#include <crypto/xts.h>
#include "ssi_config.h"
#include "ssi_driver.h"
@@ -31,7 +32,6 @@
#include "ssi_cipher.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
-#include "ssi_fips_local.h"
#define MAX_ABLKCIPHER_SEQ_LEN 6
@@ -68,7 +68,8 @@ struct ssi_ablkcipher_ctx {
static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
+{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (size) {
@@ -92,8 +93,7 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
break;
}
case S_DIN_to_DES:
- if (likely(size == DES3_EDE_KEY_SIZE ||
- size == DES_KEY_SIZE))
+ if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
return 0;
break;
#if SSI_CC_HAS_MULTI2
@@ -108,7 +108,8 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
return -EINVAL;
}
-static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
+static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size)
+{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
@@ -183,10 +184,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
- crypto_tfm_alg_name(tfm));
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n",
+ ctx_p, crypto_tfm_alg_name(tfm));
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx_p->cipher_mode = ssi_alg->cipher_mode;
ctx_p->flow_mode = ssi_alg->flow_mode;
ctx_p->drvdata = ssi_alg->drvdata;
@@ -206,12 +206,12 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
max_key_buf_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
- max_key_buf_size, ctx_p->user.key);
+ max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
- max_key_buf_size, ctx_p->user.key,
- (unsigned long long)ctx_p->user.key_dma_addr);
+ SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key,
+ ctx_p->user.key_dma_addr);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
@@ -232,7 +232,7 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
unsigned int max_key_buf_size = get_max_keysize(tfm);
SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Free hash tfm for essiv */
@@ -242,9 +242,9 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
/* Unmap key buffer */
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
- DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
- (unsigned long long)ctx_p->user.key_dma_addr);
+ DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=%pad\n",
+ ctx_p->user.key_dma_addr);
/* Free key buffer in context */
kfree(ctx_p->user.key);
@@ -263,31 +263,15 @@ static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that tdes keys are not weak.*/
-static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
+static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
{
-#ifdef CCREE_FIPS_SUPPORT
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
- (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+ (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
return -ENOEXEC;
}
-#endif /* CCREE_FIPS_SUPPORT */
-
- return 0;
-}
-
-/* The function verifies that xts keys are not weak.*/
-static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen)
-{
-#ifdef CCREE_FIPS_SUPPORT
- /* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */
- int singleKeySize = keylen >> 1;
-
- if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0))
- return -ENOEXEC;
-#endif /* CCREE_FIPS_SUPPORT */
return 0;
}
@@ -317,11 +301,9 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
unsigned int max_key_buf_size = get_max_keysize(tfm);
SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
- ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
dump_byte_array("key", (u8 *)key, keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
/* STAT_PHASE_0: Init and sanity checks */
@@ -383,20 +365,20 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
}
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
- ssi_fips_verify_xts_keys(key, keylen) != 0) {
+ xts_check_key(tfm, key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
return -EINVAL;
}
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
- ssi_fips_verify_3des_keys(key, keylen) != 0) {
+ ssi_verify_3des_keys(key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
return -EINVAL;
}
/* STAT_PHASE_1: Copy key to ctx */
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
- max_key_buf_size, DMA_TO_DEVICE);
+ max_key_buf_size, DMA_TO_DEVICE);
if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
#if SSI_CC_HAS_MULTI2
@@ -429,7 +411,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
- max_key_buf_size, DMA_TO_DEVICE);
+ max_key_buf_size, DMA_TO_DEVICE);
ctx_p->keylen = keylen;
SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
@@ -632,17 +614,15 @@ ssi_blkcipher_create_data_desc(
break;
#endif /*SSI_CC_HAS_MULTI2*/
default:
- SSI_LOG_ERR("invalid flow mode, flow_mode = %d \n", flow_mode);
+ SSI_LOG_ERR("invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
- SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
- (unsigned long long)sg_dma_address(src),
- nbytes);
- SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
- (unsigned long long)sg_dma_address(dst),
- nbytes);
+ SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
+ sg_dma_address(src), nbytes);
+ SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
+ sg_dma_address(dst), nbytes);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
nbytes, NS_BIT);
@@ -655,9 +635,9 @@ ssi_blkcipher_create_data_desc(
(*seq_size)++;
} else {
/* bypass */
- SSI_LOG_DEBUG(" bypass params addr 0x%llX "
+ SSI_LOG_DEBUG(" bypass params addr %pad "
"length 0x%X addr 0x%08X\n",
- (unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
@@ -706,13 +686,13 @@ ssi_blkcipher_create_data_desc(
}
static int ssi_blkcipher_complete(struct device *dev,
- struct ssi_ablkcipher_ctx *ctx_p,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int ivsize,
- void *areq,
- void __iomem *cc_base)
+ struct ssi_ablkcipher_ctx *ctx_p,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ void *areq,
+ void __iomem *cc_base)
{
int completion_error = 0;
u32 inflight_counter;
@@ -749,10 +729,9 @@ static int ssi_blkcipher_process(
int rc, seq_len = 0, cts_restore_flag = 0;
SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
- ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- areq, info, nbytes);
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
+ areq, info, nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
@@ -804,12 +783,8 @@ static int ssi_blkcipher_process(
ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
desc, &seq_len);
/* Data processing */
- ssi_blkcipher_create_data_desc(tfm,
- req_ctx,
- dst, src,
- nbytes,
- areq,
- desc, &seq_len);
+ ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq,
+ desc, &seq_len);
/* do we need to generate IV? */
if (req_ctx->is_giv) {
@@ -853,8 +828,6 @@ static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __io
struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();
-
ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
ivsize, areq, cc_base);
}
@@ -871,8 +844,8 @@ static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
}
static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key,
- unsigned int keylen)
+ const u8 *key,
+ unsigned int keylen)
{
return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
@@ -1286,7 +1259,7 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
- &blkcipher_handle->blkcipher_alg_list,
+ &blkcipher_handle->blkcipher_alg_list,
entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
@@ -1306,7 +1279,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
int alg;
ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!ablkcipher_handle)
return -ENOMEM;
@@ -1322,7 +1295,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
SSI_LOG_ERR("%s alg allocation failed\n",
- blkcipher_algs[alg].driver_name);
+ blkcipher_algs[alg].driver_name);
goto fail0;
}
t_alg->drvdata = drvdata;
@@ -1330,17 +1303,17 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
rc = crypto_register_alg(&t_alg->crypto_alg);
SSI_LOG_DEBUG("%s alg registration rc = %x\n",
- t_alg->crypto_alg.cra_driver_name, rc);
+ t_alg->crypto_alg.cra_driver_name, rc);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
goto fail0;
} else {
list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
SSI_LOG_DEBUG("Registered %s\n",
- t_alg->crypto_alg.cra_driver_name);
+ t_alg->crypto_alg.cra_driver_name);
}
}
return 0;
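The setkey path above now relies on xts_check_key() from <crypto/xts.h> plus the retained ssi_verify_3des_keys() for weak-key rejection. As a standalone illustration of the two conditions described by the code above (an XTS key whose two halves are equal, which xts_check_key() rejects at least when FIPS mode is enabled, and a 3DES key where key1 == key2 or key3 == key2), here is a userspace sketch; these helpers are illustrative, not the kernel's implementations.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Weak XTS key: the key is two equal-sized halves; equal halves are weak. */
static bool xts_key_is_weak(const unsigned char *key, size_t keylen)
{
	size_t half = keylen / 2;

	return memcmp(key, key + half, half) == 0;
}

/* Weak 3DES key: key1 must differ from key2 and key3 must differ from key2. */
static bool tdes_key_is_weak(const unsigned char *key, size_t keylen)
{
	const size_t des_key = 8;	/* single DES key, in bytes */

	if (keylen != 3 * des_key)
		return false;
	return memcmp(key, key + des_key, des_key) == 0 ||		/* key1 == key2 */
	       memcmp(key + 2 * des_key, key + des_key, des_key) == 0;	/* key3 == key2 */
}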
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 78709b92736d..e0faca0a30a6 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -71,7 +71,7 @@
#include "ssi_ivgen.h"
#include "ssi_sram_mgr.h"
#include "ssi_pm.h"
-#include "ssi_fips_local.h"
+#include "ssi_fips.h"
#ifdef DX_DUMP_BYTES
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
@@ -81,12 +81,11 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
char line_buf[80];
if (!the_array) {
- SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
+ SSI_LOG_ERR("cannot %s - NULL pointer\n", __func__);
return;
}
- ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
- name, size);
+ ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size);
if (ret < 0) {
SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
return;
@@ -95,8 +94,8 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
for (i = 0, cur_byte = the_array;
(i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
ret = snprintf(line_buf + line_offset,
- sizeof(line_buf) - line_offset,
- "0x%02X ", *cur_byte);
+ sizeof(line_buf) - line_offset,
+ "0x%02X ", *cur_byte);
if (ret < 0) {
SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
return;
@@ -193,11 +192,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
#ifdef DX_IRQ_DELAY
/* Set CC IRQ delay */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
- DX_IRQ_DELAY);
+ DX_IRQ_DELAY);
#endif
if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
}
#endif
@@ -251,10 +250,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = -ENODEV;
goto init_cc_res_err;
}
- SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n",
- new_drvdata->res_mem->name,
- (unsigned long long)new_drvdata->res_mem->start,
- (unsigned long long)new_drvdata->res_mem->end);
+ SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n",
+ new_drvdata->res_mem->name,
+ new_drvdata->res_mem->start,
+ new_drvdata->res_mem->end);
/* Map registers space */
req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
if (unlikely(!req_mem_cc_regs)) {
@@ -266,7 +265,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
if (unlikely(!cc_base)) {
SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
- (unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
+ (unsigned int)new_drvdata->res_mem->start,
+ (unsigned int)resource_size(new_drvdata->res_mem));
rc = -ENOMEM;
goto init_cc_res_err;
}
@@ -284,15 +284,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
IRQF_SHARED, "arm_cc7x", new_drvdata);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("Could not register to interrupt %llu\n",
- (unsigned long long)new_drvdata->res_irq->start);
+ (unsigned long long)new_drvdata->res_irq->start);
goto init_cc_res_err;
}
init_completion(&new_drvdata->icache_setup_completion);
irq_registered = true;
SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
- new_drvdata->res_irq->name,
- (unsigned long long)new_drvdata->res_irq->start);
+ new_drvdata->res_irq->name,
+ (unsigned long long)new_drvdata->res_irq->start);
new_drvdata->plat_dev = plat_dev;
@@ -301,19 +301,16 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto init_cc_res_err;
if (!new_drvdata->plat_dev->dev.dma_mask)
- {
new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
- }
+
if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
- {
new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
- }
/* Verify correct mapping */
signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
if (signature_val != DX_DEV_SIGNATURE) {
SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ signature_val, (u32)DX_DEV_SIGNATURE);
rc = -EINVAL;
goto init_cc_res_err;
}
@@ -330,7 +327,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata);
+ rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("init_stat_db failed\n");
goto init_cc_res_err;
@@ -401,6 +398,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto init_cc_res_err;
}
+ /* If we got here and FIPS mode is enabled,
+ * it means all FIPS tests passed, so let the TEE
+ * know we're good.
+ */
+ cc_set_ree_fips_status(new_drvdata, true);
+
return 0;
init_cc_res_err:
@@ -428,7 +431,7 @@ init_cc_res_err:
new_drvdata->cc_base = NULL;
}
release_mem_region(new_drvdata->res_mem->start,
- resource_size(new_drvdata->res_mem));
+ resource_size(new_drvdata->res_mem));
new_drvdata->res_mem = NULL;
}
kfree(new_drvdata);
@@ -471,7 +474,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
if (drvdata->cc_base) {
iounmap(drvdata->cc_base);
release_mem_region(drvdata->res_mem->start,
- resource_size(drvdata->res_mem));
+ resource_size(drvdata->res_mem));
drvdata->cc_base = NULL;
drvdata->res_mem = NULL;
}
@@ -516,12 +519,12 @@ static int cc7x_probe(struct platform_device *plat_dev)
asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
cacheline_size = 4 << ((ctr >> 16) & 0xf);
SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
+ cacheline_size, L1_CACHE_BYTES);
asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
- " Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF, (ctr >> 20) & 0xF, ctr & 0xF);
+ SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
+ (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
+ (ctr >> 20) & 0xF, ctr & 0xF);
#endif
/* Map registers space */
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index c1ed61f1a202..b6ad89ae9bee 100644
--- a/drivers/staging/ccree/ssi_driver.h
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -48,7 +48,6 @@
#include "cc_crypto_ctx.h"
#include "ssi_sysfs.h"
#include "hash_defs.h"
-#include "ssi_fips_local.h"
#include "cc_hw_queue_defs.h"
#include "ssi_sram_mgr.h"
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
index fdc40f38332a..33d53d64603d 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -14,48 +14,115 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-/**************************************************************
- * This file defines the driver FIPS APIs *
- **************************************************************/
+#include <linux/kernel.h>
+#include <linux/fips.h>
-#include <linux/module.h>
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "cc_hal.h"
#include "ssi_fips.h"
-extern int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state);
-extern int ssi_fips_ext_get_error(enum cc_fips_error *p_err);
+static void fips_dsr(unsigned long devarg);
+
+struct ssi_fips_handle {
+ struct tasklet_struct tasklet;
+};
+
+/* The function is called once at the driver entry point to check
+ * whether a TEE FIPS error has occurred.
+ */
+static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+{
+ u32 reg;
+ void __iomem *cc_base = drvdata->cc_base;
+
+ reg = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+}
/*
- * This function returns the REE FIPS state.
- * It should be called by kernel module.
+ * This function should push the FIPS REE library status towards the TEE library
+ * by writing the error state to the HOST_GPR0 register.
*/
-int ssi_fips_get_state(enum cc_fips_state_t *p_state)
+void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+{
+ void __iomem *cc_base = drvdata->cc_base;
+ int val = CC_FIPS_SYNC_REE_STATUS;
+
+ val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), val);
+}
+
+void ssi_fips_fini(struct ssi_drvdata *drvdata)
{
- int rc = 0;
+ struct ssi_fips_handle *fips_h = drvdata->fips_handle;
- if (!p_state)
- return -EINVAL;
+ if (!fips_h)
+ return; /* Not allocated */
- rc = ssi_fips_ext_get_state(p_state);
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->tasklet);
- return rc;
+ kfree(fips_h);
+ drvdata->fips_handle = NULL;
}
-EXPORT_SYMBOL(ssi_fips_get_state);
+void fips_handler(struct ssi_drvdata *drvdata)
+{
+ struct ssi_fips_handle *fips_handle_ptr =
+ drvdata->fips_handle;
-/*
- * This function returns the REE FIPS error.
- * It should be called by kernel module.
- */
-int ssi_fips_get_error(enum cc_fips_error *p_err)
+ tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
+static inline void tee_fips_error(void)
{
- int rc = 0;
+ if (fips_enabled)
+ panic("ccree: TEE reported cryptographic error in fips mode!\n");
+ else
+ SSI_LOG_ERR("TEE reported error!\n");
+}
- if (!p_err)
- return -EINVAL;
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ void __iomem *cc_base = drvdata->cc_base;
+ u32 irq, state, val;
- rc = ssi_fips_ext_get_error(p_err);
+ irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
- return rc;
+ if (irq) {
+ state = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+
+ if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
+ tee_fips_error();
+ }
+
+ /* after verifying that there is nothing to do,
+ * unmask the AXI completion interrupt.
+ */
+ val = (CC_REG_OFFSET(HOST_RGF, HOST_IMR) & ~irq);
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
}
-EXPORT_SYMBOL(ssi_fips_get_error);
+/* The function is called once at the driver entry point. */
+int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ struct ssi_fips_handle *fips_h;
+
+ fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ if (!fips_h)
+ return -ENOMEM;
+
+ p_drvdata->fips_handle = fips_h;
+
+ SSI_LOG_DEBUG("Initializing fips tasklet\n");
+ tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error();
+
+ return 0;
+}
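The rewritten ssi_fips.c above synchronizes FIPS state with the TEE through two general-purpose registers: the driver reads the TEE status from GPR_HOST and writes its own status to HOST_GPR0, each word combining a "who" flag with an OK/error flag. Below is a standalone sketch of how those words compose, reusing the CC_FIPS_SYNC_* values defined in ssi_fips.h further down; the register I/O itself is not modeled here.

/* Standalone sketch of the REE/TEE FIPS status words used above.
 * Values mirror enum cc_fips_status in ssi_fips.h; register access is
 * stubbed out, so this only illustrates how the words are built/checked.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CC_FIPS_SYNC_MODULE_OK    0x0
#define CC_FIPS_SYNC_MODULE_ERROR 0x1
#define CC_FIPS_SYNC_REE_STATUS   0x4
#define CC_FIPS_SYNC_TEE_STATUS   0x8

/* What the driver writes to HOST_GPR0 in cc_set_ree_fips_status() */
static uint32_t ree_status_word(bool ok)
{
	return CC_FIPS_SYNC_REE_STATUS |
	       (ok ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
}

/* What cc_get_tee_fips_status()/fips_dsr() expect to read from GPR_HOST */
static bool tee_status_ok(uint32_t gpr_host)
{
	return gpr_host == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK);
}

int main(void)
{
	assert(ree_status_word(true) == 0x4);	/* REE reports OK */
	assert(ree_status_word(false) == 0x5);	/* REE reports error */
	assert(tee_status_ok(0x8));		/* TEE reports OK */
	assert(!tee_status_ok(0x9));		/* TEE reports error */
	return 0;
}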
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
index 4f5c6a9a8363..369ddf9478e7 100644
--- a/drivers/staging/ccree/ssi_fips.h
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -17,45 +17,33 @@
#ifndef __SSI_FIPS_H__
#define __SSI_FIPS_H__
-/*!
- * @file
- * @brief This file contains FIPS related defintions and APIs.
- */
+#ifdef CONFIG_CRYPTO_FIPS
-enum cc_fips_state {
- CC_FIPS_STATE_NOT_SUPPORTED = 0,
- CC_FIPS_STATE_SUPPORTED,
- CC_FIPS_STATE_ERROR,
- CC_FIPS_STATE_RESERVE32B = S32_MAX
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
};
-enum cc_fips_error {
- CC_REE_FIPS_ERROR_OK = 0,
- CC_REE_FIPS_ERROR_GENERAL,
- CC_REE_FIPS_ERROR_FROM_TEE,
- CC_REE_FIPS_ERROR_AES_ECB_PUT,
- CC_REE_FIPS_ERROR_AES_CBC_PUT,
- CC_REE_FIPS_ERROR_AES_OFB_PUT,
- CC_REE_FIPS_ERROR_AES_CTR_PUT,
- CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT,
- CC_REE_FIPS_ERROR_AES_XTS_PUT,
- CC_REE_FIPS_ERROR_AES_CMAC_PUT,
- CC_REE_FIPS_ERROR_AESCCM_PUT,
- CC_REE_FIPS_ERROR_AESGCM_PUT,
- CC_REE_FIPS_ERROR_DES_ECB_PUT,
- CC_REE_FIPS_ERROR_DES_CBC_PUT,
- CC_REE_FIPS_ERROR_SHA1_PUT,
- CC_REE_FIPS_ERROR_SHA256_PUT,
- CC_REE_FIPS_ERROR_SHA512_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA1_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA256_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA512_PUT,
- CC_REE_FIPS_ERROR_ROM_CHECKSUM,
- CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX
-};
+int ssi_fips_init(struct ssi_drvdata *p_drvdata);
+void ssi_fips_fini(struct ssi_drvdata *drvdata);
+void fips_handler(struct ssi_drvdata *drvdata);
+void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
+static inline void fips_handler(struct ssi_drvdata *drvdata) {}
-int ssi_fips_get_state(enum cc_fips_state *p_state);
-int ssi_fips_get_error(enum cc_fips_error *p_err);
+#endif /* CONFIG_CRYPTO_FIPS */
#endif /*__SSI_FIPS_H__*/
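The header above compiles the FIPS hooks down to empty stubs when CONFIG_CRYPTO_FIPS is not set, so callers can invoke them unconditionally. A sketch of the intended call pattern follows; example_probe() and example_remove() are hypothetical stand-ins, not the driver's actual init_cc_resources()/cleanup_cc_resources() bodies.

#include "ssi_driver.h"
#include "ssi_fips.h"

static int example_probe(struct ssi_drvdata *drvdata)
{
	int rc;

	rc = ssi_fips_init(drvdata);	/* returns 0 as a stub without CONFIG_CRYPTO_FIPS */
	if (rc)
		return rc;

	/* ... remaining device init; on success report "module OK" to the TEE */
	cc_set_ree_fips_status(drvdata, true);
	return 0;
}

static void example_remove(struct ssi_drvdata *drvdata)
{
	ssi_fips_fini(drvdata);		/* also a no-op stub without CONFIG_CRYPTO_FIPS */
}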
diff --git a/drivers/staging/ccree/ssi_fips_data.h b/drivers/staging/ccree/ssi_fips_data.h
deleted file mode 100644
index c41671dbee40..000000000000
--- a/drivers/staging/ccree/ssi_fips_data.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * The test vectors were taken from:
- *
- * * AES
- * NIST Special Publication 800-38A 2001 Edition
- * Recommendation for Block Cipher Modes of Operation
- * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
- * Appendix F: Example Vectors for Modes of Operation of the AES
- *
- * * AES CTS
- * Advanced Encryption Standard (AES) Encryption for Kerberos 5
- * February 2005
- * https://tools.ietf.org/html/rfc3962#appendix-B
- * B. Sample Test Vectors
- *
- * * AES XTS
- * http://csrc.nist.gov/groups/STM/cavp/#08
- * http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
- *
- * * AES CMAC
- * http://csrc.nist.gov/groups/STM/cavp/index.html#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
- *
- * * AES-CCM
- * http://csrc.nist.gov/groups/STM/cavp/#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
- *
- * * AES-GCM
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
- *
- * * Triple-DES
- * NIST Special Publication 800-67 January 2012
- * Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher
- * http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf
- * APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS
- * and
- * http://csrc.nist.gov/groups/STM/cavp/#01
- * http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
- *
- * * HASH
- * http://csrc.nist.gov/groups/STM/cavp/#03
- * http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
- *
- * * HMAC
- * http://csrc.nist.gov/groups/STM/cavp/#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
- */
-
-/* NIST AES */
-#define AES_128_BIT_KEY_SIZE 16
-#define AES_192_BIT_KEY_SIZE 24
-#define AES_256_BIT_KEY_SIZE 32
-#define AES_512_BIT_KEY_SIZE 64
-
-#define NIST_AES_IV_SIZE 16
-
-#define NIST_AES_128_KEY { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }
-#define NIST_AES_192_KEY { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, \
- 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b }
-#define NIST_AES_256_KEY { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, \
- 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }
-#define NIST_AES_VECTOR_SIZE 16
-#define NIST_AES_PLAIN_DATA { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a }
-
-#define NIST_AES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-#define NIST_AES_128_ECB_CIPHER { 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97 }
-#define NIST_AES_192_ECB_CIPHER { 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f, 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc }
-#define NIST_AES_256_ECB_CIPHER { 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c, 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8 }
-
-#define NIST_AES_CBC_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
-#define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d }
-#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
-#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
-
-#define NIST_AES_OFB_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
-#define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a }
-#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
-#define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 }
-
-#define NIST_AES_CTR_IV { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }
-#define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce }
-#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
-#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
-
-#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
-#define RFC3962_AES_VECTOR_SIZE 17
-#define RFC3962_AES_PLAIN_DATA { 0x49, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20 }
-#define RFC3962_AES_CBC_CTS_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-#define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 }
-
-#define NIST_AES_256_XTS_KEY { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \
- 0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
-#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
-#define NIST_AES_256_XTS_VECTOR_SIZE 16
-#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
-#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
-
-#define NIST_AES_512_XTS_KEY { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \
- 0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \
- 0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d, 0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3, \
- 0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58, 0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08 }
-#define NIST_AES_512_XTS_IV { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64, }
-#define NIST_AES_512_XTS_VECTOR_SIZE 32
-#define NIST_AES_512_XTS_PLAIN { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \
- 0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
-#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
- 0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
-
-/* NIST AES-CMAC */
-#define NIST_AES_128_CMAC_KEY { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 }
-#define NIST_AES_128_CMAC_PLAIN_DATA { 0xa8, 0xde, 0x55, 0x17, 0x0c, 0x6d, 0xc0, 0xd8, 0x0d, 0xe3, 0x2f, 0x50, 0x8b, 0xf4, 0x9b, 0x70 }
-#define NIST_AES_128_CMAC_MAC { 0xcf, 0xef, 0x9b, 0x78, 0x39, 0x84, 0x1f, 0xdb, 0xcc, 0xbb, 0x6c, 0x2c, 0xf2, 0x38, 0xf7 }
-#define NIST_AES_128_CMAC_VECTOR_SIZE 16
-#define NIST_AES_128_CMAC_OUTPUT_SIZE 15
-
-#define NIST_AES_192_CMAC_KEY { 0x20, 0x51, 0xaf, 0x34, 0x76, 0x2e, 0xbe, 0x55, 0x6f, 0x72, 0xa5, 0xc6, 0xed, 0xc7, 0x77, 0x1e, \
- 0xb9, 0x24, 0x5f, 0xad, 0x76, 0xf0, 0x34, 0xbe }
-#define NIST_AES_192_CMAC_PLAIN_DATA { 0xae, 0x8e, 0x93, 0xc9, 0xc9, 0x91, 0xcf, 0x89, 0x6a, 0x49, 0x1a, 0x89, 0x07, 0xdf, 0x4e, 0x4b, \
- 0xe5, 0x18, 0x6a, 0xe4, 0x96, 0xcd, 0x34, 0x0d, 0xc1, 0x9b, 0x23, 0x78, 0x21, 0xdb, 0x7b, 0x60 }
-#define NIST_AES_192_CMAC_MAC { 0x74, 0xf7, 0x46, 0x08, 0xc0, 0x4f, 0x0f, 0x4e, 0x47, 0xfa, 0x64, 0x04, 0x33, 0xb6, 0xe6, 0xfb }
-#define NIST_AES_192_CMAC_VECTOR_SIZE 32
-#define NIST_AES_192_CMAC_OUTPUT_SIZE 16
-
-#define NIST_AES_256_CMAC_KEY { 0x3a, 0x75, 0xa9, 0xd2, 0xbd, 0xb8, 0xc8, 0x04, 0xba, 0x4a, 0xb4, 0x98, 0x35, 0x73, 0xa6, 0xb2, \
- 0x53, 0x16, 0x0d, 0xd9, 0x0f, 0x8e, 0xdd, 0xfb, 0x2f, 0xdc, 0x2a, 0xb1, 0x76, 0x04, 0xf5, 0xc5 }
-#define NIST_AES_256_CMAC_PLAIN_DATA { 0x42, 0xf3, 0x5d, 0x5a, 0xa5, 0x33, 0xa7, 0xa0, 0xa5, 0xf7, 0x4e, 0x14, 0x4f, 0x2a, 0x5f, 0x20 }
-#define NIST_AES_256_CMAC_MAC { 0xf1, 0x53, 0x2f, 0x87, 0x32, 0xd9, 0xf5, 0x90, 0x30, 0x07 }
-#define NIST_AES_256_CMAC_VECTOR_SIZE 16
-#define NIST_AES_256_CMAC_OUTPUT_SIZE 10
-
-/* NIST TDES */
-#define TDES_NUM_OF_KEYS 3
-#define NIST_TDES_VECTOR_SIZE 8
-#define NIST_TDES_IV_SIZE 8
-
-#define NIST_TDES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-
-#define NIST_TDES_ECB3_KEY { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, \
- 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, \
- 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23 }
-#define NIST_TDES_ECB3_PLAIN_DATA { 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 }
-#define NIST_TDES_ECB3_CIPHER { 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f }
-
-#define NIST_TDES_CBC3_IV { 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 }
-#define NIST_TDES_CBC3_KEY { 0xe9, 0xda, 0x37, 0xf8, 0xdc, 0x97, 0x6d, 0x5b, \
- 0xb6, 0x8c, 0x04, 0xe3, 0xec, 0x98, 0x20, 0x15, \
- 0xf4, 0x0e, 0x08, 0xb5, 0x97, 0x29, 0xf2, 0x8f }
-#define NIST_TDES_CBC3_PLAIN_DATA { 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
-#define NIST_TDES_CBC3_CIPHER { 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
-
-/* NIST AES-CCM */
-#define NIST_AESCCM_128_BIT_KEY_SIZE 16
-#define NIST_AESCCM_192_BIT_KEY_SIZE 24
-#define NIST_AESCCM_256_BIT_KEY_SIZE 32
-
-#define NIST_AESCCM_B0_VAL 0x79 /* L'[0:2]=1 , M'[3-5]=7 , Adata[6]=1, reserved[7]=0 */
-#define NIST_AESCCM_NONCE_SIZE 13
-#define NIST_AESCCM_IV_SIZE 16
-#define NIST_AESCCM_ADATA_SIZE 32
-#define NIST_AESCCM_TEXT_SIZE 16
-#define NIST_AESCCM_TAG_SIZE 16
-
-#define NIST_AESCCM_128_KEY { 0x70, 0x01, 0x0e, 0xd9, 0x0e, 0x61, 0x86, 0xec, 0xad, 0x41, 0xf0, 0xd3, 0xc7, 0xc4, 0x2f, 0xf8 }
-#define NIST_AESCCM_128_NONCE { 0xa5, 0xf4, 0xf4, 0x98, 0x6e, 0x98, 0x47, 0x29, 0x65, 0xf5, 0xab, 0xcc, 0x4b }
-#define NIST_AESCCM_128_ADATA { 0x3f, 0xec, 0x0e, 0x5c, 0xc2, 0x4d, 0x67, 0x13, 0x94, 0x37, 0xcb, 0xc8, 0x11, 0x24, 0x14, 0xfc, \
- 0x8d, 0xac, 0xcd, 0x1a, 0x94, 0xb4, 0x9a, 0x4c, 0x76, 0xe2, 0xd3, 0x93, 0x03, 0x54, 0x73, 0x17 }
-#define NIST_AESCCM_128_PLAIN_TEXT { 0xbe, 0x32, 0x2f, 0x58, 0xef, 0xa7, 0xf8, 0xc6, 0x8a, 0x63, 0x5e, 0x0b, 0x9c, 0xce, 0x77, 0xf2 }
-#define NIST_AESCCM_128_CIPHER { 0x8e, 0x44, 0x25, 0xae, 0x57, 0x39, 0x74, 0xf0, 0xf0, 0x69, 0x3a, 0x18, 0x8b, 0x52, 0x58, 0x12 }
-#define NIST_AESCCM_128_MAC { 0xee, 0xf0, 0x8e, 0x3f, 0xb1, 0x5f, 0x42, 0x27, 0xe0, 0xd9, 0x89, 0xa4, 0xd5, 0x87, 0xa8, 0xcf }
-
-#define NIST_AESCCM_192_KEY { 0x68, 0x73, 0xf1, 0xc6, 0xc3, 0x09, 0x75, 0xaf, 0xf6, 0xf0, 0x84, 0x70, 0x26, 0x43, 0x21, 0x13, \
- 0x0a, 0x6e, 0x59, 0x84, 0xad, 0xe3, 0x24, 0xe9 }
-#define NIST_AESCCM_192_NONCE { 0x7c, 0x4d, 0x2f, 0x7c, 0xec, 0x04, 0x36, 0x1f, 0x18, 0x7f, 0x07, 0x26, 0xd5 }
-#define NIST_AESCCM_192_ADATA { 0x77, 0x74, 0x3b, 0x5d, 0x83, 0xa0, 0x0d, 0x2c, 0x8d, 0x5f, 0x7e, 0x10, 0x78, 0x15, 0x31, 0xb4, \
- 0x96, 0xe0, 0x9f, 0x3b, 0xc9, 0x29, 0x5d, 0x7a, 0xe9, 0x79, 0x9e, 0x64, 0x66, 0x8e, 0xf8, 0xc5 }
-#define NIST_AESCCM_192_PLAIN_TEXT { 0x50, 0x51, 0xa0, 0xb0, 0xb6, 0x76, 0x6c, 0xd6, 0xea, 0x29, 0xa6, 0x72, 0x76, 0x9d, 0x40, 0xfe }
-#define NIST_AESCCM_192_CIPHER { 0x0c, 0xe5, 0xac, 0x8d, 0x6b, 0x25, 0x6f, 0xb7, 0x58, 0x0b, 0xf6, 0xac, 0xc7, 0x64, 0x26, 0xaf }
-#define NIST_AESCCM_192_MAC { 0x40, 0xbc, 0xe5, 0x8f, 0xd4, 0xcd, 0x65, 0x48, 0xdf, 0x90, 0xa0, 0x33, 0x7c, 0x84, 0x20, 0x04 }
-
-#define NIST_AESCCM_256_KEY { 0xee, 0x8c, 0xe1, 0x87, 0x16, 0x97, 0x79, 0xd1, 0x3e, 0x44, 0x3d, 0x64, 0x28, 0xe3, 0x8b, 0x38, \
- 0xb5, 0x5d, 0xfb, 0x90, 0xf0, 0x22, 0x8a, 0x8a, 0x4e, 0x62, 0xf8, 0xf5, 0x35, 0x80, 0x6e, 0x62 }
-#define NIST_AESCCM_256_NONCE { 0x12, 0x16, 0x42, 0xc4, 0x21, 0x8b, 0x39, 0x1c, 0x98, 0xe6, 0x26, 0x9c, 0x8a }
-#define NIST_AESCCM_256_ADATA { 0x71, 0x8d, 0x13, 0xe4, 0x75, 0x22, 0xac, 0x4c, 0xdf, 0x3f, 0x82, 0x80, 0x63, 0x98, 0x0b, 0x6d, \
- 0x45, 0x2f, 0xcd, 0xcd, 0x6e, 0x1a, 0x19, 0x04, 0xbf, 0x87, 0xf5, 0x48, 0xa5, 0xfd, 0x5a, 0x05 }
-#define NIST_AESCCM_256_PLAIN_TEXT { 0xd1, 0x5f, 0x98, 0xf2, 0xc6, 0xd6, 0x70, 0xf5, 0x5c, 0x78, 0xa0, 0x66, 0x48, 0x33, 0x2b, 0xc9 }
-#define NIST_AESCCM_256_CIPHER { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a }
-#define NIST_AESCCM_256_MAC { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 }
-
-/* NIST AES-GCM */
-#define NIST_AESGCM_128_BIT_KEY_SIZE 16
-#define NIST_AESGCM_192_BIT_KEY_SIZE 24
-#define NIST_AESGCM_256_BIT_KEY_SIZE 32
-
-#define NIST_AESGCM_IV_SIZE 12
-#define NIST_AESGCM_ADATA_SIZE 16
-#define NIST_AESGCM_TEXT_SIZE 16
-#define NIST_AESGCM_TAG_SIZE 16
-
-#define NIST_AESGCM_128_KEY { 0x81, 0x6e, 0x39, 0x07, 0x04, 0x10, 0xcf, 0x21, 0x84, 0x90, 0x4d, 0xa0, 0x3e, 0xa5, 0x07, 0x5a }
-#define NIST_AESGCM_128_IV { 0x32, 0xc3, 0x67, 0xa3, 0x36, 0x26, 0x13, 0xb2, 0x7f, 0xc3, 0xe6, 0x7e }
-#define NIST_AESGCM_128_ADATA { 0xf2, 0xa3, 0x07, 0x28, 0xed, 0x87, 0x4e, 0xe0, 0x29, 0x83, 0xc2, 0x94, 0x43, 0x5d, 0x3c, 0x16 }
-#define NIST_AESGCM_128_PLAIN_TEXT { 0xec, 0xaf, 0xe9, 0x6c, 0x67, 0xa1, 0x64, 0x67, 0x44, 0xf1, 0xc8, 0x91, 0xf5, 0xe6, 0x94, 0x27 }
-#define NIST_AESGCM_128_CIPHER { 0x55, 0x2e, 0xbe, 0x01, 0x2e, 0x7b, 0xcf, 0x90, 0xfc, 0xef, 0x71, 0x2f, 0x83, 0x44, 0xe8, 0xf1 }
-#define NIST_AESGCM_128_MAC { 0xec, 0xaa, 0xe9, 0xfc, 0x68, 0x27, 0x6a, 0x45, 0xab, 0x0c, 0xa3, 0xcb, 0x9d, 0xd9, 0x53, 0x9f }
-
-#define NIST_AESGCM_192_KEY { 0x0c, 0x44, 0xd6, 0xc9, 0x28, 0xee, 0x11, 0x2c, 0xe6, 0x65, 0xfe, 0x54, 0x7e, 0xbd, 0x38, 0x72, \
- 0x98, 0xa9, 0x54, 0xb4, 0x62, 0xf6, 0x95, 0xd8 }
-#define NIST_AESGCM_192_IV { 0x18, 0xb8, 0xf3, 0x20, 0xfe, 0xf4, 0xae, 0x8c, 0xcb, 0xe8, 0xf9, 0x52 }
-#define NIST_AESGCM_192_ADATA { 0x73, 0x41, 0xd4, 0x3f, 0x98, 0xcf, 0x38, 0x82, 0x21, 0x18, 0x09, 0x41, 0x97, 0x03, 0x76, 0xe8 }
-#define NIST_AESGCM_192_PLAIN_TEXT { 0x96, 0xad, 0x07, 0xf9, 0xb6, 0x28, 0xb6, 0x52, 0xcf, 0x86, 0xcb, 0x73, 0x17, 0x88, 0x6f, 0x51 }
-#define NIST_AESGCM_192_CIPHER { 0xa6, 0x64, 0x07, 0x81, 0x33, 0x40, 0x5e, 0xb9, 0x09, 0x4d, 0x36, 0xf7, 0xe0, 0x70, 0x19, 0x1f }
-#define NIST_AESGCM_192_MAC { 0xe8, 0xf9, 0xc3, 0x17, 0x84, 0x7c, 0xe3, 0xf3, 0xc2, 0x39, 0x94, 0xa4, 0x02, 0xf0, 0x65, 0x81 }
-
-#define NIST_AESGCM_256_KEY { 0x54, 0xe3, 0x52, 0xea, 0x1d, 0x84, 0xbf, 0xe6, 0x4a, 0x10, 0x11, 0x09, 0x61, 0x11, 0xfb, 0xe7, \
- 0x66, 0x8a, 0xd2, 0x20, 0x3d, 0x90, 0x2a, 0x01, 0x45, 0x8c, 0x3b, 0xbd, 0x85, 0xbf, 0xce, 0x14 }
-#define NIST_AESGCM_256_IV { 0xdf, 0x7c, 0x3b, 0xca, 0x00, 0x39, 0x6d, 0x0c, 0x01, 0x84, 0x95, 0xd9 }
-#define NIST_AESGCM_256_ADATA { 0x7e, 0x96, 0x8d, 0x71, 0xb5, 0x0c, 0x1f, 0x11, 0xfd, 0x00, 0x1f, 0x3f, 0xef, 0x49, 0xd0, 0x45 }
-#define NIST_AESGCM_256_PLAIN_TEXT { 0x85, 0xfc, 0x3d, 0xfa, 0xd9, 0xb5, 0xa8, 0xd3, 0x25, 0x8e, 0x4f, 0xc4, 0x45, 0x71, 0xbd, 0x3b }
-#define NIST_AESGCM_256_CIPHER { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d }
-#define NIST_AESGCM_256_MAC { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 }
-
-/* NIST HASH */
-#define NIST_SHA_MSG_SIZE 16
-
-#define NIST_SHA_1_MSG { 0x35, 0x52, 0x69, 0x4c, 0xdf, 0x66, 0x3f, 0xd9, 0x4b, 0x22, 0x47, 0x47, 0xac, 0x40, 0x6a, 0xaf }
-#define NIST_SHA_1_MD { 0xa1, 0x50, 0xde, 0x92, 0x74, 0x54, 0x20, 0x2d, 0x94, 0xe6, 0x56, 0xde, 0x4c, 0x7c, 0x0c, 0xa6, \
- 0x91, 0xde, 0x95, 0x5d }
-
-#define NIST_SHA_256_MSG { 0x0a, 0x27, 0x84, 0x7c, 0xdc, 0x98, 0xbd, 0x6f, 0x62, 0x22, 0x0b, 0x04, 0x6e, 0xdd, 0x76, 0x2b }
-#define NIST_SHA_256_MD { 0x80, 0xc2, 0x5e, 0xc1, 0x60, 0x05, 0x87, 0xe7, 0xf2, 0x8b, 0x18, 0xb1, 0xb1, 0x8e, 0x3c, 0xdc, \
- 0x89, 0x92, 0x8e, 0x39, 0xca, 0xb3, 0xbc, 0x25, 0xe4, 0xd4, 0xa4, 0xc1, 0x39, 0xbc, 0xed, 0xc4 }
-
-#define NIST_SHA_512_MSG { 0xcd, 0x67, 0xbd, 0x40, 0x54, 0xaa, 0xa3, 0xba, 0xa0, 0xdb, 0x17, 0x8c, 0xe2, 0x32, 0xfd, 0x5a }
-#define NIST_SHA_512_MD { 0x0d, 0x85, 0x21, 0xf8, 0xf2, 0xf3, 0x90, 0x03, 0x32, 0xd1, 0xa1, 0xa5, 0x5c, 0x60, 0xba, 0x81, \
- 0xd0, 0x4d, 0x28, 0xdf, 0xe8, 0xc5, 0x04, 0xb6, 0x32, 0x8a, 0xe7, 0x87, 0x92, 0x5f, 0xe0, 0x18, \
- 0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \
- 0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 }
-
-/* NIST HMAC */
-#define NIST_HMAC_MSG_SIZE 128
-
-#define NIST_HMAC_SHA1_KEY_SIZE 10
-#define NIST_HMAC_SHA1_KEY { 0x59, 0x78, 0x59, 0x28, 0xd7, 0x25, 0x16, 0xe3, 0x12, 0x72 }
-#define NIST_HMAC_SHA1_MSG { 0xa3, 0xce, 0x88, 0x99, 0xdf, 0x10, 0x22, 0xe8, 0xd2, 0xd5, 0x39, 0xb4, 0x7b, 0xf0, 0xe3, 0x09, \
- 0xc6, 0x6f, 0x84, 0x09, 0x5e, 0x21, 0x43, 0x8e, 0xc3, 0x55, 0xbf, 0x11, 0x9c, 0xe5, 0xfd, 0xcb, \
- 0x4e, 0x73, 0xa6, 0x19, 0xcd, 0xf3, 0x6f, 0x25, 0xb3, 0x69, 0xd8, 0xc3, 0x8f, 0xf4, 0x19, 0x99, \
- 0x7f, 0x0c, 0x59, 0x83, 0x01, 0x08, 0x22, 0x36, 0x06, 0xe3, 0x12, 0x23, 0x48, 0x3f, 0xd3, 0x9e, \
- 0xde, 0xaa, 0x4d, 0x3f, 0x0d, 0x21, 0x19, 0x88, 0x62, 0xd2, 0x39, 0xc9, 0xfd, 0x26, 0x07, 0x41, \
- 0x30, 0xff, 0x6c, 0x86, 0x49, 0x3f, 0x52, 0x27, 0xab, 0x89, 0x5c, 0x8f, 0x24, 0x4b, 0xd4, 0x2c, \
- 0x7a, 0xfc, 0xe5, 0xd1, 0x47, 0xa2, 0x0a, 0x59, 0x07, 0x98, 0xc6, 0x8e, 0x70, 0x8e, 0x96, 0x49, \
- 0x02, 0xd1, 0x24, 0xda, 0xde, 0xcd, 0xbd, 0xa9, 0xdb, 0xd0, 0x05, 0x1e, 0xd7, 0x10, 0xe9, 0xbf }
-#define NIST_HMAC_SHA1_MD { 0x3c, 0x81, 0x62, 0x58, 0x9a, 0xaf, 0xae, 0xe0, 0x24, 0xfc, 0x9a, 0x5c, 0xa5, 0x0d, 0xd2, 0x33, \
- 0x6f, 0xe3, 0xeb, 0x28 }
-
-#define NIST_HMAC_SHA256_KEY_SIZE 40
-#define NIST_HMAC_SHA256_KEY { 0x97, 0x79, 0xd9, 0x12, 0x06, 0x42, 0x79, 0x7f, 0x17, 0x47, 0x02, 0x5d, 0x5b, 0x22, 0xb7, 0xac, \
- 0x60, 0x7c, 0xab, 0x08, 0xe1, 0x75, 0x8f, 0x2f, 0x3a, 0x46, 0xc8, 0xbe, 0x1e, 0x25, 0xc5, 0x3b, \
- 0x8c, 0x6a, 0x8f, 0x58, 0xff, 0xef, 0xa1, 0x76 }
-#define NIST_HMAC_SHA256_MSG { 0xb1, 0x68, 0x9c, 0x25, 0x91, 0xea, 0xf3, 0xc9, 0xe6, 0x60, 0x70, 0xf8, 0xa7, 0x79, 0x54, 0xff, \
- 0xb8, 0x17, 0x49, 0xf1, 0xb0, 0x03, 0x46, 0xf9, 0xdf, 0xe0, 0xb2, 0xee, 0x90, 0x5d, 0xcc, 0x28, \
- 0x8b, 0xaf, 0x4a, 0x92, 0xde, 0x3f, 0x40, 0x01, 0xdd, 0x9f, 0x44, 0xc4, 0x68, 0xc3, 0xd0, 0x7d, \
- 0x6c, 0x6e, 0xe8, 0x2f, 0xac, 0xea, 0xfc, 0x97, 0xc2, 0xfc, 0x0f, 0xc0, 0x60, 0x17, 0x19, 0xd2, \
- 0xdc, 0xd0, 0xaa, 0x2a, 0xec, 0x92, 0xd1, 0xb0, 0xae, 0x93, 0x3c, 0x65, 0xeb, 0x06, 0xa0, 0x3c, \
- 0x9c, 0x93, 0x5c, 0x2b, 0xad, 0x04, 0x59, 0x81, 0x02, 0x41, 0x34, 0x7a, 0xb8, 0x7e, 0x9f, 0x11, \
- 0xad, 0xb3, 0x04, 0x15, 0x42, 0x4c, 0x6c, 0x7f, 0x5f, 0x22, 0xa0, 0x03, 0xb8, 0xab, 0x8d, 0xe5, \
- 0x4f, 0x6d, 0xed, 0x0e, 0x3a, 0xb9, 0x24, 0x5f, 0xa7, 0x95, 0x68, 0x45, 0x1d, 0xfa, 0x25, 0x8e }
-#define NIST_HMAC_SHA256_MD { 0x76, 0x9f, 0x00, 0xd3, 0xe6, 0xa6, 0xcc, 0x1f, 0xb4, 0x26, 0xa1, 0x4a, 0x4f, 0x76, 0xc6, 0x46, \
- 0x2e, 0x61, 0x49, 0x72, 0x6e, 0x0d, 0xee, 0x0e, 0xc0, 0xcf, 0x97, 0xa1, 0x66, 0x05, 0xac, 0x8b }
-
-#define NIST_HMAC_SHA512_KEY_SIZE 100
-#define NIST_HMAC_SHA512_KEY { 0x57, 0xc2, 0xeb, 0x67, 0x7b, 0x50, 0x93, 0xb9, 0xe8, 0x29, 0xea, 0x4b, 0xab, 0xb5, 0x0b, 0xde, \
- 0x55, 0xd0, 0xad, 0x59, 0xfe, 0xc3, 0x4a, 0x61, 0x89, 0x73, 0x80, 0x2b, 0x2a, 0xd9, 0xb7, 0x8e, \
- 0x26, 0xb2, 0x04, 0x5d, 0xda, 0x78, 0x4d, 0xf3, 0xff, 0x90, 0xae, 0x0f, 0x2c, 0xc5, 0x1c, 0xe3, \
- 0x9c, 0xf5, 0x48, 0x67, 0x32, 0x0a, 0xc6, 0xf3, 0xba, 0x2c, 0x6f, 0x0d, 0x72, 0x36, 0x04, 0x80, \
- 0xc9, 0x66, 0x14, 0xae, 0x66, 0x58, 0x1f, 0x26, 0x6c, 0x35, 0xfb, 0x79, 0xfd, 0x28, 0x77, 0x4a, \
- 0xfd, 0x11, 0x3f, 0xa5, 0x18, 0x7e, 0xff, 0x92, 0x06, 0xd7, 0xcb, 0xe9, 0x0d, 0xd8, 0xbf, 0x67, \
- 0xc8, 0x44, 0xe2, 0x02 }
-#define NIST_HMAC_SHA512_MSG { 0x24, 0x23, 0xdf, 0xf4, 0x8b, 0x31, 0x2b, 0xe8, 0x64, 0xcb, 0x34, 0x90, 0x64, 0x1f, 0x79, 0x3d, \
- 0x2b, 0x9f, 0xb6, 0x8a, 0x77, 0x63, 0xb8, 0xe2, 0x98, 0xc8, 0x6f, 0x42, 0x24, 0x5e, 0x45, 0x40, \
- 0xeb, 0x01, 0xae, 0x4d, 0x2d, 0x45, 0x00, 0x37, 0x0b, 0x18, 0x86, 0xf2, 0x3c, 0xa2, 0xcf, 0x97, \
- 0x01, 0x70, 0x4c, 0xad, 0x5b, 0xd2, 0x1b, 0xa8, 0x7b, 0x81, 0x1d, 0xaf, 0x7a, 0x85, 0x4e, 0xa2, \
- 0x4a, 0x56, 0x56, 0x5c, 0xed, 0x42, 0x5b, 0x35, 0xe4, 0x0e, 0x1a, 0xcb, 0xeb, 0xe0, 0x36, 0x03, \
- 0xe3, 0x5d, 0xcf, 0x4a, 0x10, 0x0e, 0x57, 0x21, 0x84, 0x08, 0xa1, 0xd8, 0xdb, 0xcc, 0x3b, 0x99, \
- 0x29, 0x6c, 0xfe, 0xa9, 0x31, 0xef, 0xe3, 0xeb, 0xd8, 0xf7, 0x19, 0xa6, 0xd9, 0xa1, 0x54, 0x87, \
- 0xb9, 0xad, 0x67, 0xea, 0xfe, 0xdf, 0x15, 0x55, 0x9c, 0xa4, 0x24, 0x45, 0xb0, 0xf9, 0xb4, 0x2e }
-#define NIST_HMAC_SHA512_MD { 0x33, 0xc5, 0x11, 0xe9, 0xbc, 0x23, 0x07, 0xc6, 0x27, 0x58, 0xdf, 0x61, 0x12, 0x5a, 0x98, 0x0e, \
- 0xe6, 0x4c, 0xef, 0xeb, 0xd9, 0x09, 0x31, 0xcb, 0x91, 0xc1, 0x37, 0x42, 0xd4, 0x71, 0x4c, 0x06, \
- 0xde, 0x40, 0x03, 0xfa, 0xf3, 0xc4, 0x1c, 0x06, 0xae, 0xfc, 0x63, 0x8a, 0xd4, 0x7b, 0x21, 0x90, \
- 0x6e, 0x6b, 0x10, 0x48, 0x16, 0xb7, 0x2d, 0xe6, 0x26, 0x9e, 0x04, 0x5a, 0x1f, 0x44, 0x29, 0xd4 }
-
diff --git a/drivers/staging/ccree/ssi_fips_ext.c b/drivers/staging/ccree/ssi_fips_ext.c
deleted file mode 100644
index e7bf1843f60c..000000000000
--- a/drivers/staging/ccree/ssi_fips_ext.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver FIPS functions that should be
- * implemented by the driver user. Current implementation is sample code only.
- ***************************************************************/
-
-#include <linux/module.h>
-#include "ssi_fips_local.h"
-#include "ssi_driver.h"
-
-static bool tee_error;
-module_param(tee_error, bool, 0644);
-MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occured ");
-
-static enum cc_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
-static enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
-
-/*
- * This function returns the FIPS REE state.
- * The function should be implemented by the driver user, depends on where
- * the state value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state)
-{
- int rc = 0;
-
- if (!p_state)
- return -EINVAL;
-
- *p_state = fips_state;
-
- return rc;
-}
-
-/*
- * This function returns the FIPS REE error.
- * The function should be implemented by the driver user, depends on where
- * the error value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_get_error(enum cc_fips_error *p_err)
-{
- int rc = 0;
-
- if (!p_err)
- return -EINVAL;
-
- *p_err = fips_error;
-
- return rc;
-}
-
-/*
- * This function sets the FIPS REE state.
- * The function should be implemented by the driver user, depends on where
- * the state value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_set_state(enum cc_fips_state_t state)
-{
- fips_state = state;
- return 0;
-}
-
-/*
- * This function sets the FIPS REE error.
- * The function should be implemented by the driver user, depends on where
- * the error value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_set_error(enum cc_fips_error err)
-{
- fips_error = err;
- return 0;
-}
-
diff --git a/drivers/staging/ccree/ssi_fips_ll.c b/drivers/staging/ccree/ssi_fips_ll.c
deleted file mode 100644
index 3557e20c9e36..000000000000
--- a/drivers/staging/ccree/ssi_fips_ll.c
+++ /dev/null
@@ -1,1649 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver FIPS Low Level implmentaion functions,
- * that executes the KAT.
- ***************************************************************/
-#include <linux/kernel.h>
-
-#include "ssi_driver.h"
-#include "ssi_fips_local.h"
-#include "ssi_fips_data.h"
-#include "cc_crypto_ctx.h"
-#include "ssi_hash.h"
-#include "ssi_request_mgr.h"
-
-static const u32 digest_len_init[] = {
- 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const u32 sha1_init[] = {
- SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha256_init[] = {
- SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
- SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-#if (CC_SUPPORT_SHA > 256)
-static const u32 digest_len_sha512_init[] = {
- 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const u64 sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
-#endif
-
-#define NIST_CIPHER_AES_MAX_VECTOR_SIZE 32
-
-struct fips_cipher_ctx {
- u8 iv[CC_AES_IV_SIZE];
- u8 key[AES_512_BIT_KEY_SIZE];
- u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-};
-
-typedef struct _FipsCipherData {
- u8 isAes;
- u8 key[AES_512_BIT_KEY_SIZE];
- size_t keySize;
- u8 iv[CC_AES_IV_SIZE];
- enum drv_crypto_direction direction;
- enum drv_cipher_mode oprMode;
- u8 dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- size_t dataInSize;
-} FipsCipherData;
-
-struct fips_cmac_ctx {
- u8 key[AES_256_BIT_KEY_SIZE];
- u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsCmacData {
- enum drv_crypto_direction direction;
- u8 key[AES_256_BIT_KEY_SIZE];
- size_t key_size;
- u8 data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
- size_t mac_res_size;
-} FipsCmacData;
-
-struct fips_hash_ctx {
- u8 initial_digest[CC_DIGEST_SIZE_MAX];
- u8 din[NIST_SHA_MSG_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsHashData {
- enum drv_hash_mode hash_mode;
- u8 data_in[NIST_SHA_MSG_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-} FipsHashData;
-
-/* note that the hmac key length must be equal or less than block size (block size is 64 up to sha256 and 128 for sha384/512) */
-struct fips_hmac_ctx {
- u8 initial_digest[CC_DIGEST_SIZE_MAX];
- u8 key[CC_HMAC_BLOCK_SIZE_MAX];
- u8 k0[CC_HMAC_BLOCK_SIZE_MAX];
- u8 digest_bytes_len[HASH_LEN_SIZE];
- u8 tmp_digest[CC_DIGEST_SIZE_MAX];
- u8 din[NIST_HMAC_MSG_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsHmacData {
- enum drv_hash_mode hash_mode;
- u8 key[CC_HMAC_BLOCK_SIZE_MAX];
- size_t key_size;
- u8 data_in[NIST_HMAC_MSG_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-} FipsHmacData;
-
-#define FIPS_CCM_B0_A0_ADATA_SIZE (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
-
-struct fips_ccm_ctx {
- u8 b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
- u8 iv[NIST_AESCCM_IV_SIZE];
- u8 ctr_cnt_0[NIST_AESCCM_IV_SIZE];
- u8 key[CC_AES_KEY_SIZE_MAX];
- u8 din[NIST_AESCCM_TEXT_SIZE];
- u8 dout[NIST_AESCCM_TEXT_SIZE];
- u8 mac_res[NIST_AESCCM_TAG_SIZE];
-};
-
-typedef struct _FipsCcmData {
- enum drv_crypto_direction direction;
- u8 key[CC_AES_KEY_SIZE_MAX];
- size_t keySize;
- u8 nonce[NIST_AESCCM_NONCE_SIZE];
- u8 adata[NIST_AESCCM_ADATA_SIZE];
- size_t adataSize;
- u8 dataIn[NIST_AESCCM_TEXT_SIZE];
- size_t dataInSize;
- u8 dataOut[NIST_AESCCM_TEXT_SIZE];
- u8 tagSize;
- u8 macResOut[NIST_AESCCM_TAG_SIZE];
-} FipsCcmData;
-
-struct fips_gcm_ctx {
- u8 adata[NIST_AESGCM_ADATA_SIZE];
- u8 key[CC_AES_KEY_SIZE_MAX];
- u8 hkey[CC_AES_KEY_SIZE_MAX];
- u8 din[NIST_AESGCM_TEXT_SIZE];
- u8 dout[NIST_AESGCM_TEXT_SIZE];
- u8 mac_res[NIST_AESGCM_TAG_SIZE];
- u8 len_block[AES_BLOCK_SIZE];
- u8 iv_inc1[AES_BLOCK_SIZE];
- u8 iv_inc2[AES_BLOCK_SIZE];
-};
-
-typedef struct _FipsGcmData {
- enum drv_crypto_direction direction;
- u8 key[CC_AES_KEY_SIZE_MAX];
- size_t keySize;
- u8 iv[NIST_AESGCM_IV_SIZE];
- u8 adata[NIST_AESGCM_ADATA_SIZE];
- size_t adataSize;
- u8 dataIn[NIST_AESGCM_TEXT_SIZE];
- size_t dataInSize;
- u8 dataOut[NIST_AESGCM_TEXT_SIZE];
- u8 tagSize;
- u8 macResOut[NIST_AESGCM_TAG_SIZE];
-} FipsGcmData;
-
-typedef union _fips_ctx {
- struct fips_cipher_ctx cipher;
- struct fips_cmac_ctx cmac;
- struct fips_hash_ctx hash;
- struct fips_hmac_ctx hmac;
- struct fips_ccm_ctx ccm;
- struct fips_gcm_ctx gcm;
-} fips_ctx;
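Each power-up test below copies its vectors into a single DMA-coherent scratch buffer and derives the engine-side addresses with offsetof(); the union above is what allows one allocation, sized via ssi_fips_max_mem_alloc_size() at the end of this file, to serve all six test families. A compile-time check of that sizing invariant could look like the following sketch (not in the original; it would have to sit inside a function body, e.g. ssi_fips_max_mem_alloc_size()):

	/* sketch only: sizeof(fips_ctx) bounds every per-test context */
	BUILD_BUG_ON(sizeof(struct fips_hmac_ctx) > sizeof(fips_ctx));
	BUILD_BUG_ON(sizeof(struct fips_gcm_ctx) > sizeof(fips_ctx));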
-
-/* test data tables */
-static const FipsCipherData FipsCipherDataTable[] = {
- /* AES */
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_128_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_128_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_192_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_192_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_256_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_256_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_128_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_128_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_192_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_192_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_256_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_256_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_128_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_128_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_192_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_192_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_256_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_256_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_128_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_128_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_192_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_192_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_256_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_256_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_PLAIN_DATA, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_VECTOR_SIZE },
- { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_PLAIN_DATA, RFC3962_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_VECTOR_SIZE },
- { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_VECTOR_SIZE },
-#if (CC_SUPPORT_SHA > 256)
- { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE },
- { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE },
-#endif
- /* DES */
- { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_ECB3_CIPHER, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_CIPHER, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
-};
-
-#define FIPS_CIPHER_NUM_OF_TESTS (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData))
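A side note on the element-count macros such as the one above: kernel code normally expresses these with ARRAY_SIZE() from <linux/kernel.h>, which additionally refuses to compile when handed a pointer instead of an array. An equivalent form, assuming that header is already pulled in through ssi_driver.h:

#define FIPS_CIPHER_NUM_OF_TESTS	ARRAY_SIZE(FipsCipherDataTable)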
-
-static const FipsCmacData FipsCmacDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_128_CMAC_KEY, AES_128_BIT_KEY_SIZE, NIST_AES_128_CMAC_PLAIN_DATA, NIST_AES_128_CMAC_VECTOR_SIZE, NIST_AES_128_CMAC_MAC, NIST_AES_128_CMAC_OUTPUT_SIZE },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE },
-};
-
-#define FIPS_CMAC_NUM_OF_TESTS (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData))
-
-static const FipsHashData FipsHashDataTable[] = {
- { DRV_HASH_SHA1, NIST_SHA_1_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_1_MD },
- { DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD },
-#if (CC_SUPPORT_SHA > 256)
-// { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD },
-#endif
-};
-
-#define FIPS_HASH_NUM_OF_TESTS (sizeof(FipsHashDataTable) / sizeof(FipsHashData))
-
-static const FipsHmacData FipsHmacDataTable[] = {
- { DRV_HASH_SHA1, NIST_HMAC_SHA1_KEY, NIST_HMAC_SHA1_KEY_SIZE, NIST_HMAC_SHA1_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD },
- { DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD },
-#if (CC_SUPPORT_SHA > 256)
-// { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD },
-#endif
-};
-
-#define FIPS_HMAC_NUM_OF_TESTS (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData))
-
-static const FipsCcmData FipsCcmDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
-};
-
-#define FIPS_CCM_NUM_OF_TESTS (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData))
-
-static const FipsGcmData FipsGcmDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
-};
-
-#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
-
-static inline enum cc_fips_error
-FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
-{
- switch (mode)
- {
- case DRV_CIPHER_ECB:
- return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT;
- case DRV_CIPHER_CBC:
- return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT;
- case DRV_CIPHER_OFB:
- return CC_REE_FIPS_ERROR_AES_OFB_PUT;
- case DRV_CIPHER_CTR:
- return CC_REE_FIPS_ERROR_AES_CTR_PUT;
- case DRV_CIPHER_CBC_CTS:
- return CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT;
- case DRV_CIPHER_XTS:
- return CC_REE_FIPS_ERROR_AES_XTS_PUT;
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
- bool is_aes,
- int cipher_mode,
- int direction,
- dma_addr_t key_dma_addr,
- size_t key_len,
- dma_addr_t iv_dma_addr,
- size_t iv_len,
- dma_addr_t din_dma_addr,
- dma_addr_t dout_dma_addr,
- size_t data_size)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CIPHER_MAX_SEQ_LEN 6
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CIPHER_MAX_SEQ_LEN];
- int idx = 0;
- int s_flow_mode = is_aes ? S_DIN_to_AES : S_DIN_to_DES;
-
- /* create setup descriptors */
- switch (cipher_mode) {
- case DRV_CIPHER_CBC:
- case DRV_CIPHER_CBC_CTS:
- case DRV_CIPHER_CTR:
- case DRV_CIPHER_OFB:
- /* Load cipher state */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI,
- iv_dma_addr, iv_len, NS_BIT);
- set_cipher_config0(&desc[idx], direction);
- set_flow_mode(&desc[idx], s_flow_mode);
- set_cipher_mode(&desc[idx], cipher_mode);
- if ((cipher_mode == DRV_CIPHER_CTR) ||
- (cipher_mode == DRV_CIPHER_OFB)) {
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- } else {
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- }
- idx++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- if (is_aes) {
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_len == 24) ? AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- set_key_size_aes(&desc[idx], key_len);
- } else {/*des*/
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[idx], key_len);
- }
- set_flow_mode(&desc[idx], s_flow_mode);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
- break;
- case DRV_CIPHER_XTS:
- /* Load AES key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, (key_len / 2),
- NS_BIT);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_flow_mode(&desc[idx], s_flow_mode);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* load XEX key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_din_type(&desc[idx], DMA_DLLI,
- (key_dma_addr + (key_len / 2)),
- (key_len / 2), NS_BIT);
- set_xex_data_unit_size(&desc[idx], data_size);
- set_flow_mode(&desc[idx], s_flow_mode);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_setup_mode(&desc[idx], SETUP_LOAD_XEX_KEY);
- idx++;
-
- /* Set state */
- hw_desc_init(&desc[idx]);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_flow_mode(&desc[idx], s_flow_mode);
- set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr,
- CC_AES_BLOCK_SIZE, NS_BIT);
- idx++;
- break;
- default:
- FIPS_LOG("Unsupported cipher mode (%d)\n", cipher_mode);
- BUG();
- }
-
- /* create data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CIPHER_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
-	// send_request() returns an error only in corner cases that should not occur in this flow.
- return rc;
-}
-
-enum cc_fips_error
-ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_cipher_ctx *virt_ctx = (struct fips_cipher_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for iv, key, din, dout */
- dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, iv);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, dout);
-
- for (i = 0; i < FIPS_CIPHER_NUM_OF_TESTS; ++i)
- {
- FipsCipherData *cipherData = (FipsCipherData *)&FipsCipherDataTable[i];
- int rc = 0;
- size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_cipher_ctx));
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->iv, cipherData->iv, iv_size);
- memcpy(virt_ctx->key, cipherData->key, cipherData->keySize);
- memcpy(virt_ctx->din, cipherData->dataIn, cipherData->dataInSize);
-
- FIPS_DBG("ssi_cipher_fips_run_test - (i = %d) \n", i);
- rc = ssi_cipher_fips_run_test(drvdata,
- cipherData->isAes,
- cipherData->oprMode,
- cipherData->direction,
- key_dma_addr,
- cipherData->keySize,
- iv_dma_addr,
- iv_size,
- din_dma_addr,
- dout_dma_addr,
- cipherData->dataInSize);
- if (rc != 0)
- {
- FIPS_LOG("ssi_cipher_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
- break;
- }
-
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, cipherData->dataOut, cipherData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - oprMode=%d, isAes=%d\n", i, cipherData->oprMode, cipherData->isAes);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x (size=%d) \n", (size_t)cipherData->dataOut, (size_t)virt_ctx->dout, cipherData->dataInSize);
- for (i = 0; i < cipherData->dataInSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cipherData->dataOut[i], virt_ctx->dout[i]);
- }
-
- error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t key_dma_addr,
- size_t key_len,
- dma_addr_t din_dma_addr,
- size_t din_len,
- dma_addr_t digest_dma_addr,
- size_t digest_len)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CMAC_MAX_SEQ_LEN 4
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CMAC_MAX_SEQ_LEN];
- int idx = 0;
-
- /* Setup CMAC Key */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len), NS_BIT);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_key_size_aes(&desc[idx], key_len);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Load MAC state */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE,
- NS_BIT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_key_size_aes(&desc[idx], key_len);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- //ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_len, NS_BIT);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_dout_dlli(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,
- 0);
- set_flow_mode(&desc[idx], S_AES_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
-	// send_request() returns an error only in corner cases that should not occur in this flow.
- return rc;
-}
-
-enum cc_fips_error
-ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_cmac_ctx *virt_ctx = (struct fips_cmac_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for key, din, dout */
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, mac_res);
-
- for (i = 0; i < FIPS_CMAC_NUM_OF_TESTS; ++i)
- {
- FipsCmacData *cmac_data = (FipsCmacData *)&FipsCmacDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_cmac_ctx));
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->key, cmac_data->key, cmac_data->key_size);
- memcpy(virt_ctx->din, cmac_data->data_in, cmac_data->data_in_size);
-
- BUG_ON(cmac_data->direction != DRV_CRYPTO_DIRECTION_ENCRYPT);
-
- FIPS_DBG("ssi_cmac_fips_run_test - (i = %d) \n", i);
- rc = ssi_cmac_fips_run_test(drvdata,
- key_dma_addr,
- cmac_data->key_size,
- din_dma_addr,
- cmac_data->data_in_size,
- mac_res_dma_addr,
- cmac_data->mac_res_size);
- if (rc != 0)
- {
- FIPS_LOG("ssi_cmac_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, cmac_data->mac_res, cmac_data->mac_res_size) != 0)
- {
- FIPS_LOG("comparison error %d - digest_size=%d \n", i, cmac_data->mac_res_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)cmac_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < cmac_data->mac_res_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cmac_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
- break;
- }
- }
-
- return error;
-}
-
-static inline enum cc_fips_error
-FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
-{
- switch (hash_mode) {
- case DRV_HASH_SHA1:
- return CC_REE_FIPS_ERROR_SHA1_PUT;
- case DRV_HASH_SHA256:
- return CC_REE_FIPS_ERROR_SHA256_PUT;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- return CC_REE_FIPS_ERROR_SHA512_PUT;
-#endif
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t initial_digest_dma_addr,
- dma_addr_t din_dma_addr,
- size_t data_in_size,
- dma_addr_t mac_res_dma_addr,
- enum drv_hash_mode hash_mode,
- enum drv_hash_hw_mode hw_mode,
- int digest_size,
- int inter_digestsize)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_HASH_MAX_SEQ_LEN 4
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_HASH_MAX_SEQ_LEN];
- int idx = 0;
-
- /* Load initial digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_HASH_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_hash_ctx *virt_ctx = (struct fips_hash_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for initial_digest, din, mac_res */
- dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, initial_digest);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, mac_res);
-
- for (i = 0; i < FIPS_HASH_NUM_OF_TESTS; ++i)
- {
- FipsHashData *hash_data = (FipsHashData *)&FipsHashDataTable[i];
- int rc = 0;
- enum drv_hash_hw_mode hw_mode = 0;
- int digest_size = 0;
- int inter_digestsize = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_hash_ctx));
-
- switch (hash_data->hash_mode) {
- case DRV_HASH_SHA1:
- hw_mode = DRV_HASH_HW_SHA1;
- digest_size = CC_SHA1_DIGEST_SIZE;
- inter_digestsize = CC_SHA1_DIGEST_SIZE;
- /* copy the initial digest into the allocated cache coherent buffer */
- memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
- break;
- case DRV_HASH_SHA256:
- hw_mode = DRV_HASH_HW_SHA256;
- digest_size = CC_SHA256_DIGEST_SIZE;
- inter_digestsize = CC_SHA256_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
- break;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- hw_mode = DRV_HASH_HW_SHA512;
- digest_size = CC_SHA512_DIGEST_SIZE;
- inter_digestsize = CC_SHA512_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
- break;
-#endif
- default:
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
-
- /* copy the din data into the allocated buffer */
- memcpy(virt_ctx->din, hash_data->data_in, hash_data->data_in_size);
-
- /* run the test on HW */
- FIPS_DBG("ssi_hash_fips_run_test - (i = %d) \n", i);
- rc = ssi_hash_fips_run_test(drvdata,
- initial_digest_dma_addr,
- din_dma_addr,
- hash_data->data_in_size,
- mac_res_dma_addr,
- hash_data->hash_mode,
- hw_mode,
- digest_size,
- inter_digestsize);
- if (rc != 0)
- {
- FIPS_LOG("ssi_hash_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, hash_data->mac_res, digest_size) != 0)
- {
- FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hash_data->hash_mode, digest_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hash_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < digest_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hash_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
- }
-
- return error;
-}
-
-static inline enum cc_fips_error
-FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
-{
- switch (hash_mode) {
- case DRV_HASH_SHA1:
- return CC_REE_FIPS_ERROR_HMAC_SHA1_PUT;
- case DRV_HASH_SHA256:
- return CC_REE_FIPS_ERROR_HMAC_SHA256_PUT;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- return CC_REE_FIPS_ERROR_HMAC_SHA512_PUT;
-#endif
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t initial_digest_dma_addr,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t din_dma_addr,
- size_t data_in_size,
- dma_addr_t mac_res_dma_addr,
- enum drv_hash_mode hash_mode,
- enum drv_hash_hw_mode hw_mode,
- size_t digest_size,
- size_t inter_digestsize,
- size_t block_size,
- dma_addr_t k0_dma_addr,
- dma_addr_t tmp_digest_dma_addr,
- dma_addr_t digest_bytes_len_dma_addr)
-{
-	/* The implemented flow is not the same as the one in ssi_hash.c (setkey + digest flows).
-	 * In this flow, there is no need to store and reload some of the intermediate results.
- */
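The comment above is easier to follow with the construction spelled out; a sketch of what the descriptor sequence below computes (editorial note, not part of the original source):

	/*
	 *   K0 = key zero-padded to the hash block size
	 *   HMAC(key, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m))
	 *
	 * The opad hash state is computed first and parked in tmp_digest;
	 * the ipad hash then absorbs the message into k0, and the final
	 * descriptors combine the two into mac_res.
	 */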
-
- /* max number of descriptors used for the flow */
- #define FIPS_HMAC_MAX_SEQ_LEN 12
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_HMAC_MAX_SEQ_LEN];
- int idx = 0;
- int i;
- /* calc the hash opad first and ipad only afterwards (unlike the flow in ssi_hash.c) */
- unsigned int hmacPadConst[2] = { HMAC_OPAD_CONST, HMAC_IPAD_CONST };
-
- // assume (key_size <= block_size)
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0);
- idx++;
-
- // if needed, append Key with zeros to create K0
- if ((block_size - key_size) != 0) {
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0, (block_size - key_size));
- set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], (k0_dma_addr + key_size),
- (block_size - key_size), NS_BIT, 0);
- idx++;
- }
-
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
- return rc;
- }
- idx = 0;
-
- /* calc derived HMAC key */
- for (i = 0; i < 2; i++) {
- /* Load hash initial state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Prepare opad/ipad key */
- hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- idx++;
-
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, block_size,
- NS_BIT);
- set_cipher_mode(&desc[idx], hw_mode);
- set_xor_active(&desc[idx]);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- if (i == 0) {
- /* First iteration - calc H(K0^opad) into tmp_digest_dma_addr */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], tmp_digest_dma_addr,
- inter_digestsize, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- // is this needed?? or continue with current descriptors??
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
- return rc;
- }
- idx = 0;
- }
- }
-
- /* data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* HW last hash block padding (aka. "DO_PAD") */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
- set_cipher_do(&desc[idx], DO_PAD);
- idx++;
-
- /* store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* at this point:
- * tmp_digest = H(o_key_pad)
- * k0 = H(i_key_pad || m)
- */
-
- /* Loading hash opad xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, tmp_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_hmac_ctx *virt_ctx = (struct fips_hmac_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, initial_digest);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, key);
- dma_addr_t k0_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, k0);
- dma_addr_t tmp_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, tmp_digest);
- dma_addr_t digest_bytes_len_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, digest_bytes_len);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, mac_res);
-
- for (i = 0; i < FIPS_HMAC_NUM_OF_TESTS; ++i)
- {
- FipsHmacData *hmac_data = (FipsHmacData *)&FipsHmacDataTable[i];
- int rc = 0;
- enum drv_hash_hw_mode hw_mode = 0;
- int digest_size = 0;
- int block_size = 0;
- int inter_digestsize = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_hmac_ctx));
-
- switch (hmac_data->hash_mode) {
- case DRV_HASH_SHA1:
- hw_mode = DRV_HASH_HW_SHA1;
- digest_size = CC_SHA1_DIGEST_SIZE;
- block_size = CC_SHA1_BLOCK_SIZE;
- inter_digestsize = CC_SHA1_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
- break;
- case DRV_HASH_SHA256:
- hw_mode = DRV_HASH_HW_SHA256;
- digest_size = CC_SHA256_DIGEST_SIZE;
- block_size = CC_SHA256_BLOCK_SIZE;
- inter_digestsize = CC_SHA256_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
- break;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- hw_mode = DRV_HASH_HW_SHA512;
- digest_size = CC_SHA512_DIGEST_SIZE;
- block_size = CC_SHA512_BLOCK_SIZE;
- inter_digestsize = CC_SHA512_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
- break;
-#endif
- default:
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->key, hmac_data->key, hmac_data->key_size);
- memcpy(virt_ctx->din, hmac_data->data_in, hmac_data->data_in_size);
-
- /* run the test on HW */
- FIPS_DBG("ssi_hmac_fips_run_test - (i = %d) \n", i);
- rc = ssi_hmac_fips_run_test(drvdata,
- initial_digest_dma_addr,
- key_dma_addr,
- hmac_data->key_size,
- din_dma_addr,
- hmac_data->data_in_size,
- mac_res_dma_addr,
- hmac_data->hash_mode,
- hw_mode,
- digest_size,
- inter_digestsize,
- block_size,
- k0_dma_addr,
- tmp_digest_dma_addr,
- digest_bytes_len_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_hmac_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, hmac_data->mac_res, digest_size) != 0)
- {
- FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hmac_data->hash_mode, digest_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hmac_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < digest_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hmac_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
- enum drv_crypto_direction direction,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t iv_dma_addr,
- dma_addr_t ctr_cnt_0_dma_addr,
- dma_addr_t b0_a0_adata_dma_addr,
- size_t b0_a0_adata_size,
- dma_addr_t din_dma_addr,
- size_t din_size,
- dma_addr_t dout_dma_addr,
- dma_addr_t mac_res_dma_addr)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CCM_MAX_SEQ_LEN 10
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CCM_MAX_SEQ_LEN];
- unsigned int idx = 0;
- unsigned int cipher_flow_mode;
-
- if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- } else { /* Encrypt */
- cipher_flow_mode = AES_and_HASH;
- }
-
- /* load key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
-		      CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load ctr state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load MAC key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
- CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load MAC state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
- NIST_AESCCM_TAG_SIZE, NS_BIT);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
-	/* process assoc data */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr,
- b0_a0_adata_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* process the cipher */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], cipher_flow_mode);
- idx++;
-
- /* Read temporal MAC */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
- NS_BIT, 0);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load AES-CTR state (for last MAC calculation)*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, ctr_cnt_0_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
-	/* encrypt the "T" value and store MAC in place */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
- NIST_AESCCM_TAG_SIZE, NS_BIT);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
- NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_ccm_ctx *virt_ctx = (struct fips_ccm_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t b0_a0_adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, b0_a0_adata);
- dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, iv);
- dma_addr_t ctr_cnt_0_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, ctr_cnt_0);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, dout);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, mac_res);
-
- for (i = 0; i < FIPS_CCM_NUM_OF_TESTS; ++i)
- {
- FipsCcmData *ccmData = (FipsCcmData *)&FipsCcmDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_ccm_ctx));
-
- /* copy the nonce, key, adata, din data into the allocated buffer */
- memcpy(virt_ctx->key, ccmData->key, ccmData->keySize);
- memcpy(virt_ctx->din, ccmData->dataIn, ccmData->dataInSize);
- {
- /* build B0 -- B0, nonce, l(m) */
- __be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE);
-
- virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL;
- memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
- memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16));
- /* build A0+ADATA */
- virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 0] = (ccmData->adataSize >> 8) & 0xFF;
- virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 1] = ccmData->adataSize & 0xFF;
- memcpy(virt_ctx->b0_a0_adata + NIST_AESCCM_IV_SIZE + 2, ccmData->adata, ccmData->adataSize);
- /* iv */
- virt_ctx->iv[0] = 1; /* L' */
- memcpy(virt_ctx->iv + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
- virt_ctx->iv[15] = 1;
- /* ctr_count_0 */
- memcpy(virt_ctx->ctr_cnt_0, virt_ctx->iv, NIST_AESCCM_IV_SIZE);
- virt_ctx->ctr_cnt_0[15] = 0;
- }
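For reference, a sketch of the block layout the code above assembles (editorial note, not from the original source; it assumes NIST_AESCCM_NONCE_SIZE is 13, which the offsets used above imply):

		/*
		 * Per RFC 3610 / NIST SP 800-38C with L = 2 (2-byte length field):
		 *
		 *   B0       = flags || nonce[13] || l(m) as a 16-bit big-endian value
		 *   Ai (CTR) = L' (= 1) || nonce[13] || counter
		 *
		 * "iv" is A1 (counter = 1), loaded as the CTR state for the payload,
		 * while ctr_cnt_0 is A0 (counter = 0), used later to encrypt the
		 * CBC-MAC value into the final tag.
		 */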
-
- FIPS_DBG("ssi_ccm_fips_run_test - (i = %d) \n", i);
- rc = ssi_ccm_fips_run_test(drvdata,
- ccmData->direction,
- key_dma_addr,
- ccmData->keySize,
- iv_dma_addr,
- ctr_cnt_0_dma_addr,
- b0_a0_adata_dma_addr,
- FIPS_CCM_B0_A0_ADATA_SIZE,
- din_dma_addr,
- ccmData->dataInSize,
- dout_dma_addr,
- mac_res_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_ccm_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
-
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, ccmData->dataOut, ccmData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - size=%d \n", i, ccmData->dataInSize);
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, ccmData->macResOut, ccmData->tagSize) != 0)
- {
- FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, ccmData->tagSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)ccmData->macResOut, (size_t)virt_ctx->mac_res);
- for (i = 0; i < ccmData->tagSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, ccmData->macResOut[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
- enum drv_crypto_direction direction,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t hkey_dma_addr,
- dma_addr_t block_len_dma_addr,
- dma_addr_t iv_inc1_dma_addr,
- dma_addr_t iv_inc2_dma_addr,
- dma_addr_t adata_dma_addr,
- size_t adata_size,
- dma_addr_t din_dma_addr,
- size_t din_size,
- dma_addr_t dout_dma_addr,
- dma_addr_t mac_res_dma_addr)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_GCM_MAX_SEQ_LEN 15
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_GCM_MAX_SEQ_LEN];
- unsigned int idx = 0;
- unsigned int cipher_flow_mode;
-
- if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_and_HASH;
- } else { /* Encrypt */
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- }
-
-///////////////////////////////// 1 ////////////////////////////////////
-// ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
-///////////////////////////////// 1 ////////////////////////////////////
-
- /* load key to AES */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* process one zero block to generate hkey */
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
- set_dout_dlli(&desc[idx], hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Load GHASH subkey */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, hkey_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Configure Hash Engine to work with GHASH.
- * Since it was not possible to extend HASH submodes to add GHASH,
- * The following command is necessary in order to
- * select GHASH (according to HW designers)
- */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
-	/* Load GHASH initial STATE (which is 0; every hash has an initial state) */
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
-///////////////////////////////// 2 ////////////////////////////////////
-	/* process(ghash) assoc data */
-// if (req->assoclen > 0)
-// ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
-///////////////////////////////// 2 ////////////////////////////////////
-
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, adata_dma_addr, adata_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
-///////////////////////////////// 3 ////////////////////////////////////
-// ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
-///////////////////////////////// 3 ////////////////////////////////////
-
- /* load key to AES*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load AES/CTR initial CTR value inc by 2*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_inc2_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
-///////////////////////////////// 4 ////////////////////////////////////
- /* process(gctr+ghash) */
-// if (req_ctx->cryptlen != 0)
-// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
-///////////////////////////////// 4 ////////////////////////////////////
-
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], cipher_flow_mode);
- idx++;
-
-///////////////////////////////// 5 ////////////////////////////////////
-// ssi_aead_process_gcm_result_desc(req, desc, seq_size);
-///////////////////////////////// 5 ////////////////////////////////////
-
-	/* process(ghash) gcm_block_len */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, block_len_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load AES/CTR initial CTR value inc by 1*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_inc1_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
-	/* process GCTR on stored GHASH and store MAC in place */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_GCM_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_gcm_ctx *virt_ctx = (struct fips_gcm_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, adata);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, key);
- dma_addr_t hkey_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, hkey);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, dout);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, mac_res);
- dma_addr_t len_block_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, len_block);
- dma_addr_t iv_inc1_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc1);
- dma_addr_t iv_inc2_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc2);
-
- for (i = 0; i < FIPS_GCM_NUM_OF_TESTS; ++i)
- {
- FipsGcmData *gcmData = (FipsGcmData *)&FipsGcmDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_gcm_ctx));
-
-		/* copy the key, adata, din data into the allocated buffer */
- memcpy(virt_ctx->key, gcmData->key, gcmData->keySize);
- memcpy(virt_ctx->adata, gcmData->adata, gcmData->adataSize);
- memcpy(virt_ctx->din, gcmData->dataIn, gcmData->dataInSize);
-
- /* len_block */
- {
- __be64 len_bits;
-
- len_bits = cpu_to_be64(gcmData->adataSize * 8);
- memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits));
- len_bits = cpu_to_be64(gcmData->dataInSize * 8);
- memcpy(virt_ctx->len_block + 8, &len_bits, sizeof(len_bits));
- }
- /* iv_inc1, iv_inc2 */
- {
- __be32 counter = cpu_to_be32(1);
-
- memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE);
- memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
- counter = cpu_to_be32(2);
- memcpy(virt_ctx->iv_inc2, gcmData->iv, NIST_AESGCM_IV_SIZE);
- memcpy(virt_ctx->iv_inc2 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
- }
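For reference, a sketch of what these blocks correspond to in NIST SP 800-38D (editorial note, not from the original source; it assumes NIST_AESGCM_IV_SIZE is 12 bytes, i.e. the 96-bit IV case):

		/*
		 *   J0        = IV || 0x00000001   (iv_inc1 - encrypts the GHASH
		 *                                   output into the final tag)
		 *   inc32(J0) = IV || 0x00000002   (iv_inc2 - starting counter for
		 *                                   the payload GCTR pass)
		 *   len_block = [len(A)]64 || [len(C)]64, lengths in bits, big-endian
		 */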
-
- FIPS_DBG("ssi_gcm_fips_run_test - (i = %d) \n", i);
- rc = ssi_gcm_fips_run_test(drvdata,
- gcmData->direction,
- key_dma_addr,
- gcmData->keySize,
- hkey_dma_addr,
- len_block_dma_addr,
- iv_inc1_dma_addr,
- iv_inc2_dma_addr,
- adata_dma_addr,
- gcmData->adataSize,
- din_dma_addr,
- gcmData->dataInSize,
- dout_dma_addr,
- mac_res_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_gcm_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
-
- if (gcmData->direction == DRV_CRYPTO_DIRECTION_ENCRYPT) {
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, gcmData->dataOut, gcmData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - size=%d \n", i, gcmData->dataInSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->dataOut, (size_t)virt_ctx->dout);
- for (i = 0; i < gcmData->dataInSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->dataOut[i], virt_ctx->dout[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, gcmData->macResOut, gcmData->tagSize) != 0)
- {
- FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, gcmData->tagSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->macResOut, (size_t)virt_ctx->mac_res);
- for (i = 0; i < gcmData->tagSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->macResOut[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
- }
- return error;
-}
-
-size_t ssi_fips_max_mem_alloc_size(void)
-{
- FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx));
- FIPS_DBG("sizeof(struct fips_cmac_ctx) %d \n", sizeof(struct fips_cmac_ctx));
- FIPS_DBG("sizeof(struct fips_hash_ctx) %d \n", sizeof(struct fips_hash_ctx));
- FIPS_DBG("sizeof(struct fips_hmac_ctx) %d \n", sizeof(struct fips_hmac_ctx));
- FIPS_DBG("sizeof(struct fips_ccm_ctx) %d \n", sizeof(struct fips_ccm_ctx));
- FIPS_DBG("sizeof(struct fips_gcm_ctx) %d \n", sizeof(struct fips_gcm_ctx));
-
- return sizeof(fips_ctx);
-}
-
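The deleted GCM known-answer test above derives one device address per field of a single DMA-coherent context by adding offsetof() to the buffer's base handle, so the CPU fills the structure through its virtual mapping while the HW descriptors receive base + offset. A minimal sketch of that pattern, using an illustrative structure that is not the driver's real fips_gcm_ctx:

#include <linux/dma-mapping.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Illustrative layout only; the real fips_gcm_ctx carries more fields. */
struct kat_gcm_ctx {
	u8 key[32];
	u8 iv_inc1[16];
	u8 din[64];
	u8 mac_res[16];
};

/* base is the dma_addr_t returned by dma_alloc_coherent() for the ctx */
static void kat_gcm_field_addrs(dma_addr_t base, dma_addr_t *key_dma,
				dma_addr_t *iv_dma, dma_addr_t *din_dma,
				dma_addr_t *mac_dma)
{
	*key_dma = base + offsetof(struct kat_gcm_ctx, key);
	*iv_dma  = base + offsetof(struct kat_gcm_ctx, iv_inc1);
	*din_dma = base + offsetof(struct kat_gcm_ctx, din);
	*mac_dma = base + offsetof(struct kat_gcm_ctx, mac_res);
}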
diff --git a/drivers/staging/ccree/ssi_fips_local.c b/drivers/staging/ccree/ssi_fips_local.c
deleted file mode 100644
index aefb71dc9e9a..000000000000
--- a/drivers/staging/ccree/ssi_fips_local.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver's internal FIPS functions, used by the driver itself.
- ***************************************************************/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <crypto/des.h>
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "cc_hal.h"
-
-#define FIPS_POWER_UP_TEST_CIPHER 1
-#define FIPS_POWER_UP_TEST_CMAC 1
-#define FIPS_POWER_UP_TEST_HASH 1
-#define FIPS_POWER_UP_TEST_HMAC 1
-#define FIPS_POWER_UP_TEST_CCM 1
-#define FIPS_POWER_UP_TEST_GCM 1
-
-static bool ssi_fips_support = 1;
-module_param(ssi_fips_support, bool, 0644);
-MODULE_PARM_DESC(ssi_fips_support, "FIPS supported flag: 0 - off , 1 - on (default)");
-
-static void fips_dsr(unsigned long devarg);
-
-struct ssi_fips_handle {
-#ifdef COMP_IN_WQ
- struct workqueue_struct *workq;
- struct delayed_work fipswork;
-#else
- struct tasklet_struct fipstask;
-#endif
-};
-
-extern int ssi_fips_get_state(enum cc_fips_state_t *p_state);
-extern int ssi_fips_get_error(enum cc_fips_error *p_err);
-extern int ssi_fips_ext_set_state(enum cc_fips_state_t state);
-extern int ssi_fips_ext_set_error(enum cc_fips_error err);
-
-/* FIPS power-up tests */
-extern enum cc_fips_error ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern size_t ssi_fips_max_mem_alloc_size(void);
-
-/* The function is called once at the driver entry point to check whether a TEE FIPS error occurred. */
-static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
-{
- u32 regVal;
- void __iomem *cc_base = drvdata->cc_base;
-
- regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- return CC_REE_FIPS_ERROR_OK;
-
- return CC_REE_FIPS_ERROR_FROM_TEE;
-}
-
-/*
- * This function pushes the FIPS REE library status towards the TEE library
- * by writing the error state to the HOST_GPR0 register. It is called from the
- * driver entry point, so no mutex protection is needed.
- */
-static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, enum cc_fips_error err)
-{
- void __iomem *cc_base = drvdata->cc_base;
-
- if (err == CC_REE_FIPS_ERROR_OK)
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK));
- else
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR));
-}
-
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
-{
- struct ssi_fips_handle *fips_h = drvdata->fips_handle;
-
- if (!fips_h)
- return; /* Not allocated */
-
-#ifdef COMP_IN_WQ
- if (fips_h->workq) {
- flush_workqueue(fips_h->workq);
- destroy_workqueue(fips_h->workq);
- }
-#else
- /* Kill tasklet */
- tasklet_kill(&fips_h->fipstask);
-#endif
- memset(fips_h, 0, sizeof(struct ssi_fips_handle));
- kfree(fips_h);
- drvdata->fips_handle = NULL;
-}
-
-void fips_handler(struct ssi_drvdata *drvdata)
-{
- struct ssi_fips_handle *fips_handle_ptr =
- drvdata->fips_handle;
-#ifdef COMP_IN_WQ
- queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0);
-#else
- tasklet_schedule(&fips_handle_ptr->fipstask);
-#endif
-}
-
-#ifdef COMP_IN_WQ
-static void fips_wq_handler(struct work_struct *work)
-{
- struct ssi_drvdata *drvdata =
- container_of(work, struct ssi_drvdata, fipswork.work);
-
- fips_dsr((unsigned long)drvdata);
-}
-#endif
-
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void fips_dsr(unsigned long devarg)
-{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
- u32 irq;
- u32 teeFipsError = 0;
-
- irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
-
- if (irq & SSI_GPR0_IRQ_MASK) {
- teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
- }
-
-	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
-}
-
-enum cc_fips_error cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
-{
- enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
- void *cpu_addr_buffer = NULL;
- dma_addr_t dma_handle;
- size_t alloc_buff_size = ssi_fips_max_mem_alloc_size();
- struct device *dev = &drvdata->plat_dev->dev;
-
-	// allocate memory using dma_alloc_coherent - for a physically contiguous, cache-coherent buffer (no extra memory mapping is needed)
-	// the return value is the virtual address - use it to copy data into the buffer
-	// the dma_handle is the returned physical address - use it in the HW descriptors
- FIPS_DBG("dma_alloc_coherent \n");
- cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL);
- if (!cpu_addr_buffer)
- return CC_REE_FIPS_ERROR_GENERAL;
-
- FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size);
-
-#if FIPS_POWER_UP_TEST_CIPHER
- FIPS_DBG("ssi_cipher_fips_power_up_tests ...\n");
- fips_error = ssi_cipher_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_cipher_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
-#endif
-#if FIPS_POWER_UP_TEST_CMAC
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_cmac_fips_power_up_tests ...\n");
- fips_error = ssi_cmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_cmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_HASH
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_hash_fips_power_up_tests ...\n");
- fips_error = ssi_hash_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_hash_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_HMAC
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_hmac_fips_power_up_tests ...\n");
- fips_error = ssi_hmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_hmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_CCM
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_ccm_fips_power_up_tests ...\n");
- fips_error = ssi_ccm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_ccm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_GCM
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_gcm_fips_power_up_tests ...\n");
- fips_error = ssi_gcm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_gcm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
- /* deallocate the buffer when all tests are done... */
- FIPS_DBG("dma_free_coherent \n");
- dma_free_coherent(dev, alloc_buff_size, cpu_addr_buffer, dma_handle);
-
- return fips_error;
-}
-
-/* The function checks whether FIPS is supported and whether a FIPS error
- * exists. It should be used in every driver API.
- */
-int ssi_fips_check_fips_error(void)
-{
- enum cc_fips_state_t fips_state;
-
- if (ssi_fips_get_state(&fips_state) != 0) {
- FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n");
- return -ENOEXEC;
- }
- if (fips_state == CC_FIPS_STATE_ERROR) {
- FIPS_LOG("ssi_fips_get_state: fips_state is %d, returning.. \n", fips_state);
- return -ENOEXEC;
- }
- return 0;
-}
-
-/* The function sets the REE FIPS state.*
- * It should be used while the driver is being loaded.
- */
-int ssi_fips_set_state(enum cc_fips_state_t state)
-{
- return ssi_fips_ext_set_state(state);
-}
-
-/* The function sets the REE FIPS error, and pushes the error to the TEE library. *
- * It should be used when any of the KAT tests fails.
- */
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err)
-{
- int rc = 0;
- enum cc_fips_error current_err;
-
- FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
-
- // setting no error is not allowed
- if (err == CC_REE_FIPS_ERROR_OK)
- return -ENOEXEC;
-
- // If error exists, do not set new error
- if (ssi_fips_get_error(&current_err) != 0)
- return -ENOEXEC;
-
- if (current_err != CC_REE_FIPS_ERROR_OK)
- return -ENOEXEC;
-
- // set REE internal error and state
- rc = ssi_fips_ext_set_error(err);
- if (rc != 0)
- return -ENOEXEC;
-
- rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
- if (rc != 0)
- return -ENOEXEC;
-
-	// push the error towards the TEE library, if it is not a TEE error
- if (err != CC_REE_FIPS_ERROR_FROM_TEE)
- ssi_fips_update_tee_upon_ree_status(p_drvdata, err);
-
- return rc;
-}
-
-/* The function is called once at the driver entry point. */
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- enum cc_fips_error rc = CC_REE_FIPS_ERROR_OK;
- struct ssi_fips_handle *fips_h;
-
- FIPS_DBG("CC FIPS code .. (fips=%d) \n", ssi_fips_support);
-
- fips_h = kzalloc(sizeof(struct ssi_fips_handle), GFP_KERNEL);
- if (!fips_h) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- return -ENOMEM;
- }
-
- p_drvdata->fips_handle = fips_h;
-
-#ifdef COMP_IN_WQ
- SSI_LOG_DEBUG("Initializing fips workqueue\n");
- fips_h->workq = create_singlethread_workqueue("arm_cc7x_fips_wq");
- if (unlikely(!fips_h->workq)) {
- SSI_LOG_ERR("Failed creating fips work queue\n");
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- rc = -ENOMEM;
- goto ssi_fips_init_err;
- }
- INIT_DELAYED_WORK(&fips_h->fipswork, fips_wq_handler);
-#else
- SSI_LOG_DEBUG("Initializing fips tasklet\n");
- tasklet_init(&fips_h->fipstask, fips_dsr, (unsigned long)p_drvdata);
-#endif
-
- /* init fips driver data */
- rc = ssi_fips_set_state((ssi_fips_support == 0) ? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED);
- if (unlikely(rc != 0)) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
-
- /* Run power up tests (before registration and operating the HW engines) */
- FIPS_DBG("ssi_fips_get_tee_error \n");
- rc = ssi_fips_get_tee_error(p_drvdata);
- if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
-
- FIPS_DBG("cc_fips_run_power_up_tests \n");
- rc = cc_fips_run_power_up_tests(p_drvdata);
- if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
- ssi_fips_set_error(p_drvdata, rc);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
- FIPS_LOG("cc_fips_run_power_up_tests - done ... fips_error = %d \n", rc);
-
- /* when all tests passed, update TEE with fips OK status after power up tests */
- ssi_fips_update_tee_upon_ree_status(p_drvdata, CC_REE_FIPS_ERROR_OK);
-
- if (unlikely(rc != 0)) {
- rc = -EAGAIN;
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- goto ssi_fips_init_err;
- }
-
- return 0;
-
-ssi_fips_init_err:
- ssi_fips_fini(p_drvdata);
- return rc;
-}
-
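cc_fips_run_power_up_tests() above sizes one coherent allocation for the largest KAT context, hands both the CPU pointer and the DMA handle to every self-test, and frees the buffer only after the last test has run. A hedged sketch of that allocate/run/free lifecycle (the function name and the body of the loop are placeholders, not driver code):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int power_up_tests_example(struct device *dev, size_t buf_size)
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	int rc = 0;

	/* one physically contiguous, cache-coherent buffer for all KATs */
	cpu_addr = dma_alloc_coherent(dev, buf_size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	memset(cpu_addr, 0, buf_size);
	/*
	 * ... run each known-answer test here, copying vectors in through
	 * cpu_addr and pointing the HW descriptors at dma_handle + offset ...
	 */

	dma_free_coherent(dev, buf_size, cpu_addr, dma_handle);
	return rc;
}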
diff --git a/drivers/staging/ccree/ssi_fips_local.h b/drivers/staging/ccree/ssi_fips_local.h
deleted file mode 100644
index 8c7994fe9fae..000000000000
--- a/drivers/staging/ccree/ssi_fips_local.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_FIPS_LOCAL_H__
-#define __SSI_FIPS_LOCAL_H__
-
-#ifdef CONFIG_CCX7REE_FIPS_SUPPORT
-
-#include "ssi_fips.h"
-struct ssi_drvdata;
-
-#define CHECK_AND_RETURN_UPON_FIPS_ERROR() {\
- if (ssi_fips_check_fips_error() != 0) {\
- return -ENOEXEC;\
- } \
-}
-
-#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\
- if (ssi_fips_check_fips_error() != 0) {\
- return;\
- } \
-}
-
-#define SSI_FIPS_INIT(p_drvData) (ssi_fips_init(p_drvData))
-#define SSI_FIPS_FINI(p_drvData) (ssi_fips_fini(p_drvData))
-
-#define FIPS_LOG(...) SSI_LOG(KERN_INFO, __VA_ARGS__)
-#define FIPS_DBG(...) //SSI_LOG(KERN_INFO, __VA_ARGS__)
-
-/* FIPS functions */
-int ssi_fips_init(struct ssi_drvdata *p_drvdata);
-void ssi_fips_fini(struct ssi_drvdata *drvdata);
-int ssi_fips_check_fips_error(void);
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err);
-void fips_handler(struct ssi_drvdata *drvdata);
-
-#else  /* CONFIG_CCX7REE_FIPS_SUPPORT */
-
-#define CHECK_AND_RETURN_UPON_FIPS_ERROR()
-#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR()
-
-static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- return 0;
-}
-
-static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-
-void fips_handler(struct ssi_drvdata *drvdata);
-
-#endif  /* CONFIG_CCX7REE_FIPS_SUPPORT */
-
-#endif /*__SSI_FIPS_LOCAL_H__*/
-
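The deleted header follows the usual kernel pattern of pairing real declarations with static inline no-op stubs behind the config guard, so callers never need #ifdefs of their own. A generic sketch of that pattern (the config symbol and function names below are illustrative, not taken from ccree):

#include <linux/device.h>

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(struct device *dev);
void example_feature_fini(struct device *dev);
#else
/* when the feature is compiled out, callers still compile and link */
static inline int example_feature_init(struct device *dev)
{
	return 0;
}

static inline void example_feature_fini(struct device *dev)
{
}
#endif /* CONFIG_EXAMPLE_FEATURE */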
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index ae8f36af3837..fba0643e78fa 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -30,7 +30,6 @@
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"
-#include "ssi_fips_local.h"
#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
@@ -140,9 +139,9 @@ static int ssi_hash_map_result(struct device *dev,
return -ENOMEM;
}
SSI_LOG_DEBUG("Mapped digest result buffer %u B "
- "at va=%pK to dma=0x%llX\n",
+ "at va=%pK to dma=%pad\n",
digestsize, state->digest_result_buff,
- (unsigned long long)state->digest_result_dma_addr);
+ state->digest_result_dma_addr);
return 0;
}
@@ -204,9 +203,9 @@ static int ssi_hash_map_request(struct device *dev,
ctx->inter_digestsize, state->digest_buff);
goto fail3;
}
- SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
+ SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
- (unsigned long long)state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr);
if (is_hmac) {
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -253,9 +252,9 @@ static int ssi_hash_map_request(struct device *dev,
HASH_LEN_SIZE, state->digest_bytes_len);
goto fail4;
}
- SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
+ SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
HASH_LEN_SIZE, state->digest_bytes_len,
- (unsigned long long)state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr);
} else {
state->digest_bytes_len_dma_addr = 0;
}
@@ -267,9 +266,9 @@ static int ssi_hash_map_request(struct device *dev,
ctx->inter_digestsize, state->opad_digest_buff);
goto fail5;
}
- SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
+ SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
- (unsigned long long)state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr);
} else {
state->opad_digest_dma_addr = 0;
}
@@ -322,22 +321,22 @@ static void ssi_hash_unmap_request(struct device *dev,
if (state->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
- (unsigned long long)state->digest_buff_dma_addr);
+ SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
if (state->digest_bytes_len_dma_addr != 0) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
- (unsigned long long)state->digest_bytes_len_dma_addr);
+ SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
if (state->opad_digest_dma_addr != 0) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
- (unsigned long long)state->opad_digest_dma_addr);
+ SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
@@ -359,9 +358,9 @@ static void ssi_hash_unmap_result(struct device *dev,
digestsize,
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("unmpa digest result buffer "
- "va (%pK) pa (%llx) len %u\n",
+ "va (%pK) pa (%pad) len %u\n",
state->digest_result_buff,
- (unsigned long long)state->digest_result_dma_addr,
+ state->digest_result_dma_addr,
digestsize);
memcpy(result,
state->digest_result_buff,
@@ -431,8 +430,6 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
SSI_LOG_ERR("map_ahash_source() failed\n");
return -ENOMEM;
@@ -596,13 +593,13 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
"hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (nbytes == 0) {
/* no real updates required */
return 0;
}
- if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) {
+ rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
+ if (unlikely(rc)) {
if (rc == 1) {
SSI_LOG_DEBUG(" data size not require HW update %x\n",
nbytes);
@@ -693,8 +690,6 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
@@ -829,8 +824,6 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
@@ -964,7 +957,6 @@ static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
state->xcbc_count = 0;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ssi_hash_map_request(dev, state, ctx);
return 0;
@@ -975,7 +967,7 @@ static int ssi_hash_setkey(void *hash,
unsigned int keylen,
bool synchronize)
{
- unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
struct ssi_crypto_req ssi_req = {};
struct ssi_hash_ctx *ctx = NULL;
int blocksize = 0;
@@ -986,7 +978,6 @@ static int ssi_hash_setkey(void *hash,
SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
@@ -1012,9 +1003,8 @@ static int ssi_hash_setkey(void *hash,
" DMA failed\n", key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
- "keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
+ "keylen=%u\n", ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
if (keylen > blocksize) {
@@ -1118,7 +1108,7 @@ static int ssi_hash_setkey(void *hash,
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
@@ -1157,8 +1147,8 @@ out:
dma_unmap_single(&ctx->drvdata->plat_dev->dev,
ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
}
return rc;
@@ -1173,15 +1163,14 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- return -EINVAL;
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
}
ctx->key_params.keylen = keylen;
@@ -1196,9 +1185,9 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
" DMA failed\n", key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
"keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
ctx->is_hmac = true;
@@ -1245,8 +1234,8 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
dma_unmap_single(&ctx->drvdata->plat_dev->dev,
ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
return rc;
@@ -1259,17 +1248,16 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx->is_hmac = true;
switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- return -EINVAL;
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
}
ctx->key_params.keylen = keylen;
@@ -1302,8 +1290,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped digest-buffer: "
- "digest_buff_dma_addr=0x%llX\n",
- (unsigned long long)ctx->digest_buff_dma_addr);
+ "digest_buff_dma_addr=%pad\n",
+ ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
if (ctx->opad_tmp_keys_dma_addr != 0) {
@@ -1311,8 +1299,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped opad-digest: "
- "opad_tmp_keys_dma_addr=0x%llX\n",
- (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+ "opad_tmp_keys_dma_addr=%pad\n",
+ ctx->opad_tmp_keys_dma_addr);
ctx->opad_tmp_keys_dma_addr = 0;
}
@@ -1331,9 +1319,9 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
+ SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
- (unsigned long long)ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
@@ -1342,9 +1330,9 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
ctx->opad_tmp_keys_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
+ SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
- (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr);
ctx->is_hmac = false;
return 0;
@@ -1364,7 +1352,6 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
struct ssi_hash_alg *ssi_alg =
container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
@@ -1396,7 +1383,6 @@ static int ssi_mac_update(struct ahash_request *req)
int rc;
u32 idx = 0;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (req->nbytes == 0) {
/* no real updates required */
return 0;
@@ -1404,7 +1390,8 @@ static int ssi_mac_update(struct ahash_request *req)
state->xcbc_count++;
- if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size))) {
+ rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
+ if (unlikely(rc)) {
if (rc == 1) {
SSI_LOG_DEBUG(" data size not require HW update %x\n",
req->nbytes);
@@ -1454,19 +1441,19 @@ static int ssi_mac_final(struct ahash_request *req)
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc = 0;
- u32 keySize, keyLen;
+ u32 key_size, key_len;
u32 digestsize = crypto_ahash_digestsize(tfm);
u32 rem_cnt = state->buff_index ? state->buff1_cnt :
state->buff0_cnt;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
- keySize = CC_AES_128_BIT_KEY_SIZE;
- keyLen = CC_AES_128_BIT_KEY_SIZE;
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
} else {
- keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen;
- keyLen = ctx->key_params.keylen;
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
}
SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
@@ -1492,8 +1479,8 @@ static int ssi_mac_final(struct ahash_request *req)
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
set_din_type(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET), keySize, NS_BIT);
- set_key_size_aes(&desc[idx], keyLen);
+ XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1522,7 +1509,7 @@ static int ssi_mac_final(struct ahash_request *req)
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_key_size_aes(&desc[idx], keyLen);
+ set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
@@ -1569,9 +1556,8 @@ static int ssi_mac_finup(struct ahash_request *req)
u32 digestsize = crypto_ahash_digestsize(tfm);
SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (state->xcbc_count > 0 && req->nbytes == 0) {
- SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
+ SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final\n");
return ssi_mac_final(req);
}
@@ -1636,12 +1622,11 @@ static int ssi_mac_digest(struct ahash_request *req)
u32 digestsize = crypto_ahash_digestsize(tfm);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- u32 keyLen;
+ u32 key_len;
int idx = 0;
int rc;
SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
SSI_LOG_ERR("map_ahash_source() failed\n");
@@ -1662,17 +1647,17 @@ static int ssi_mac_digest(struct ahash_request *req)
ssi_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
- keyLen = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
ssi_hash_create_xcbc_setup(req, desc, &idx);
} else {
- keyLen = ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
ssi_hash_create_cmac_setup(req, desc, &idx);
}
if (req->nbytes == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_key_size_aes(&desc[idx], keyLen);
+ set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
@@ -1764,8 +1749,6 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
state->buff0_cnt;
const u32 tmp = CC_EXPORT_MAGIC;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
memcpy(out, &tmp, sizeof(u32));
out += sizeof(u32);
@@ -1805,8 +1788,6 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
u32 tmp;
int rc;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC) {
rc = -EINVAL;
@@ -2366,7 +2347,8 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
struct cc_hw_desc desc[],
- unsigned int *seq_size) {
+ unsigned int *seq_size)
+{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
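Most of the ssi_hash.c changes above swap "0x%llX" plus an unsigned long long cast for the kernel's %pad specifier, which prints a dma_addr_t correctly whether the type is 32 or 64 bits wide; note that %pad takes a pointer to the variable, not its value. A minimal sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void log_mapping_example(void *va, dma_addr_t dma, size_t len)
{
	/* %pad dereferences its argument, so pass &dma rather than dma */
	pr_debug("mapped %zu B at va=%pK to dma=%pad\n", len, va, &dma);
}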
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index 5ff3368c04d9..f140dbc5195c 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -158,7 +158,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
{
struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct device *device = &(drvdata->plat_dev->dev);
+ struct device *device = &drvdata->plat_dev->dev;
if (!ivgen_ctx)
return;
@@ -166,7 +166,8 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
if (ivgen_ctx->pool_meta) {
memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
- ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma);
+ ivgen_ctx->pool_meta,
+ ivgen_ctx->pool_meta_dma);
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 46d9396f9ff9..3f39150cda4f 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -30,8 +30,6 @@
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_pm.h"
-#include "ssi_fips.h"
-#include "ssi_fips_local.h"
#define SSI_MAX_POLL_ITER 10
@@ -129,7 +127,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -177,7 +175,8 @@ static inline void enqueue_seq(
writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
- seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]);
+ seq[i].word[0], seq[i].word[1], seq[i].word[2],
+ seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
}
}
@@ -211,7 +210,7 @@ static inline int request_mgr_queues_status_check(
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
@@ -221,9 +220,8 @@ static inline int request_mgr_queues_status_check(
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
@@ -235,7 +233,7 @@ static inline int request_mgr_queues_status_check(
}
SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
+ req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
@@ -291,9 +289,8 @@ int send_request(
* in case iv gen add the max size and in case of no dout add 1
* for the internal completion descriptor
*/
- rc = request_mgr_queues_status_check(req_mgr_h,
- cc_base,
- max_required_seq_len);
+ rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
+ max_required_seq_len);
if (likely(rc == 0))
/* There is enough place in the queue */
break;
@@ -320,21 +317,22 @@ int send_request(
if (!is_dout) {
init_completion(&ssi_req->seq_compl);
ssi_req->user_cb = request_mgr_complete;
- ssi_req->user_arg = &(ssi_req->seq_compl);
+ ssi_req->user_arg = &ssi_req->seq_compl;
total_seq_len++;
}
if (ssi_req->ivgen_dma_addr_len > 0) {
- SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- (unsigned long long)ssi_req->ivgen_dma_addr[0],
- (unsigned long long)ssi_req->ivgen_dma_addr[1],
- (unsigned long long)ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
+ SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ ssi_req->ivgen_dma_addr_len,
+ ssi_req->ivgen_dma_addr[0],
+ ssi_req->ivgen_dma_addr[1],
+ ssi_req->ivgen_dma_addr[2],
+ ssi_req->ivgen_size);
/* Acquire IV from pool */
- rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_size, iv_seq, &iv_seq_len);
+ rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
+ ssi_req->ivgen_dma_addr_len,
+ ssi_req->ivgen_size, iv_seq, &iv_seq_len);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
@@ -418,9 +416,8 @@ int send_request_init(
enqueue_seq(cc_base, desc, len);
/* Update the free slots in HW queue */
- req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
return 0;
}
@@ -545,8 +542,7 @@ static void comp_handler(unsigned long devarg)
}
 	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
}
/*
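The HOST_IMR updates in this file are plain read-modify-write sequences: read the interrupt mask, clear the bits for the interrupts just handled, and write the mask back. A generic sketch with an illustrative ioremapped register (the driver itself goes through its CC_HAL_* accessors):

#include <linux/io.h>
#include <linux/types.h>

/* imr is an illustrative ioremapped mask register, irq the bits to unmask */
static void unmask_completed_irq_example(void __iomem *imr, u32 irq)
{
	writel_relaxed(readl_relaxed(imr) & ~irq, imr);
}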
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index e05c0c13c2eb..f11116afe89a 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -58,7 +58,7 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL);
if (!drvdata->sram_mgr_handle) {
SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n",
- sizeof(struct ssi_sram_mgr_ctx));
+ sizeof(struct ssi_sram_mgr_ctx));
rc = -ENOMEM;
goto out;
}
@@ -90,12 +90,12 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
if (unlikely((size & 0x3) != 0)) {
SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4",
- size);
+ size);
return NULL_SRAM_ADDR;
}
if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
}
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
index dbcd1634aad1..0655658bba4d 100644
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -40,8 +40,7 @@ struct stat_name {
const char *stat_phase_name[MAX_STAT_PHASES];
};
-static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
-{
+static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = {
{
/* STAT_OP_TYPE_NULL */
.op_type_name = "NULL",
@@ -144,8 +143,12 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]
avg = (u64)item[i][j].sum;
do_div(avg, item[i][j].count);
SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
- stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
- item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count);
+ stat_name_db[i].op_type_name,
+ stat_name_db[i].stat_phase_name[j],
+ item[i][j].min, (int)avg,
+ item[i][j].max,
+ (long long)item[i][j].sum,
+ item[i][j].count);
}
}
}
@@ -156,21 +159,23 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]
**************************************/
static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
init_db(stat_host_db);
return count;
}
static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
init_db(stat_cc_db);
return count;
}
static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int i, j;
char line[512];
@@ -179,7 +184,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
ssize_t buf_len, tmp_len = 0;
buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
return buf_len;
for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
@@ -193,11 +198,11 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
avg = min_cyc = max_cyc = 0;
}
tmp_len = scnprintf(line, 512,
- "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- min_cyc, (unsigned int)avg, max_cyc,
- stat_host_db[i][j].count);
+ "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name,
+ stat_name_db[i].stat_phase_name[j],
+ min_cyc, (unsigned int)avg, max_cyc,
+ stat_host_db[i][j].count);
if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
return buf_len;
if (buf_len + tmp_len >= PAGE_SIZE)
@@ -210,7 +215,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
}
static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int i;
char line[256];
@@ -219,7 +224,7 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
ssize_t buf_len, tmp_len = 0;
buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
return buf_len;
for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
@@ -231,13 +236,10 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
} else {
avg = min_cyc = max_cyc = 0;
}
- tmp_len = scnprintf(line, 256,
- "%s\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- min_cyc,
- (unsigned int)avg,
- max_cyc,
- stat_cc_db[i][STAT_PHASE_6].count);
+ tmp_len = scnprintf(line, 256, "%s\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name, min_cyc,
+ (unsigned int)avg, max_cyc,
+ stat_cc_db[i][STAT_PHASE_6].count);
if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
return buf_len;
@@ -255,7 +257,7 @@ void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result)
unsigned long flags;
spin_lock_irqsave(&stat_lock, flags);
- update_db(&(stat_host_db[op_type][phase]), (unsigned int)result);
+ update_db(&stat_host_db[op_type][phase], (unsigned int)result);
spin_unlock_irqrestore(&stat_lock, flags);
}
@@ -264,7 +266,7 @@ void update_cc_stat(
unsigned int phase,
unsigned int elapsed_cycles)
{
- update_db(&(stat_cc_db[op_type][phase]), elapsed_cycles);
+ update_db(&stat_cc_db[op_type][phase], elapsed_cycles);
}
void display_all_stat_db(void)
@@ -277,7 +279,7 @@ void display_all_stat_db(void)
#endif /*CC_CYCLE_COUNT*/
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct ssi_drvdata *drvdata = sys_get_drvdata();
u32 register_value;
@@ -285,20 +287,20 @@ static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
int offset = 0;
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
return offset;
}
static ssize_t ssi_sys_help_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
char *help_str[] = {
"cat reg_dump ", "Print several of CC register values",
@@ -357,8 +359,8 @@ static struct ssi_drvdata *sys_get_drvdata(void)
}
static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
- struct kobject *parent_dir_kobj, const char *dir_name,
- struct kobj_attribute *attrs, u32 num_of_attrs)
+ struct kobject *parent_dir_kobj, const char *dir_name,
+ struct kobj_attribute *attrs, u32 num_of_attrs)
{
int i;
@@ -375,7 +377,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
/* allocate memory for directory's attributes list */
sys_dir->sys_dir_attr_list =
kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!(sys_dir->sys_dir_attr_list)) {
kobject_put(sys_dir->sys_dir_kobj);
@@ -386,7 +388,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
/* initialize attributes list */
for (i = 0; i < num_of_attrs; ++i)
- sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+ sys_dir->sys_dir_attr_list[i] = &attrs[i].attr;
/* last list entry should be NULL */
sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
@@ -394,7 +396,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
return sysfs_create_group(sys_dir->sys_dir_kobj,
- &(sys_dir->sys_dir_attr_group));
+ &sys_dir->sys_dir_attr_group);
}
static void sys_free_dir(struct sys_dir *sys_dir)
@@ -421,9 +423,9 @@ int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name);
/* Initialize top directory */
- retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj,
- "cc_info", ssi_sys_top_level_attrs,
- ARRAY_SIZE(ssi_sys_top_level_attrs));
+ retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
+ ssi_sys_top_level_attrs,
+ ARRAY_SIZE(ssi_sys_top_level_attrs));
return retval;
}
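sys_init_dir() above builds a NULL-terminated attribute pointer list, wraps it in an attribute_group, and registers it with sysfs_create_group(). A stripped-down sketch of the same registration, using an illustrative read-only attribute rather than the driver's own:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t help_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "example attribute\n");
}

static struct kobj_attribute help_attr = __ATTR_RO(help);

static struct attribute *example_attrs[] = {
	&help_attr.attr,
	NULL,			/* the list must end with NULL */
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

/* registered with: sysfs_create_group(parent_kobj, &example_group); */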
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 8e9b30b26810..b455ff6714eb 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -165,7 +165,7 @@ int comedi_buf_map_put(struct comedi_buf_map *bm)
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
void *buf, int len, int write)
{
- unsigned int pgoff = offset & ~PAGE_MASK;
+ unsigned int pgoff = offset_in_page(offset);
unsigned long pg = offset >> PAGE_SHIFT;
int done = 0;
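offset_in_page(), used in the comedi hunk above, is simply "(offset) & ~PAGE_MASK": the byte offset within the page that contains the address, paired with "offset >> PAGE_SHIFT" for the page index. A tiny sketch of the split:

#include <linux/mm.h>

static void split_offset_example(unsigned long offset,
				 unsigned long *page_idx,
				 unsigned int *in_page)
{
	*page_idx = offset >> PAGE_SHIFT;	/* which page */
	*in_page = offset_in_page(offset);	/* byte offset inside it */
}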
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index 5312edc26f01..031179ab3a22 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -217,8 +217,6 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
case 2:
num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
break;
- default:
- break;
}
for (k = 0; k < num_cnt; k++)
*(data + i++) = dpni_stats.raw.counter[k];
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index eced2d26467b..625990400fa9 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -176,7 +176,10 @@ static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
arche_platform_set_wake_detect_state(arche_pdata,
WD_STATE_IDLE);
} else {
- /* Check we are not in middle of irq thread already */
+ /*
+ * Check we are not in middle of irq thread
+ * already
+ */
if (arche_pdata->wake_detect_state !=
WD_STATE_COLDBOOT_START) {
arche_platform_set_wake_detect_state(arche_pdata,
@@ -657,12 +660,14 @@ static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
arche_platform_resume);
static const struct of_device_id arche_platform_of_match[] = {
- { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+ /* Use PID/VID of SVC device */
+ { .compatible = "google,arche-platform", },
{ },
};
static const struct of_device_id arche_combined_id[] = {
- { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+ /* Use PID/VID of SVC device */
+ { .compatible = "google,arche-platform", },
{ .compatible = "usbffff,2", },
{ },
};
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index 32a43693181c..d86bcce53e6b 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -528,11 +528,11 @@ static int log_results(struct loopback_test *t)
tm = *localtime(&local_time);
/*
- * file name will test_name_size_iteration_max.csv
- * every time the same test with the same parameters is run we will then
- * append to the same CSV with datestamp - representing each test
- * dataset.
- */
+	 * file name will be test_name_size_iteration_max.csv
+ * every time the same test with the same parameters is run we will then
+ * append to the same CSV with datestamp - representing each test
+ * dataset.
+ */
if (t->file_output && !t->porcelain) {
snprintf(file_name, sizeof(file_name), "%s_%d_%d.csv",
t->test_name, t->size, t->iteration_max);
@@ -779,7 +779,8 @@ static void prepare_devices(struct loopback_test *t)
{
int i;
- /* Cancel any running tests on enabled devices. If
+ /*
+ * Cancel any running tests on enabled devices. If
* stop_all option is given, stop test on all devices.
*/
for (i = 0; i < t->device_count; i++)
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index c9391198fbfb..83a13ca7259a 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -9,10 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 8aa12e813bd7..0f9348ba5d84 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -1356,7 +1356,7 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
/* Add mode */
iwe.cmd = SIOCGIWMODE;
- capabilities = le16_to_cpu(ap->capability);
+ capabilities = ap->capability;
if (capabilities & (BSS_CAP_ESS | BSS_CAP_IBSS)) {
if (capabilities & BSS_CAP_ESS)
iwe.u.mode = IW_MODE_INFRA;
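The ks7010 change drops a le16_to_cpu() on ap->capability, presumably because the field is already converted to CPU byte order when the scan result is parsed; converting twice would corrupt the value on big-endian hosts. A sketch of the convert-once-at-parse-time convention (the structure and names are illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct ap_info_example {
	u16 capability;			/* kept in CPU byte order */
};

static void parse_capability_example(struct ap_info_example *ap,
				     __le16 wire_capability)
{
	/* convert exactly once; later readers use ap->capability directly */
	ap->capability = le16_to_cpu(wire_capability);
}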
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 8ae7423b4543..f534115d402a 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -453,7 +453,8 @@ extern int portal_rotor;
int lnet_lib_init(void);
void lnet_lib_exit(void);
-int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive, unsigned long when);
+int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive,
+ unsigned long when);
void lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive,
unsigned long when);
int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 321752dfe58b..ddb808ed5d0b 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -308,9 +308,11 @@ struct lnet_rc_data {
struct lnet_peer {
struct list_head lp_hashlist; /* chain on peer hash */
struct list_head lp_txq; /* messages blocking for
- tx credits */
+ * tx credits
+ */
struct list_head lp_rtrq; /* messages blocking for
- router credits */
+ * router credits
+ */
struct list_head lp_rtr_list; /* chain on router list */
int lp_txcredits; /* # tx credits available */
int lp_mintxcredits; /* low water mark */
@@ -319,23 +321,31 @@ struct lnet_peer {
unsigned int lp_alive:1; /* alive/dead? */
unsigned int lp_notify:1; /* notification outstanding? */
unsigned int lp_notifylnd:1;/* outstanding notification
- for LND? */
+ * for LND?
+ */
unsigned int lp_notifying:1; /* some thread is handling
- notification */
+ * notification
+ */
unsigned int lp_ping_notsent;/* SEND event outstanding
- from ping */
+ * from ping
+ */
int lp_alive_count; /* # times router went
- dead<->alive */
- long lp_txqnob; /* bytes queued for sending */
+ * dead<->alive
+ */
+	long			lp_txqnob;	/* bytes queued for sending */
unsigned long lp_timestamp; /* time of last aliveness
- news */
+ * news
+ */
unsigned long lp_ping_timestamp;/* time of last ping
- attempt */
+ * attempt
+ */
unsigned long lp_ping_deadline; /* != 0 if ping reply
- expected */
+ * expected
+ */
unsigned long lp_last_alive; /* when I was last alive */
unsigned long lp_last_query; /* when lp_ni was queried
- last time */
+ * last time
+ */
struct lnet_ni *lp_ni; /* interface peer is on */
lnet_nid_t lp_nid; /* peer's NID */
int lp_refcount; /* # refs */
@@ -386,7 +396,8 @@ struct lnet_route {
struct lnet_remotenet {
struct list_head lrn_list; /* chain on
- ln_remote_nets_hash */
+ * ln_remote_nets_hash
+ */
struct list_head lrn_routes; /* routes to me */
__u32 lrn_net; /* my net number */
};
@@ -399,14 +410,16 @@ struct lnet_remotenet {
struct lnet_rtrbufpool {
struct list_head rbp_bufs; /* my free buffer pool */
struct list_head rbp_msgs; /* messages blocking
- for a buffer */
+ * for a buffer
+ */
int rbp_npages; /* # pages in each buffer */
/* requested number of buffers */
int rbp_req_nbuffers;
/* # buffers actually allocated */
int rbp_nbuffers;
- int rbp_credits; /* # free buffers /
- blocked messages */
+	int			rbp_credits;	/* # free buffers /
+ * blocked messages
+ */
int rbp_mincredits; /* low water mark */
};
@@ -442,7 +455,8 @@ enum lnet_match_flags {
#define LNET_PTL_LAZY (1 << 0)
#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match,
- request portal */
+ * request portal
+ */
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index ea736f8d5231..a4f9ff01d458 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -54,7 +54,8 @@
#define LSTIO_GROUP_ADD 0xC10 /* add group */
#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */
#define LSTIO_GROUP_INFO 0xC12 /* query default information of
- * specified group */
+ * specified group
+ */
#define LSTIO_GROUP_DEL 0xC13 /* delete group */
#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */
#define LSTIO_GROUP_UPDATE 0xC15 /* update group */
@@ -102,27 +103,32 @@ struct lstcon_test_ent {
int tse_type; /* test type */
int tse_loop; /* loop count */
int tse_concur; /* concurrency of test */
-}; /*** test summary entry, for
- *** list_batch command */
+}; /* test summary entry, for
+ * list_batch command
+ */
struct lstcon_batch_ent {
int bae_state; /* batch status */
int bae_timeout; /* batch timeout */
int bae_ntest; /* # of tests in the batch */
-}; /*** batch summary entry, for
- *** list_batch command */
+}; /* batch summary entry, for
+ * list_batch command
+ */
struct lstcon_test_batch_ent {
struct lstcon_ndlist_ent tbe_cli_nle; /* client (group) node_list
- * entry */
+ * entry
+ */
struct lstcon_ndlist_ent tbe_srv_nle; /* server (group) node_list
- * entry */
+ * entry
+ */
union {
struct lstcon_test_ent tbe_test; /* test entry */
struct lstcon_batch_ent tbe_batch;/* batch entry */
} u;
-}; /*** test/batch verbose information entry,
- *** for list_batch command */
+}; /* test/batch verbose information entry,
+ * for list_batch command
+ */
struct lstcon_rpc_ent {
struct list_head rpe_link; /* link chain */
@@ -138,10 +144,10 @@ struct lstcon_rpc_ent {
};
struct lstcon_trans_stat {
- int trs_rpc_stat[4]; /* RPCs stat (0: total
- 1: failed
- 2: finished
- 4: reserved */
+	int	trs_rpc_stat[4];	/* RPCs stat (0: total 1: failed
+					 * 2: finished
+					 * 4: reserved)
+					 */
int trs_rpc_errno; /* RPC errno */
int trs_fwk_stat[8]; /* framework stat */
int trs_fwk_errno; /* errno of the first remote error */
@@ -275,22 +281,28 @@ struct lstio_session_end_args {
struct lstio_debug_args {
int lstio_dbg_key; /* IN: session key */
int lstio_dbg_type; /* IN: debug
- session|batch|
- group|nodes
- list */
+ * session|batch|
+ * group|nodes list
+ */
int lstio_dbg_flags; /* IN: reserved debug
- flags */
+ * flags
+ */
int lstio_dbg_timeout; /* IN: timeout of
- debug */
+ * debug
+ */
int lstio_dbg_nmlen; /* IN: len of name */
char __user *lstio_dbg_namep; /* IN: name of
- group|batch */
+ * group|batch
+ */
int lstio_dbg_count; /* IN: # of test nodes
- to debug */
+ * to debug
+ */
struct lnet_process_id __user *lstio_dbg_idsp; /* IN: id of test
- nodes */
+ * nodes
+ */
struct list_head __user *lstio_dbg_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_group_add_args {
@@ -307,7 +319,8 @@ struct lstio_group_del_args {
#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */
#define LST_GROUP_REFRESH 2 /* refresh inactive nodes
- * in the group */
+ * in the group
+ */
#define LST_GROUP_RMND 3 /* delete nodes from the group */
struct lstio_group_update_args {
@@ -319,7 +332,8 @@ struct lstio_group_update_args {
int lstio_grp_count; /* IN: # of nodes id */
struct lnet_process_id __user *lstio_grp_idsp; /* IN: array of nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_group_nodes_args {
@@ -331,7 +345,8 @@ struct lstio_group_nodes_args {
unsigned int __user *lstio_grp_featp;
struct lnet_process_id __user *lstio_grp_idsp; /* IN: nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_group_list_args {
@@ -345,8 +360,9 @@ struct lstio_group_info_args {
int lstio_grp_key; /* IN: session key */
int lstio_grp_nmlen; /* IN: name len */
char __user *lstio_grp_namep; /* IN: name */
- struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description of
- group */
+ struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description
+ * of group
+ */
int __user *lstio_grp_idxp; /* IN/OUT: node index */
int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */
struct lstcon_node_ent __user *lstio_grp_dentsp;/* OUT: nodent array */
@@ -369,34 +385,41 @@ struct lstio_batch_del_args {
struct lstio_batch_run_args {
int lstio_bat_key; /* IN: session key */
int lstio_bat_timeout; /* IN: timeout for
- the batch */
+ * the batch
+ */
int lstio_bat_nmlen; /* IN: name length */
char __user *lstio_bat_namep; /* IN: batch name */
struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_batch_stop_args {
int lstio_bat_key; /* IN: session key */
int lstio_bat_force; /* IN: abort unfinished
- test RPC */
+ * test RPC
+ */
int lstio_bat_nmlen; /* IN: name length */
char __user *lstio_bat_namep; /* IN: batch name */
struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_batch_query_args {
int lstio_bat_key; /* IN: session key */
int lstio_bat_testidx; /* IN: test index */
int lstio_bat_client; /* IN: we testing
- client? */
+ * client?
+ */
int lstio_bat_timeout; /* IN: timeout for
- waiting */
+ * waiting
+ */
int lstio_bat_nmlen; /* IN: name length */
char __user *lstio_bat_namep; /* IN: batch name */
struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
struct lstio_batch_list_args {
@@ -411,7 +434,8 @@ struct lstio_batch_info_args {
int lstio_bat_nmlen; /* IN: name length */
char __user *lstio_bat_namep; /* IN: name */
int lstio_bat_server; /* IN: query server
- or not */
+ * or not
+ */
int lstio_bat_testidx; /* IN: test index */
struct lstcon_test_batch_ent __user *lstio_bat_entp;/* OUT: batch ent */
@@ -424,14 +448,17 @@ struct lstio_batch_info_args {
struct lstio_stat_args {
int lstio_sta_key; /* IN: session key */
int lstio_sta_timeout; /* IN: timeout for
- stat request */
+ * stat request
+ */
int lstio_sta_nmlen; /* IN: group name
- length */
+ * length
+ */
char __user *lstio_sta_namep; /* IN: group name */
int lstio_sta_count; /* IN: # of pid */
struct lnet_process_id __user *lstio_sta_idsp; /* IN: pid */
struct list_head __user *lstio_sta_resultp; /* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
enum lst_test_type {
@@ -452,26 +479,32 @@ struct lstio_test_args {
int lstio_tes_concur; /* IN: concurrency */
int lstio_tes_dist; /* IN: node distribution in
- destination groups */
+ * destination groups
+ */
int lstio_tes_span; /* IN: node span in
- destination groups */
+ * destination groups
+ */
int lstio_tes_sgrp_nmlen; /* IN: source group
- name length */
+ * name length
+ */
char __user *lstio_tes_sgrp_name; /* IN: group name */
int lstio_tes_dgrp_nmlen; /* IN: destination group
- name length */
+ * name length
+ */
char __user *lstio_tes_dgrp_name; /* IN: group name */
int lstio_tes_param_len; /* IN: param buffer len */
void __user *lstio_tes_param; /* IN: parameter for specified
- test:
- lstio_bulk_param_t,
- lstio_ping_param_t,
- ... more */
+ * test: lstio_bulk_param_t,
+ * lstio_ping_param_t,
+ * ... more
+ */
int __user *lstio_tes_retp; /* OUT: private returned
- value */
+ * value
+ */
struct list_head __user *lstio_tes_resultp;/* OUT: list head of
- result buffer */
+ * result buffer
+ */
};
enum lst_brw_type {
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index dd5bc0e46560..a1ae66ede7a8 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -76,7 +76,8 @@ struct ksock_msg {
__u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */
union {
struct ksock_lnet_msg lnetmsg; /* lnet message, it's empty if
- * it's NOOP */
+ * it's NOOP
+ */
} WIRE_ATTR ksm_u;
} WIRE_ATTR;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 5540de65f9a2..9eb169da2c2f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -519,17 +519,6 @@ extern struct ksock_proto ksocknal_protocol_v3x;
#define CPU_MASK_NONE 0UL
#endif
-static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
-{
-#if 1
- return crc32_le(crc, p, len);
-#else
- while (len-- > 0)
- crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
- return crc;
-#endif
-}
-
static inline int
ksocknal_route_mask(void)
{
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 8a036f4eb8d8..9c328dc6537b 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -201,9 +201,9 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
if (fragnob > sum)
fragnob = sum;
- conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- iov[i].iov_base,
- fragnob);
+ conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
+ iov[i].iov_base,
+ fragnob);
}
conn->ksnc_msg.ksm_csum = saved_csum;
}
@@ -243,8 +243,8 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn)
if (fragnob > sum)
fragnob = sum;
- conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- base, fragnob);
+ conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
+ base, fragnob);
kunmap(kiov[i].bv_page);
}
@@ -265,22 +265,22 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
tx->tx_msg.ksm_csum = 0;
- csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
- tx->tx_iov[0].iov_len);
+ csum = crc32_le(~0, tx->tx_iov[0].iov_base,
+ tx->tx_iov[0].iov_len);
if (tx->tx_kiov) {
for (i = 0; i < tx->tx_nkiov; i++) {
base = kmap(tx->tx_kiov[i].bv_page) +
tx->tx_kiov[i].bv_offset;
- csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);
+ csum = crc32_le(csum, base, tx->tx_kiov[i].bv_len);
kunmap(tx->tx_kiov[i].bv_page);
}
} else {
for (i = 1; i < tx->tx_niov; i++)
- csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
- tx->tx_iov[i].iov_len);
+ csum = crc32_le(csum, tx->tx_iov[i].iov_base,
+ tx->tx_iov[i].iov_len);
}
if (*ksocknal_tunables.ksnd_inject_csum_error) {
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index da36c55b86d3..ae7c2772825e 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -487,10 +487,9 @@ lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
sizeof(struct list_head)))
return -EFAULT;
- if (tmp.next == head_up)
- return 0;
-
next = tmp.next;
+ if (next == head_up)
+ return 0;
ent = list_entry(next, struct lstcon_rpc_ent, rpe_link);
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index b852fed0b10f..adaa0942130f 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -348,9 +348,10 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
f_curr->fce_range.lsr_end = new_start;
fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- } else
+ } else {
CERROR("NEW range =" DRANGE " curr = " DRANGE "\n",
PRANGE(range), PRANGE(&f_curr->fce_range));
+ }
}
struct fld_cache_entry
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ddb46428093f..181025dc36c3 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1029,11 +1029,11 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
if (work_list && lock->l_completion_ast)
ldlm_add_ast_work_item(lock, NULL, work_list);
- if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
+ if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
ldlm_grant_lock_with_skiplist(lock);
- else if (res->lr_type == LDLM_EXTENT)
+ } else if (res->lr_type == LDLM_EXTENT) {
ldlm_extent_add_lock(res, lock);
- else if (res->lr_type == LDLM_FLOCK) {
+ } else if (res->lr_type == LDLM_FLOCK) {
/*
* We should not add locks to granted list in the following cases:
* - this is an UNLOCK but not a real lock;
@@ -1045,8 +1045,9 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
return;
ldlm_resource_add_lock(res, &res->lr_granted, lock);
- } else
+ } else {
LBUG();
+ }
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8e45672b4617..2b6069983ac2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -591,9 +591,10 @@ static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
sbi = f->private;
- if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
+ if (sbi->ll_site->ls_obj_hash->hs_cur_bits >
+ 64 - PGC_OBJ_SHIFT) {
pos = ERR_PTR(-EFBIG);
- else {
+ } else {
*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
*pos);
if (*pos == ~0ULL)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 52cb1f0c9c94..b19dac15e901 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1026,7 +1026,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
/* check that server granted subset of flags we asked for. */
if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
ocd->ocd_connect_flags) {
- CERROR("%s: Server didn't granted asked subset of flags: asked=%#llx grranted=%#llx\n",
+		CERROR("%s: Server didn't grant the requested subset of flags: asked=%#llx granted=%#llx\n",
imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
ocd->ocd_connect_flags);
rc = -EPROTO;
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
new file mode 100644
index 000000000000..004b5027a934
--- /dev/null
+++ b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
@@ -0,0 +1,53 @@
+// Definitions for Pi433
+/dts-v1/;
+/plugin/;
+
+/ {
+ compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709";
+
+ fragment@0 {
+ target = <&spi0>;
+ __overlay__ {
+ status = "okay";
+
+ spidev@0{
+ status = "disabled";
+ };
+
+ spidev@1{
+ status = "disabled";
+ };
+ };
+ };
+
+ fragment@1 {
+ target = <&gpio>;
+ __overlay__ {
+ pi433_pins: pi433_pins {
+ brcm,pins = <7 25 24>;
+ brcm,function = <0 0 0>; // in in in
+ };
+ };
+ };
+
+ fragment@2 {
+ target = <&spi0>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ pi433: pi433@0 {
+ compatible = "Smarthome-Wolf,pi433";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ status = "okay";
+
+ pinctrl-0 = <&pi433_pins>;
+ DIO0-gpio = <&gpio 24 0>;
+ DIO1-gpio = <&gpio 25 0>;
+ DIO2-gpio = <&gpio 7 0>;
+ };
+ };
+ };
+};
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433.txt b/drivers/staging/pi433/Documentation/devicetree/pi433.txt
new file mode 100644
index 000000000000..9ff217fbcbbd
--- /dev/null
+++ b/drivers/staging/pi433/Documentation/devicetree/pi433.txt
@@ -0,0 +1,62 @@
+* Smarthome-Wolf Pi433 - a 433MHz radio module/shield for Raspberry Pi (see www.pi433.de)
+
+Required properties:
+- compatible: must be "Smarthome-Wolf,pi433"
+- reg: chip select of SPI Interface
+- DIOx-gpio: must reference the GPIO pin that is connected to DIOx of the RFM69 module
+
+
+Example:
+
+With the following lines in the gpio section, the gpio pins connected to the
+pi433 are reserved/declared.
+
+&gpio{
+ [...]
+
+ pi433_pins: pi433_pins {
+ brcm,pins = <7 25 24>;
+ brcm,function = <0 0 0>; // in in in
+ };
+
+ [...]
+}
+
+With the following lines in the spi section, the pi433 device is declared.
+It consists of the three gpio pins and an SPI interface (here chip select 0).
+
+&spi0{
+ [...]
+
+ pi433: pi433@0 {
+ compatible = "Smarthome-Wolf,pi433";
+ reg = <0>; /* CE 0 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spi-max-frequency = <10000000>;
+
+ pinctrl-0 = <&pi433_pins>;
+ DIO0-gpio = <&gpio 24 0>;
+ DIO1-gpio = <&gpio 25 0>;
+ DIO2-gpio = <&gpio 7 0>;
+ };
+}
+
+
+
+For Raspbian users only
+=======================
+Since Raspbian supports device tree overlays, you may use an overlay instead
+of editing your board's device tree.
+To use the overlay, you need to compile the file pi433-overlay.dts, which you
+can find alongside this documentation.
+The file can be compiled either manually or by integrating it into your kernel
+source tree. For a manual compile, you may use a command line like the following:
+'linux/scripts/dtc/dtc -@ -I dts -O dtb -o pi433.dtbo pi433-overlay.dts'
+
+For compiling inside the kernel tree, copy pi433-overlay.dts to
+arch/arm/boot/dts/overlays and add the file to the list of files in the
+Makefile there. Then execute 'make dtbs' in the kernel tree root to have the
+kernel build system compile the device tree overlay for you.
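+
+Afterwards the compiled overlay still needs to be activated. On a standard
+Raspbian installation (assuming the usual boot partition layout, which is not
+something this driver requires) this is done by copying pi433.dtbo to the
+overlays folder of the boot partition and adding a line like 'dtoverlay=pi433'
+to config.txt, followed by a reboot.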
+
+
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
new file mode 100644
index 000000000000..38b83b86c334
--- /dev/null
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -0,0 +1,274 @@
+=====
+Pi433
+=====
+
+
+Introduction
+============
+This driver is for controlling pi433, a radio module for the Raspberry Pi
+(www.pi433.de). It supports transmission and reception. It can be opened
+by multiple applications for transmission and reception. While transmit
+jobs are queued and processed automatically in the background, the first
+application asking for reception will block out all other applications
+until something gets received, which terminates the read request.
+The driver supports on-the-fly reloading of the hardware fifo of the rf
+chip, thus enabling telegrams much longer than the hardware fifo size.
+
+Description of driver operation
+===============================
+
+a) transmission
+
+Each transmission can take place with a different configuration of the rf
+module. Therefore each application can set its own set of parameters. The driver
+ensures that each transmission takes place with the parameter set of the
+application that requests the transmission. To allow the transmission to take
+place in the background, a tx thread is introduced.
+The transfer of data from the main thread to the tx thread is realised by a
+kfifo. With each write request of an application, the passed-in data and the
+corresponding parameter set get written to the kfifo.
+On the other "side" of the kfifo, the tx thread continuously checks whether the
+kfifo is empty. If not, it gets one set of config and data from the kfifo. If
+there is no receive request or the receiver is still waiting for something in
+the air, the rf module is set to standby, the parameters for transmission get
+set, the hardware fifo of the rf chip gets preloaded and the transmission gets
+started. Upon the hardware fifo threshold interrupt it gets reloaded, thus
+enabling telegrams much longer than the hardware fifo size. If the telegram is
+sent and there is more data available in the kfifo, the procedure is repeated.
+If not, the transmission cycle ends.
+
+b) reception
+
+Since only one application is allowed to receive data at a time, for
+reception there is only one configuration set.
+As soon as an application submits a request for receiving a telegram, the
+reception configuration set is written to the rf module and it gets set into
+receiving mode. The driver then waits until a predefined RSSI level (signal
+strength at the receiver) is reached. Until this happens, the reception can be
+interrupted by the transmission thread at any time to insert a transmission
+cycle. As soon as the predefined RSSI level is met, a receiving cycle starts.
+Similar to the transmission cycle, the read-out of the hardware fifo is done
+dynamically. Upon each hardware fifo threshold interrupt, a portion of data
+gets read. So also for reception it is possible to receive more data than the
+hardware fifo can hold.
+
+
+Driver API
+==========
+
+The driver is currently implemented as a character device. Therefore it supports
+the calls open, ioctl, read, write and close.
+
+
+params for ioctl
+----------------
+
+There are four options:
+PI433_IOC_RD_TX_CFG - get the transmission parameters from the driver
+PI433_IOC_WR_TX_CFG - set the transmission parameters
+PI433_IOC_RD_RX_CFG - get the receiving parameters from the driver
+PI433_IOC_WR_RX_CFG - set the receiving parameters
+
+The tx configuration is transferred via struct pi433_tx_cfg, the parameter set
+for transmission. It is divided into two sections: rf parameters and packet
+format. A short usage sketch can be found after the parameter descriptions below.
+
+rf params:
+ frequency
+ frequency used for transmission.
+ Allowed values: 433050000...434790000
+ bit_rate
+ bit rate used for transmission.
+ Allowed values: #####
+ dev_frequency
+ frequency deviation in case of FSK.
+ Allowed values: 600...500000
+ modulation
+		FSK - frequency shift keying
+		OOK - on-off keying
+ modShaping
+ shapingOff - no shaping
+ shaping1_0 - gauss filter with BT 1 (FSK only)
+ shaping0_5 - gauss filter with BT 0.5 (FSK only)
+ shaping0_3 - gauss filter with BT 0.3 (FSK only)
+ shapingBR - filter cut off at BR (OOK only)
+ shaping2BR - filter cut off at 2*BR (OOK only)
+ paRamp (FSK only)
+ ramp3400 - amp ramps up in 3.4ms
+ ramp2000 - amp ramps up in 2.0ms
+ ramp1000 - amp ramps up in 1ms
+ ramp500 - amp ramps up in 500us
+ ramp250 - amp ramps up in 250us
+ ramp125 - amp ramps up in 125us
+ ramp100 - amp ramps up in 100us
+ ramp62 - amp ramps up in 62us
+ ramp50 - amp ramps up in 50us
+ ramp40 - amp ramps up in 40us
+ ramp31 - amp ramps up in 31us
+ ramp25 - amp ramps up in 25us
+ ramp20 - amp ramps up in 20us
+ ramp15 - amp ramps up in 15us
+ ramp12 - amp ramps up in 12us
+ ramp10 - amp ramps up in 10us
+ tx_start_condition
+		fifoLevel    - transmission starts if the fifo is filled to
+			       the threshold level
+		fifoNotEmpty - transmission starts as soon as there is one
+			       byte in the internal fifo
+	repetitions
+		This gives the option to send a telegram multiple times. Default: 1
+
+packet format:
+ enable_preamble
+ optionOn - a preamble will be automatically generated
+ optionOff - no preamble will be generated
+ enable_sync
+ optionOn - a sync word will be automatically added to
+ the telegram after preamble
+ optionOff - no sync word will be added
+		Attention: While it is possible to generate a sync word without a
+		preamble, the receiver won't be able to detect the sync without a preamble.
+ enable_length_byte
+ optionOn - the length of the telegram will be automatically
+ added to the telegram. It's part of the payload
+ optionOff - no length information will be automatically added
+ to the telegram.
+		Attention: For telegram lengths over 255 bytes, this option can't be used
+		Attention: should only be used in combination with sync
+ enable_address_byte
+		optionOn  - the address byte will be automatically added to the
+			    telegram. It's part of the payload
+		optionOff - the address byte will not be added to the telegram.
+		The address byte can be used for address filtering, so the receiver
+		will only receive telegrams with a given address byte.
+		Attention: should only be used in combination with sync
+ enable_crc
+		optionOn  - a crc will be automatically calculated over the
+ payload of the telegram and added to the telegram
+ after payload.
+ optionOff - no crc will be calculated
+ preamble_length
+ length of the preamble. Allowed values: 0...65536
+ sync_length
+ length of the sync word. Allowed values: 0...8
+ fixed_message_length
+ length of the payload of the telegram. Will override the length
+ given by the buffer, passed in with the write command. Will be
+ ignored if set to zero.
+ sync_pattern[8]
+		contains up to eight values that are used as the sync pattern
+		if the sync option is enabled
+	address_byte
+		one byte, used as the address byte if the address byte option is enabled.
+
+
+The rx configuration is transferred via struct pi433_rx_cfg, the parameter set for receiving. It is divided into two sections: rf parameters and packet format.
+
+rf params:
+ frequency
+		frequency used for reception.
+		Allowed values: 433050000...434790000
+	bit_rate
+		bit rate used for reception.
+ Allowed values: #####
+ dev_frequency
+ frequency deviation in case of FSK.
+ Allowed values: 600...500000
+ modulation
+		FSK - frequency shift keying
+		OOK - on-off keying
+ rssi_threshold
+ threshold value for the signal strength on the receiver input.
+		If this value is exceeded, a reception cycle starts.
+ Allowed values: 0...255
+ thresholdDecrement
+		In order to adapt to different levels of signal strength, the
+		receiver gets more and more sensitive over time. This value
+		determines how fast the sensitivity increases.
+		step_0_5db - increase in 0.5 dB steps
+		step_1_0db - increase in 1.0 dB steps
+		step_1_5db - increase in 1.5 dB steps
+		step_2_0db - increase in 2.0 dB steps
+		step_3_0db - increase in 3.0 dB steps
+		step_4_0db - increase in 4.0 dB steps
+		step_5_0db - increase in 5.0 dB steps
+		step_6_0db - increase in 6.0 dB steps
+ antennaImpedance
+		sets the electrical matching of the antenna
+		fiftyOhm      - for antennas with an impedance of 50 Ohm
+		twohundretOhm - for antennas with an impedance of 200 Ohm
+ lnaGain
+ sets the gain of the low noise amp
+		automatic  - lna gain is determined by an agc
+		max        - lna gain is set to maximum
+		maxMinus6  - lna gain is set to 6 dB below max
+		maxMinus12 - lna gain is set to 12 dB below max
+		maxMinus24 - lna gain is set to 24 dB below max
+		maxMinus36 - lna gain is set to 36 dB below max
+		maxMinus48 - lna gain is set to 48 dB below max
+ bw_mantisse
+ sets the bandwidth of the channel filter - part one: mantisse.
+ mantisse16 - mantisse is set to 16
+ mantisse20 - mantisse is set to 20
+ mantisse24 - mantisse is set to 24
+ bw_exponent
+ sets the bandwidth of the channel filter - part two: exponent.
+		Allowed values: 0...7
+	dagc
+ operation mode of the digital automatic gain control
+ normalMode
+ improve
+ improve4LowModulationIndex
+
+ packet format:
+ enable_sync
+		optionOn  - sync detection is enabled. If the configured sync pattern
+			    isn't found, the telegram will be internally discarded
+ optionOff - sync detection is disabled.
+ enable_length_byte
+ optionOn - First byte of payload will be used as length byte,
+ regardless of the amount of bytes that were requested
+ by the read request.
+ optionOff - Number of bytes to be read will be set according to
+ amount of bytes that were requested by the read request.
+		Attention: should only be used in combination with sync
+	enable_address_filtering
+		filteringOff           - no address filtering will take place
+		nodeAddress            - all telegrams not matching the node
+					 address will be internally discarded
+		nodeOrBroadcastAddress - all telegrams matching neither the
+					 node nor the broadcast address will
+					 be internally discarded
+ Attention: Sync option must be enabled in order to use this feature
+ enable_crc
+		optionOn  - a crc will be calculated over the payload of
+			    the received telegram. If the calculated crc
+			    doesn't match the two bytes that follow the
+			    payload, the telegram will be internally
+			    discarded.
+		Attention: This option is only operational if sync is on and a fixed
+		length or length byte is used
+	sync_length
+		Gives the length of the sync word.
+		Attention: This setting must match the setting of the transmitter,
+		if the sync option is used.
+ fixed_message_length
+ Overrides the telegram length either given by the first byte of
+ payload or by the read request.
+ bytes_to_drop
+		gives the number of bytes that will be dropped before transferring
+		data to the read buffer
+		This option is only useful if all packet helpers are switched
+		off and the rf chip is used in raw receiving mode. This may be
+		needed if a telegram of a third-party device should be received
+		using a protocol not compatible with the packet engine of the rf69 chip.
+ sync_pattern[8]
+		contains up to eight values that are used as the sync pattern
+		if the sync option is enabled.
+		This setting must match the configuration of the transmitting device,
+		if the sync option is enabled.
+	node_address
+		one byte, used as the node address byte if address filtering is enabled.
+	broadcast_address
+		one byte, used as the broadcast address byte if address filtering is enabled.
+
+
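+
+Usage sketch
+------------
+The following user space fragment only illustrates the intended call sequence
+(open, ioctl, write, read); it is not part of the driver sources. The device
+node name /dev/pi433.0, the include path of pi433_if.h and the chosen parameter
+values are assumptions; the ioctl names and struct members are the ones
+described above, and the remaining members must be filled in as documented.
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include "pi433_if.h"		/* copied from the driver sources */
+
+	int main(void)
+	{
+		struct pi433_tx_cfg tx_cfg;
+		struct pi433_rx_cfg rx_cfg;
+		char rx_buf[64];
+		int fd, received;
+
+		fd = open("/dev/pi433.0", O_RDWR);	/* node name is an assumption */
+		if (fd < 0)
+			return 1;
+
+		/* read the current tx parameter set, adjust it and write it back */
+		ioctl(fd, PI433_IOC_RD_TX_CFG, &tx_cfg);
+		tx_cfg.frequency = 433920000;		/* example value within 433050000...434790000 */
+		tx_cfg.repetitions = 1;
+		ioctl(fd, PI433_IOC_WR_TX_CFG, &tx_cfg);
+
+		/* queue one telegram; the tx thread sends it in the background */
+		write(fd, "hello", 5);
+
+		/* read the rx parameter set, adjust it and wait for a telegram */
+		ioctl(fd, PI433_IOC_RD_RX_CFG, &rx_cfg);
+		rx_cfg.frequency = 433920000;		/* example value, see description above */
+		ioctl(fd, PI433_IOC_WR_RX_CFG, &rx_cfg);
+		received = read(fd, rx_buf, sizeof(rx_buf));
+		if (received > 0)
+			printf("received %d byte(s)\n", received);
+
+		close(fd);
+		return 0;
+	}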
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
new file mode 100644
index 000000000000..b2716b85d5af
--- /dev/null
+++ b/drivers/staging/pi433/Kconfig
@@ -0,0 +1,16 @@
+config PI433
+ tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
+ default n
+ ---help---
+ This option allows you to enable support for the radio module Pi433.
+
+ Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
+	  or compatible. It extends the Raspberry Pi with the option to
+	  send and receive data in the 433MHz ISM band - for example to
+	  communicate between two systems without using ethernet or bluetooth,
+	  or to control or read sockets, actuators and sensors, which are
+	  widely available at low cost.
+
+ For details or the option to buy, please visit https://pi433.de/en.html
+
+ If in doubt, say N here, but saying yes most probably won't hurt
diff --git a/drivers/staging/pi433/Makefile b/drivers/staging/pi433/Makefile
new file mode 100644
index 000000000000..417f3e4d12b1
--- /dev/null
+++ b/drivers/staging/pi433/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_PI433) += pi433.o
+
+pi433-objs := pi433_if.o rf69.o
diff --git a/drivers/staging/pi433/TODO b/drivers/staging/pi433/TODO
new file mode 100644
index 000000000000..63a40bfcc67e
--- /dev/null
+++ b/drivers/staging/pi433/TODO
@@ -0,0 +1,5 @@
+* coding style does not fully comply with the kernel style guide.
+* still TODOs, annotated in the code
+* currently the code introduces new IOCTLs. I'm afraid this is a bad idea.
+ -> Replace this with another interface, hints are welcome!
+* Some missing data (marked with ###) needs to be added in the documentation
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
new file mode 100644
index 000000000000..d9328ce5ec1d
--- /dev/null
+++ b/drivers/staging/pi433/pi433_if.c
@@ -0,0 +1,1314 @@
+/*
+ * userspace interface for pi433 radio module
+ *
+ * Pi433 is a 433MHz radio module for the Raspberry Pi.
+ * It is based on the HopeRf Module RFM69CW. Therefore inside of this
+ * driver, you'll find an abstraction of the rf69 chip.
+ *
+ * If needed, this driver could be extended to also support other
+ * devices based on HopeRf's rf69.
+ *
+ * The driver can also be extended to support other modules of
+ * HopeRf with a similar interface - e.g. RFM69HCW, RFM12, RFM95, ...
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/spi/spi.h>
+#ifdef CONFIG_COMPAT
+#include <asm/compat.h>
+#endif
+
+#include "pi433_if.h"
+#include "rf69.h"
+
+
+#define N_PI433_MINORS (1U << MINORBITS) /*32*/ /* ... up to 256 */
+#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
+#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
+#define NUM_DIO 2
+
+static dev_t pi433_dev;
+static DEFINE_IDR(pi433_idr);
+static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
+
+static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
+
+/* tx config is instance specific
+ so with each open a new tx config struct is needed */
+/* rx config is device specific
+   so we have just one rx config, embedded in device struct */
+struct pi433_device {
+ /* device handling related values */
+ dev_t devt;
+ int minor;
+ struct device *dev;
+ struct cdev *cdev;
+ struct spi_device *spi;
+ unsigned users;
+
+ /* irq related values */
+ struct gpio_desc *gpiod[NUM_DIO];
+ int irq_num[NUM_DIO];
+ u8 irq_state[NUM_DIO];
+
+ /* tx related values */
+ STRUCT_KFIFO_REC_1(MSG_FIFO_SIZE) tx_fifo;
+ struct mutex tx_fifo_lock; // TODO: check, whether necessary or obsolete
+ struct task_struct *tx_task_struct;
+ wait_queue_head_t tx_wait_queue;
+ u8 free_in_fifo;
+
+ /* rx related values */
+ struct pi433_rx_cfg rx_cfg;
+ u8 *rx_buffer;
+ unsigned int rx_buffer_size;
+ u32 rx_bytes_to_drop;
+ u32 rx_bytes_dropped;
+ unsigned int rx_position;
+ struct mutex rx_lock;
+ wait_queue_head_t rx_wait_queue;
+
+ /* fifo wait queue */
+ struct task_struct *fifo_task_struct;
+ wait_queue_head_t fifo_wait_queue;
+
+ /* flags */
+ bool rx_active;
+ bool tx_active;
+ bool interrupt_rx_allowed;
+};
+
+struct pi433_instance {
+ struct pi433_device *device;
+ struct pi433_tx_cfg tx_cfg;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* macro for checked access of registers of radio module */
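+/* note: the macro argument is pasted into the expansion twice, so if the
+ * rf69_*() call passed as argument returns an error, that call is issued a
+ * second time to obtain the value that gets returned */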
+#define SET_CHECKED(retval) \
+ if (retval < 0) \
+ return retval;
+
+/*-------------------------------------------------------------------------*/
+
+/* GPIO interrupt handlers */
+static irq_handler_t
+DIO0_irq_handler(unsigned int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pi433_device *device = dev_id;
+
+ if (device->irq_state[DIO0] == DIO_PacketSent)
+ {
+ device->free_in_fifo = FIFO_SIZE;
+ printk("DIO0 irq: Packet sent\n"); // TODO: printk() should include KERN_ facility level
+ wake_up_interruptible(&device->fifo_wait_queue);
+ }
+ else if (device->irq_state[DIO0] == DIO_Rssi_DIO0)
+ {
+ printk("DIO0 irq: RSSI level over threshold\n");
+ wake_up_interruptible(&device->rx_wait_queue);
+ }
+ else if (device->irq_state[DIO0] == DIO_PayloadReady)
+ {
+ printk("DIO0 irq: PayloadReady\n");
+ device->free_in_fifo = 0;
+ wake_up_interruptible(&device->fifo_wait_queue);
+ }
+
+ return (irq_handler_t) IRQ_HANDLED;
+}
+
+static irq_handler_t
+DIO1_irq_handler(unsigned int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pi433_device *device = dev_id;
+
+ if (device->irq_state[DIO1] == DIO_FifoNotEmpty_DIO1)
+ {
+ device->free_in_fifo = FIFO_SIZE;
+ }
+ else if (device->irq_state[DIO1] == DIO_FifoLevel)
+ {
+ if (device->rx_active) device->free_in_fifo = FIFO_THRESHOLD - 1;
+ else device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1;
+ }
+ printk("DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo); // TODO: printk() should include KERN_ facility level
+ wake_up_interruptible(&device->fifo_wait_queue);
+
+ return (irq_handler_t) IRQ_HANDLED;
+}
+
+static void *DIO_irq_handler[NUM_DIO] = {
+ DIO0_irq_handler,
+ DIO1_irq_handler
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int
+rf69_set_rx_cfg(struct pi433_device *dev, struct pi433_rx_cfg *rx_cfg)
+{
+ int payload_length;
+
+ /* receiver config */
+ SET_CHECKED(rf69_set_frequency (dev->spi, rx_cfg->frequency));
+ SET_CHECKED(rf69_set_bit_rate (dev->spi, rx_cfg->bit_rate));
+ SET_CHECKED(rf69_set_modulation (dev->spi, rx_cfg->modulation));
+ SET_CHECKED(rf69_set_antenna_impedance (dev->spi, rx_cfg->antenna_impedance));
+ SET_CHECKED(rf69_set_rssi_threshold (dev->spi, rx_cfg->rssi_threshold));
+ SET_CHECKED(rf69_set_ook_threshold_dec (dev->spi, rx_cfg->thresholdDecrement));
+ SET_CHECKED(rf69_set_bandwidth (dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent));
+ SET_CHECKED(rf69_set_bandwidth_during_afc(dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent));
+ SET_CHECKED(rf69_set_dagc (dev->spi, rx_cfg->dagc));
+
+ dev->rx_bytes_to_drop = rx_cfg->bytes_to_drop;
+
+ /* packet config */
+ /* enable */
+ SET_CHECKED(rf69_set_sync_enable(dev->spi, rx_cfg->enable_sync));
+ if (rx_cfg->enable_sync == optionOn)
+ {
+ SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, afterSyncInterrupt));
+ }
+ else
+ {
+ SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, always));
+ }
+ SET_CHECKED(rf69_set_packet_format (dev->spi, rx_cfg->enable_length_byte));
+ SET_CHECKED(rf69_set_adressFiltering(dev->spi, rx_cfg->enable_address_filtering));
+ SET_CHECKED(rf69_set_crc_enable (dev->spi, rx_cfg->enable_crc));
+
+ /* lengths */
+ SET_CHECKED(rf69_set_sync_size(dev->spi, rx_cfg->sync_length));
+ if (rx_cfg->enable_length_byte == optionOn)
+ {
+ SET_CHECKED(rf69_set_payload_length(dev->spi, 0xff));
+ }
+ else if (rx_cfg->fixed_message_length != 0)
+ {
+ payload_length = rx_cfg->fixed_message_length;
+ if (rx_cfg->enable_length_byte == optionOn) payload_length++;
+ if (rx_cfg->enable_address_filtering != filteringOff) payload_length++;
+ SET_CHECKED(rf69_set_payload_length(dev->spi, payload_length));
+ }
+ else
+ {
+ SET_CHECKED(rf69_set_payload_length(dev->spi, 0));
+ }
+
+ /* values */
+ if (rx_cfg->enable_sync == optionOn)
+ {
+ SET_CHECKED(rf69_set_sync_values(dev->spi, rx_cfg->sync_pattern));
+ }
+ if (rx_cfg->enable_address_filtering != filteringOff)
+ {
+ SET_CHECKED(rf69_set_node_address (dev->spi, rx_cfg->node_address));
+ SET_CHECKED(rf69_set_broadcast_address(dev->spi, rx_cfg->broadcast_address));
+ }
+
+ return 0;
+}
+
+static int
+rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
+{
+ SET_CHECKED(rf69_set_frequency (dev->spi, tx_cfg->frequency));
+ SET_CHECKED(rf69_set_bit_rate (dev->spi, tx_cfg->bit_rate));
+ SET_CHECKED(rf69_set_modulation (dev->spi, tx_cfg->modulation));
+ SET_CHECKED(rf69_set_deviation (dev->spi, tx_cfg->dev_frequency));
+ SET_CHECKED(rf69_set_pa_ramp (dev->spi, tx_cfg->pa_ramp));
+ SET_CHECKED(rf69_set_modulation_shaping(dev->spi, tx_cfg->modShaping));
+ SET_CHECKED(rf69_set_tx_start_condition(dev->spi, tx_cfg->tx_start_condition));
+
+ /* packet format enable */
+ if (tx_cfg->enable_preamble == optionOn)
+ {
+ SET_CHECKED(rf69_set_preamble_length(dev->spi, tx_cfg->preamble_length));
+ }
+ else
+ {
+ SET_CHECKED(rf69_set_preamble_length(dev->spi, 0));
+ }
+ SET_CHECKED(rf69_set_sync_enable (dev->spi, tx_cfg->enable_sync));
+ SET_CHECKED(rf69_set_packet_format(dev->spi, tx_cfg->enable_length_byte));
+ SET_CHECKED(rf69_set_crc_enable (dev->spi, tx_cfg->enable_crc));
+
+ /* configure sync, if enabled */
+ if (tx_cfg->enable_sync == optionOn)
+ {
+ SET_CHECKED(rf69_set_sync_size(dev->spi, tx_cfg->sync_length));
+ SET_CHECKED(rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern));
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+pi433_start_rx(struct pi433_device *dev)
+{
+ int retval;
+
+ /* return without action, if no pending read request */
+ if (!dev->rx_active)
+ return 0;
+
+ /* setup for receiving */
+ retval = rf69_set_rx_cfg(dev, &dev->rx_cfg);
+ if (retval) return retval;
+
+ /* setup rssi irq */
+ SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO0, DIO_Rssi_DIO0));
+ dev->irq_state[DIO0] = DIO_Rssi_DIO0;
+ irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
+
+ /* setup fifo level interrupt */
+ SET_CHECKED(rf69_set_fifo_threshold(dev->spi, FIFO_SIZE - FIFO_THRESHOLD));
+ SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO1, DIO_FifoLevel));
+ dev->irq_state[DIO1] = DIO_FifoLevel;
+ irq_set_irq_type(dev->irq_num[DIO1], IRQ_TYPE_EDGE_RISING);
+
+ /* set module to receiving mode */
+ SET_CHECKED(rf69_set_mode(dev->spi, receive));
+
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+int
+pi433_receive(void *data)
+{
+ struct pi433_device *dev = data;
+ struct spi_device *spi = dev->spi; /* needed for SET_CHECKED */
+ int bytes_to_read, bytes_total;
+ int retval;
+
+ dev->interrupt_rx_allowed = false;
+
+ /* wait for any tx to finish */
+ dev_dbg(dev->dev,"rx: going to wait for any tx to finish");
+ retval = wait_event_interruptible(dev->rx_wait_queue, !dev->tx_active);
+ if(retval) /* wait was interrupted */
+ {
+ dev->interrupt_rx_allowed = true;
+ wake_up_interruptible(&dev->tx_wait_queue);
+ return retval;
+ }
+
+ /* prepare status vars */
+ dev->free_in_fifo = FIFO_SIZE;
+ dev->rx_position = 0;
+ dev->rx_bytes_dropped = 0;
+
+ /* setup radio module to listen for something "in the air" */
+ retval = pi433_start_rx(dev);
+ if (retval)
+ return retval;
+
+ /* now check RSSI, if low wait for getting high (RSSI interrupt) */
+ while ( !rf69_get_flag(dev->spi, rssiExceededThreshold) )
+ {
+ /* allow tx to interrupt us while waiting for high RSSI */
+ dev->interrupt_rx_allowed = true;
+ wake_up_interruptible(&dev->tx_wait_queue);
+
+ /* wait for RSSI level to become high */
+ dev_dbg(dev->dev, "rx: going to wait for high RSSI level");
+ retval = wait_event_interruptible(dev->rx_wait_queue,
+ rf69_get_flag(dev->spi,
+ rssiExceededThreshold));
+ if (retval) goto abort; /* wait was interrupted */
+ dev->interrupt_rx_allowed = false;
+
+ /* cross check for ongoing tx */
+ if (!dev->tx_active) break;
+ }
+
+ /* configure payload ready irq */
+ SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PayloadReady));
+ dev->irq_state[DIO0] = DIO_PayloadReady;
+ irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
+
+ /* fixed or unlimited length? */
+ if (dev->rx_cfg.fixed_message_length != 0)
+ {
+ if (dev->rx_cfg.fixed_message_length > dev->rx_buffer_size)
+ {
+ retval = -1;
+ goto abort;
+ }
+ bytes_total = dev->rx_cfg.fixed_message_length;
+ dev_dbg(dev->dev,"rx: msg len set to %d by fixed length", bytes_total);
+ }
+ else
+ {
+ bytes_total = dev->rx_buffer_size;
+ dev_dbg(dev->dev, "rx: msg len set to %d as requested by read", bytes_total);
+ }
+
+ /* length byte enabled? */
+ if (dev->rx_cfg.enable_length_byte == optionOn)
+ {
+ retval = wait_event_interruptible(dev->fifo_wait_queue,
+ dev->free_in_fifo < FIFO_SIZE);
+ if (retval) goto abort; /* wait was interrupted */
+
+ rf69_read_fifo(spi, (u8 *)&bytes_total, 1);
+ if (bytes_total > dev->rx_buffer_size)
+ {
+ retval = -1;
+ goto abort;
+ }
+ dev->free_in_fifo++;
+ dev_dbg(dev->dev, "rx: msg len reset to %d due to length byte", bytes_total);
+ }
+
+ /* address byte enabled? */
+ if (dev->rx_cfg.enable_address_filtering != filteringOff)
+ {
+ u8 dummy;
+
+ bytes_total--;
+
+ retval = wait_event_interruptible(dev->fifo_wait_queue,
+ dev->free_in_fifo < FIFO_SIZE);
+ if (retval) goto abort; /* wait was interrupted */
+
+ rf69_read_fifo(spi, &dummy, 1);
+ dev->free_in_fifo++;
+ dev_dbg(dev->dev, "rx: address byte stripped off");
+ }
+
+ /* get payload */
+ while (dev->rx_position < bytes_total)
+ {
+ if ( !rf69_get_flag(dev->spi, payloadReady) )
+ {
+ retval = wait_event_interruptible(dev->fifo_wait_queue,
+ dev->free_in_fifo < FIFO_SIZE);
+ if (retval) goto abort; /* wait was interrupted */
+ }
+
+ /* need to drop bytes or acquire? */
+ if (dev->rx_bytes_to_drop > dev->rx_bytes_dropped)
+ bytes_to_read = dev->rx_bytes_to_drop - dev->rx_bytes_dropped;
+ else
+ bytes_to_read = bytes_total - dev->rx_position;
+
+
+ /* access the fifo */
+ if (bytes_to_read > FIFO_SIZE - dev->free_in_fifo)
+ bytes_to_read = FIFO_SIZE - dev->free_in_fifo;
+ retval = rf69_read_fifo(spi,
+ &dev->rx_buffer[dev->rx_position],
+ bytes_to_read);
+ if (retval) goto abort; /* read failed */
+ dev->free_in_fifo += bytes_to_read;
+
+ /* adjust status vars */
+ if (dev->rx_bytes_to_drop > dev->rx_bytes_dropped)
+ dev->rx_bytes_dropped += bytes_to_read;
+ else
+ dev->rx_position += bytes_to_read;
+ }
+
+
+	/* rx done, wait was interrupted or an error occurred */
+abort:
+ dev->interrupt_rx_allowed = true;
+ SET_CHECKED(rf69_set_mode(dev->spi, standby));
+ wake_up_interruptible(&dev->tx_wait_queue);
+
+ if (retval)
+ return retval;
+ else
+ return bytes_total;
+}
+
+int
+pi433_tx_thread(void *data)
+{
+ struct pi433_device *device = data;
+ struct spi_device *spi = device->spi; /* needed for SET_CHECKED */
+ struct pi433_tx_cfg tx_cfg;
+ u8 buffer[MAX_MSG_SIZE];
+ size_t size;
+ bool rx_interrupted = false;
+ int position, repetitions;
+ int retval;
+
+ while (1)
+ {
+		/* wait for fifo to be populated or for request to terminate */
+ dev_dbg(device->dev, "thread: going to wait for new messages");
+ wait_event_interruptible(device->tx_wait_queue,
+ ( !kfifo_is_empty(&device->tx_fifo) ||
+ kthread_should_stop() ));
+ if ( kthread_should_stop() )
+ return 0;
+
+ /* get data from fifo in the following order:
+ - tx_cfg
+ - size of message
+ - message */
+ mutex_lock(&device->tx_fifo_lock);
+
+ retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
+ if (retval != sizeof(tx_cfg))
+ {
+ dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg) );
+ mutex_unlock(&device->tx_fifo_lock);
+ continue;
+ }
+
+ retval = kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
+ if (retval != sizeof(size_t))
+ {
+ dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t) );
+ mutex_unlock(&device->tx_fifo_lock);
+ continue;
+ }
+
+ /* use fixed message length, if requested */
+ if (tx_cfg.fixed_message_length != 0)
+ size = tx_cfg.fixed_message_length;
+
+ /* increase size, if len byte is requested */
+ if (tx_cfg.enable_length_byte == optionOn)
+ size++;
+
+ /* increase size, if adr byte is requested */
+ if (tx_cfg.enable_address_byte == optionOn)
+ size++;
+
+ /* prime buffer */
+ memset(buffer, 0, size);
+ position = 0;
+
+ /* add length byte, if requested */
+ if (tx_cfg.enable_length_byte == optionOn)
+ buffer[position++] = size-1; /* according to spec length byte itself must be excluded from the length calculation */
+
+ /* add adr byte, if requested */
+ if (tx_cfg.enable_address_byte == optionOn)
+ buffer[position++] = tx_cfg.address_byte;
+
+ /* finally get message data from fifo */
+ retval = kfifo_out(&device->tx_fifo, &buffer[position], sizeof(buffer)-position );
+ dev_dbg(device->dev, "read %d message byte(s) from fifo queue.", retval);
+ mutex_unlock(&device->tx_fifo_lock);
+
+		/* if rx is active, we need to interrupt the waiting for
+		   incoming telegrams, to be able to send something.
+		   We are only allowed to do so, if currently no reception
+		   takes place; otherwise we need to wait for the incoming
+		   telegram to finish */
+ wait_event_interruptible(device->tx_wait_queue,
+ !device->rx_active ||
+ device->interrupt_rx_allowed == true);
+
+ /* prevent race conditions
+ irq will be reenabled after tx config is set */
+ disable_irq(device->irq_num[DIO0]);
+ device->tx_active = true;
+
+ if (device->rx_active && rx_interrupted == false)
+ {
+ /* rx is currently waiting for a telegram;
+ we need to set the radio module to standby */
+ SET_CHECKED(rf69_set_mode(device->spi, standby));
+ rx_interrupted = true;
+ }
+
+ /* clear fifo, set fifo threshold, set payload length */
+ SET_CHECKED(rf69_set_mode(spi, standby)); /* this clears the fifo */
+ SET_CHECKED(rf69_set_fifo_threshold(spi, FIFO_THRESHOLD));
+ if (tx_cfg.enable_length_byte == optionOn)
+ {
+ SET_CHECKED(rf69_set_payload_length(spi, size * tx_cfg.repetitions));
+ }
+ else
+ {
+ SET_CHECKED(rf69_set_payload_length(spi, 0));
+ }
+
+ /* configure the rf chip */
+ rf69_set_tx_cfg(device, &tx_cfg);
+
+ /* enable fifo level interrupt */
+ SET_CHECKED(rf69_set_dio_mapping(spi, DIO1, DIO_FifoLevel));
+ device->irq_state[DIO1] = DIO_FifoLevel;
+ irq_set_irq_type(device->irq_num[DIO1], IRQ_TYPE_EDGE_FALLING);
+
+ /* enable packet sent interrupt */
+ SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PacketSent));
+ device->irq_state[DIO0] = DIO_PacketSent;
+ irq_set_irq_type(device->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
+ enable_irq(device->irq_num[DIO0]); /* was disabled by rx active check */
+
+ /* enable transmission */
+ SET_CHECKED(rf69_set_mode(spi, transmit));
+
+ /* transfer this msg (and repetitions) to chip fifo */
+ device->free_in_fifo = FIFO_SIZE;
+ position = 0;
+ repetitions = tx_cfg.repetitions;
+ while( (repetitions > 0) && (size > position) )
+ {
+ if ( (size - position) > device->free_in_fifo)
+			{ /* msg too big for fifo - take a part */
+ int temp = device->free_in_fifo;
+ device->free_in_fifo = 0;
+ rf69_write_fifo(spi,
+ &buffer[position],
+ temp);
+ position +=temp;
+ }
+ else
+ { /* msg fits into fifo - take all */
+ device->free_in_fifo -= size;
+ repetitions--;
+ rf69_write_fifo(spi,
+ &buffer[position],
+ (size - position) );
+ position = 0; /* reset for next repetition */
+ }
+
+ retval = wait_event_interruptible(device->fifo_wait_queue,
+ device->free_in_fifo > 0);
+ if (retval) { printk("ABORT\n"); goto abort; }
+ }
+
+ /* we are done. Wait for packet to get sent */
+ dev_dbg(device->dev, "thread: wait for packet to get sent/fifo to be empty");
+ wait_event_interruptible(device->fifo_wait_queue,
+ device->free_in_fifo == FIFO_SIZE ||
+ kthread_should_stop() );
+ if ( kthread_should_stop() ) printk("ABORT\n");
+
+
+ /* STOP_TRANSMISSION */
+ dev_dbg(device->dev, "thread: Packet sent. Set mode to stby.");
+ SET_CHECKED(rf69_set_mode(spi, standby));
+
+ /* everything sent? */
+ if ( kfifo_is_empty(&device->tx_fifo) )
+ {
+abort:
+ if (rx_interrupted)
+ {
+ rx_interrupted = false;
+ pi433_start_rx(device);
+ }
+ device->tx_active = false;
+ wake_up_interruptible(&device->rx_wait_queue);
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t
+pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
+{
+ struct pi433_instance *instance;
+ struct pi433_device *device;
+ int bytes_received;
+ ssize_t retval;
+
+ /* check, whether internal buffer is big enough for requested size */
+ if (size > MAX_MSG_SIZE)
+ return -EMSGSIZE;
+
+ instance = filp->private_data;
+ device = instance->device;
+
+ /* just one read request at a time */
+ mutex_lock(&device->rx_lock);
+ if (device->rx_active)
+ {
+ mutex_unlock(&device->rx_lock);
+ return -EAGAIN;
+ }
+ else
+ {
+ device->rx_active = true;
+ mutex_unlock(&device->rx_lock);
+ }
+
+ /* start receiving */
+	/* will block until something is received */
+ device->rx_buffer_size = size;
+ bytes_received = pi433_receive(device);
+
+ /* release rx */
+ mutex_lock(&device->rx_lock);
+ device->rx_active = false;
+ mutex_unlock(&device->rx_lock);
+
+ /* if read was successful copy to user space*/
+ if (bytes_received > 0)
+ {
+ retval = copy_to_user(buf, device->rx_buffer, bytes_received);
+ if (retval)
+ return retval;
+ }
+
+ return bytes_received;
+}
+
+
+static ssize_t
+pi433_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct pi433_instance *instance;
+ struct pi433_device *device;
+ int copied, retval;
+
+ instance = filp->private_data;
+ device = instance->device;
+
+ /* check, whether internal buffer (tx thread) is big enough for requested size */
+ if (count > MAX_MSG_SIZE)
+ return -EMSGSIZE;
+
+ /* write the following sequence into fifo:
+ - tx_cfg
+ - size of message
+ - message */
+ mutex_lock(&device->tx_fifo_lock);
+ retval = kfifo_in(&device->tx_fifo, &instance->tx_cfg, sizeof(instance->tx_cfg));
+ if ( retval != sizeof(instance->tx_cfg) )
+ goto abort;
+
+ retval = kfifo_in (&device->tx_fifo, &count, sizeof(size_t));
+ if ( retval != sizeof(size_t) )
+ goto abort;
+
+ retval = kfifo_from_user(&device->tx_fifo, buf, count, &copied);
+ if (retval || copied != count)
+ goto abort;
+
+ mutex_unlock(&device->tx_fifo_lock);
+
+ /* start transfer */
+ wake_up_interruptible(&device->tx_wait_queue);
+ dev_dbg(device->dev, "write: generated new msg with %d bytes.", copied);
+
+ return 0;
+
+abort:
+ dev_dbg(device->dev, "write to fifo failed: 0x%x", retval);
+ kfifo_reset(&device->tx_fifo); // TODO: maybe find a solution, not to discard already stored, valid entries
+ mutex_unlock(&device->tx_fifo_lock);
+ return -EAGAIN;
+}
+
+
+static long
+pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ int retval = 0;
+ struct pi433_instance *instance;
+ struct pi433_device *device;
+ u32 tmp;
+
+ /* Check type and command number */
+ if (_IOC_TYPE(cmd) != PI433_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* Check access direction once here; don't repeat below.
+ * IOC_DIR is from the user perspective, while access_ok is
+ * from the kernel perspective; so they look reversed.
+ */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE,
+ (void __user *)arg,
+ _IOC_SIZE(cmd));
+
+ if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok(VERIFY_READ,
+ (void __user *)arg,
+ _IOC_SIZE(cmd));
+ if (err)
+ return -EFAULT;
+
+ /* TODO? guard against device removal before, or while,
+ * we issue this ioctl. --> device_get()
+ */
+ instance = filp->private_data;
+ device = instance->device;
+
+ if (device == NULL)
+ return -ESHUTDOWN;
+
+ switch (cmd) {
+ case PI433_IOC_RD_TX_CFG:
+ tmp = _IOC_SIZE(cmd);
+ if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
+ {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (__copy_to_user((void __user *)arg,
+ &instance->tx_cfg,
+ tmp))
+ {
+ retval = -EFAULT;
+ break;
+ }
+
+ break;
+ case PI433_IOC_WR_TX_CFG:
+ tmp = _IOC_SIZE(cmd);
+ if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
+ {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (__copy_from_user(&instance->tx_cfg,
+ (void __user *)arg,
+ tmp))
+ {
+ retval = -EFAULT;
+ break;
+ }
+
+ break;
+
+ case PI433_IOC_RD_RX_CFG:
+ tmp = _IOC_SIZE(cmd);
+ if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (__copy_to_user((void __user *)arg,
+ &device->rx_cfg,
+ tmp))
+ {
+ retval = -EFAULT;
+ break;
+ }
+
+ break;
+ case PI433_IOC_WR_RX_CFG:
+ tmp = _IOC_SIZE(cmd);
+ mutex_lock(&device->rx_lock);
+
+		/* during a pending read request, change of config is not allowed */
+ if (device->rx_active) {
+ retval = -EAGAIN;
+ mutex_unlock(&device->rx_lock);
+ break;
+ }
+
+ if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
+ retval = -EINVAL;
+ mutex_unlock(&device->rx_lock);
+ break;
+ }
+
+ if (__copy_from_user(&device->rx_cfg,
+ (void __user *)arg,
+ tmp))
+ {
+ retval = -EFAULT;
+ mutex_unlock(&device->rx_lock);
+ break;
+ }
+
+ mutex_unlock(&device->rx_lock);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+pi433_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return pi433_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define pi433_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+/*-------------------------------------------------------------------------*/
+
+static int pi433_open(struct inode *inode, struct file *filp)
+{
+ struct pi433_device *device;
+ struct pi433_instance *instance;
+
+ mutex_lock(&minor_lock);
+ device = idr_find(&pi433_idr, iminor(inode));
+ mutex_unlock(&minor_lock);
+ if (!device) {
+ pr_debug("device: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ if (!device->rx_buffer) {
+ device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
+ if (!device->rx_buffer)
+ {
+ dev_dbg(device->dev, "open/ENOMEM\n");
+ return -ENOMEM;
+ }
+ }
+
+ device->users++;
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ {
+ kfree(device->rx_buffer);
+ device->rx_buffer = NULL;
+ return -ENOMEM;
+ }
+
+ /* setup instance data*/
+ instance->device = device;
+ instance->tx_cfg.bit_rate = 4711;
+ // TODO: fill instance->tx_cfg;
+
+ /* instance data as context */
+ filp->private_data = instance;
+ nonseekable_open(inode, filp);
+
+ return 0;
+}
+
+static int pi433_release(struct inode *inode, struct file *filp)
+{
+ struct pi433_instance *instance;
+ struct pi433_device *device;
+
+ instance = filp->private_data;
+ device = instance->device;
+ kfree(instance);
+ filp->private_data = NULL;
+
+ /* last close? */
+ device->users--;
+
+ if (!device->users) {
+ kfree(device->rx_buffer);
+ device->rx_buffer = NULL;
+ if (device->spi == NULL)
+ kfree(device);
+ }
+
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int setup_GPIOs(struct pi433_device *device)
+{
+ char name[5];
+ int retval;
+ int i;
+
+ for (i=0; i<NUM_DIO; i++)
+ {
+ /* "construct" name and get the gpio descriptor */
+ snprintf(name, sizeof(name), "DIO%d", i);
+ device->gpiod[i] = gpiod_get(&device->spi->dev, name, 0 /*GPIOD_IN*/);
+
+ if (device->gpiod[i] == ERR_PTR(-ENOENT))
+ {
+ dev_dbg(&device->spi->dev, "Could not find entry for %s. Ignoring.", name);
+ continue;
+ }
+
+ if (device->gpiod[i] == ERR_PTR(-EBUSY))
+ dev_dbg(&device->spi->dev, "%s is busy.", name);
+
+ if (IS_ERR(device->gpiod[i])) {
+ retval = PTR_ERR(device->gpiod[i]);
+ /* release already allocated gpios, skipping the ones not found */
+ for (i--; i >= 0; i--) {
+ if (IS_ERR(device->gpiod[i]))
+ continue;
+ free_irq(device->irq_num[i], device);
+ gpiod_put(device->gpiod[i]);
+ }
+ return retval;
+ }
+
+
+ /* configure the pin */
+ gpiod_unexport(device->gpiod[i]);
+ retval = gpiod_direction_input(device->gpiod[i]);
+ if (retval) return retval;
+
+
+ /* configure irq */
+ device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
+ if (device->irq_num[i] < 0)
+ {
+ device->gpiod[i] = ERR_PTR(-EINVAL);//(struct gpio_desc *)device->irq_num[i];
+ return device->irq_num[i];
+ }
+ retval = request_irq(device->irq_num[i],
+ DIO_irq_handler[i],
+ 0, /* flags */
+ name,
+ device);
+
+ if (retval)
+ return retval;
+
+ dev_dbg(&device->spi->dev, "%s successfully configured", name);
+ }
+
+ return 0;
+}
+
+static void free_GPIOs(struct pi433_device *device)
+{
+ int i;
+
+ for (i=0; i<NUM_DIO; i++)
+ {
+ /* check if gpiod is valid */
+ if ( IS_ERR(device->gpiod[i]) )
+ continue;
+
+ free_irq(device->irq_num[i], device);
+ gpiod_put(device->gpiod[i]);
+ }
+ return;
+}
+
+static int pi433_get_minor(struct pi433_device *device)
+{
+ int retval = -ENOMEM;
+
+ mutex_lock(&minor_lock);
+ retval = idr_alloc(&pi433_idr, device, 0, N_PI433_MINORS, GFP_KERNEL);
+ if (retval >= 0) {
+ device->minor = retval;
+ retval = 0;
+ } else if (retval == -ENOSPC) {
+ dev_err(device->dev, "too many pi433 devices\n");
+ retval = -EINVAL;
+ }
+ mutex_unlock(&minor_lock);
+ return retval;
+}
+
+static void pi433_free_minor(struct pi433_device *dev)
+{
+ mutex_lock(&minor_lock);
+ idr_remove(&pi433_idr, dev->minor);
+ mutex_unlock(&minor_lock);
+}
+/*-------------------------------------------------------------------------*/
+
+static const struct file_operations pi433_fops = {
+ .owner = THIS_MODULE,
+ /* REVISIT switch to aio primitives, so that userspace
+ * gets more complete API coverage. It'll simplify things
+ * too, except for the locking.
+ */
+ .write = pi433_write,
+ .read = pi433_read,
+ .unlocked_ioctl = pi433_ioctl,
+ .compat_ioctl = pi433_compat_ioctl,
+ .open = pi433_open,
+ .release = pi433_release,
+ .llseek = no_llseek,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int pi433_probe(struct spi_device *spi)
+{
+ struct pi433_device *device;
+ int retval;
+
+ /* setup spi parameters */
+ spi->mode = 0x00;
+ spi->bits_per_word = 8;
+ /* spi->max_speed_hz = 10000000; 1MHz already set by device tree overlay */
+
+ retval = spi_setup(spi);
+ if (retval) {
+ dev_dbg(&spi->dev, "configuration of SPI interface failed!\n");
+ return retval;
+ }
+
+ dev_dbg(&spi->dev,
+ "spi interface setup: mode 0x%2x, %d bits per word, %d Hz max speed",
+ spi->mode, spi->bits_per_word, spi->max_speed_hz);
+
+ /* Ping the chip by reading the version register */
+ retval = spi_w8r8(spi, 0x10);
+ if (retval < 0)
+ return retval;
+
+ switch(retval)
+ {
+ case 0x24:
+ dev_dbg(&spi->dev, "found pi433 (ver. 0x%x)", retval);
+ break;
+ default:
+ dev_dbg(&spi->dev, "unknown chip version: 0x%x", retval);
+ return -ENODEV;
+ }
+
+ /* Allocate driver data */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* Initialize the driver data */
+ device->spi = spi;
+ device->rx_active = false;
+ device->tx_active = false;
+ device->interrupt_rx_allowed = false;
+
+ /* init wait queues */
+ init_waitqueue_head(&device->tx_wait_queue);
+ init_waitqueue_head(&device->rx_wait_queue);
+ init_waitqueue_head(&device->fifo_wait_queue);
+
+ /* init fifo */
+ INIT_KFIFO(device->tx_fifo);
+
+ /* init mutexes and locks */
+ mutex_init(&device->tx_fifo_lock);
+ mutex_init(&device->rx_lock);
+
+ /* setup GPIO (including irq_handler) for the different DIOs */
+ retval = setup_GPIOs(device);
+ if (retval)
+ {
+ dev_dbg(&spi->dev, "setup of GPIOs failed");
+ goto GPIO_failed;
+ }
+
+ /* setup the radio module */
+ SET_CHECKED(rf69_set_mode (spi, standby));
+ SET_CHECKED(rf69_set_data_mode (spi, packet));
+ SET_CHECKED(rf69_set_amplifier_0 (spi, optionOn));
+ SET_CHECKED(rf69_set_amplifier_1 (spi, optionOff));
+ SET_CHECKED(rf69_set_amplifier_2 (spi, optionOff));
+ SET_CHECKED(rf69_set_output_power_level (spi, 13));
+ SET_CHECKED(rf69_set_antenna_impedance (spi, fiftyOhm));
+
+ /* start tx thread */
+ device->tx_task_struct = kthread_run(pi433_tx_thread,
+ device,
+ "pi433_tx_task");
+ if (IS_ERR(device->tx_task_struct)) {
+ dev_dbg(device->dev, "start of send thread failed");
+ retval = PTR_ERR(device->tx_task_struct);
+ goto send_thread_failed;
+ }
+
+ /* determine minor number */
+ retval = pi433_get_minor(device);
+ if (retval)
+ {
+ dev_dbg(device->dev, "get of minor number failed");
+ goto minor_failed;
+ }
+
+ /* create device */
+ device->devt = MKDEV(MAJOR(pi433_dev), device->minor);
+ device->dev = device_create(pi433_class,
+ &spi->dev,
+ device->devt,
+ device,
+ "pi433");
+ if (IS_ERR(device->dev)) {
+ pr_err("pi433: device register failed\n");
+ retval = PTR_ERR(device->dev);
+ goto device_create_failed;
+ }
+ else {
+ dev_dbg(device->dev,
+ "created device for major %d, minor %d\n",
+ MAJOR(pi433_dev),
+ device->minor);
+ }
+
+ /* create cdev */
+ device->cdev = cdev_alloc();
+ if (!device->cdev) {
+ dev_dbg(device->dev, "allocation of cdev failed");
+ retval = -ENOMEM;
+ goto cdev_failed;
+ }
+ device->cdev->owner = THIS_MODULE;
+ cdev_init(device->cdev, &pi433_fops);
+ retval = cdev_add(device->cdev, device->devt, 1);
+ if (retval) {
+ dev_dbg(device->dev, "register of cdev failed");
+ goto cdev_failed;
+ }
+
+ /* spi setup */
+ spi_set_drvdata(spi, device);
+
+ return 0;
+
+cdev_failed:
+ device_destroy(pi433_class, device->devt);
+device_create_failed:
+ pi433_free_minor(device);
+minor_failed:
+ kthread_stop(device->tx_task_struct);
+send_thread_failed:
+ free_GPIOs(device);
+GPIO_failed:
+ kfree(device);
+
+ return retval;
+}
+
+static int pi433_remove(struct spi_device *spi)
+{
+ struct pi433_device *device = spi_get_drvdata(spi);
+
+ /* free GPIOs */
+ free_GPIOs(device);
+
+ /* make sure ops on existing fds can abort cleanly */
+ device->spi = NULL;
+
+ kthread_stop(device->tx_task_struct);
+
+ device_destroy(pi433_class, device->devt);
+
+ cdev_del(device->cdev);
+
+ pi433_free_minor(device);
+
+ if (device->users == 0)
+ kfree(device);
+
+ return 0;
+}
+
+static const struct of_device_id pi433_dt_ids[] = {
+ { .compatible = "Smarthome-Wolf,pi433" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pi433_dt_ids);
+
+static struct spi_driver pi433_spi_driver = {
+ .driver = {
+ .name = "pi433",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pi433_dt_ids),
+ },
+ .probe = pi433_probe,
+ .remove = pi433_remove,
+
+ /* NOTE: suspend/resume methods are not necessary here.
+ * We don't do anything except pass the requests to/from
+ * the underlying controller. The refrigerator handles
+ * most issues; the controller driver handles the rest.
+ */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init pi433_init(void)
+{
+ int status;
+
+ /* If MAX_MSG_SIZE is smaller than FIFO_SIZE, the driver won't
+ work reliably - risk of buffer overflow */
+ if (MAX_MSG_SIZE < FIFO_SIZE)
+ return -EINVAL;
+
+ /* Claim device numbers. Then register a class
+ * that will key udev/mdev to add/remove /dev nodes.
+ * Last, register the driver which manages those device numbers.
+ */
+ status = alloc_chrdev_region(&pi433_dev, 0 /*firstminor*/, N_PI433_MINORS /*count*/, "pi433" /*name*/);
+ if (status < 0)
+ return status;
+
+ pi433_class = class_create(THIS_MODULE, "pi433");
+ if (IS_ERR(pi433_class))
+ {
+ unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+ return PTR_ERR(pi433_class);
+ }
+
+ status = spi_register_driver(&pi433_spi_driver);
+ if (status < 0)
+ {
+ class_destroy(pi433_class);
+ unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+ }
+
+ return status;
+}
+
+module_init(pi433_init);
+
+static void __exit pi433_exit(void)
+{
+ spi_unregister_driver(&pi433_spi_driver);
+ class_destroy(pi433_class);
+ unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+}
+module_exit(pi433_exit);
+
+MODULE_AUTHOR("Marcus Wolf, <linux@wolf-entwicklungen.de>");
+MODULE_DESCRIPTION("Driver for Pi433");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:pi433");
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
new file mode 100644
index 000000000000..e6ed3cd9b2e2
--- /dev/null
+++ b/drivers/staging/pi433/pi433_if.h
@@ -0,0 +1,152 @@
+/*
+ * include/linux/TODO
+ *
+ * userspace interface for pi433 radio module
+ *
+ * Pi433 is a 433MHz radio module for the Raspberry Pi.
+ * It is based on the HopeRf Module RFM69CW. Therefore inside of this
+ * driver, you'll find an abstraction of the rf69 chip.
+ *
+ * If needed, this driver could be extended to also support other
+ * devices based on HopeRf's rf69.
+ *
+ * The driver can also be extended to support other HopeRf modules
+ * with a similar interface - e.g. RFM69HCW, RFM12, RFM95, ...
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PI433_H
+#define PI433_H
+
+#include <linux/types.h>
+#include "rf69_enum.h"
+
+/*---------------------------------------------------------------------------*/
+
+
+/*---------------------------------------------------------------------------*/
+
+/* IOCTL structs and commands */
+
+/**
+ * struct pi433_tx_cfg - describes the configuration of the radio module for sending
+ * @frequency:
+ * @bit_rate:
+ * @modulation:
+ * @preamble_length:
+ * @sync_pattern:
+ * @tx_start_condition:
+ * @repetitions:
+ *
+ * ATTENTION:
+ * If the contents of 'pi433_tx_cfg' ever change
+ * incompatibly, then the ioctl number (see define below) must change.
+ *
+ * NOTE: struct layout is the same in 64bit and 32bit userspace.
+ */
+#define PI433_TX_CFG_IOCTL_NR 0
+struct pi433_tx_cfg
+{
+ __u32 frequency;
+ __u16 bit_rate;
+ __u32 dev_frequency;
+ enum modulation modulation;
+ enum modShaping modShaping;
+
+ enum paRamp pa_ramp;
+
+ enum txStartCondition tx_start_condition;
+
+ __u16 repetitions;
+
+
+ /* packet format */
+ enum optionOnOff enable_preamble;
+ enum optionOnOff enable_sync;
+ enum optionOnOff enable_length_byte;
+ enum optionOnOff enable_address_byte;
+ enum optionOnOff enable_crc;
+
+ __u16 preamble_length;
+ __u8 sync_length;
+ __u8 fixed_message_length;
+
+ __u8 sync_pattern[8];
+ __u8 address_byte;
+};
+
+
+/**
+ * struct pi433_rx_cfg - describes the configuration of the radio module for receiving
+ * @frequency:
+ * @bit_rate:
+ * @modulation:
+ * @rssi_threshold:
+ * @antenna_impedance:
+ * @lna_gain:
+ * @bw_mantisse:
+ * @bw_exponent:
+ * @sync_pattern:
+ *
+ * ATTENTION:
+ * If the contents of 'pi433_rx_cfg' ever change
+ * incompatibly, then the ioctl number (see define below) must change.
+ *
+ * NOTE: struct layout is the same in 64bit and 32bit userspace.
+ */
+#define PI433_RX_CFG_IOCTL_NR 1
+struct pi433_rx_cfg {
+ __u32 frequency;
+ __u16 bit_rate;
+ __u32 dev_frequency;
+
+ enum modulation modulation;
+
+ __u8 rssi_threshold;
+ enum thresholdDecrement thresholdDecrement;
+ enum antennaImpedance antenna_impedance;
+ enum lnaGain lna_gain;
+ enum mantisse bw_mantisse; /* normal: 0x50 */
+ __u8 bw_exponent; /* during AFC: 0x8b */
+ enum dagc dagc;
+
+
+
+ /* packet format */
+ enum optionOnOff enable_sync;
+ enum optionOnOff enable_length_byte; /* should only be used in combination with sync */
+ enum addressFiltering enable_address_filtering; /* only operational with sync */
+ enum optionOnOff enable_crc; /* only operational if sync is on and a fixed length or length byte is used */
+
+ __u8 sync_length;
+ __u8 fixed_message_length;
+ __u32 bytes_to_drop;
+
+ __u8 sync_pattern[8];
+ __u8 node_address;
+ __u8 broadcast_address;
+};
+
+
+#define PI433_IOC_MAGIC 'r'
+
+#define PI433_IOC_RD_TX_CFG _IOR(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
+#define PI433_IOC_WR_TX_CFG _IOW(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
+
+#define PI433_IOC_RD_RX_CFG _IOR(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
+#define PI433_IOC_WR_RX_CFG _IOW(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
+
+#endif /* PI433_H */
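As an illustration of the ioctl interface defined above, a minimal userspace sketch could look like the following. It assumes that pi433_if.h and rf69_enum.h are available to userspace and that udev creates /dev/pi433 from the device_create() call in pi433_if.c; the frequency and bit rate are example values only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "pi433_if.h"

int main(void)
{
	struct pi433_tx_cfg tx_cfg;
	char msg[] = "hello pi433";
	int fd;

	fd = open("/dev/pi433", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* fetch the per-fd defaults set up in pi433_open(), then adjust them */
	if (ioctl(fd, PI433_IOC_RD_TX_CFG, &tx_cfg) < 0) {
		perror("PI433_IOC_RD_TX_CFG");
		return 1;
	}
	tx_cfg.frequency = 433920000;	/* example value, cf. FREQUENCY in rf69.h */
	tx_cfg.bit_rate = 4800;		/* example value */
	tx_cfg.modulation = FSK;
	if (ioctl(fd, PI433_IOC_WR_TX_CFG, &tx_cfg) < 0) {
		perror("PI433_IOC_WR_TX_CFG");
		return 1;
	}

	/* queue one message; the driver's tx thread sends it with this config */
	if (write(fd, msg, sizeof(msg) - 1) < 0)
		perror("write");

	close(fd);
	return 0;
}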
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
new file mode 100644
index 000000000000..e391ce777bc7
--- /dev/null
+++ b/drivers/staging/pi433/rf69.c
@@ -0,0 +1,982 @@
+/*
+ * abstraction of the spi interface of HopeRf rf69 radio module
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* enable prosa debug info */
+#undef DEBUG
+/* enable print of values on reg access */
+#undef DEBUG_VALUES
+/* enable print of values on fifo access */
+#undef DEBUG_FIFO_ACCESS
+
+#include <linux/types.h>
+#include <linux/spi/spi.h>
+
+#include "rf69.h"
+#include "rf69_registers.h"
+
+#define F_OSC 32000000 /* in Hz */
+#define FIFO_SIZE 66 /* in byte */
+
+/*-------------------------------------------------------------------------*/
+
+#define READ_REG(x) rf69_read_reg (spi, x)
+#define WRITE_REG(x,y) rf69_write_reg(spi, x, y)
+#define INVALID_PARAM \
+ { \
+ dev_dbg(&spi->dev, "set: illegal input param"); \
+ return -EINVAL; \
+ }
+
+/*-------------------------------------------------------------------------*/
+
+int rf69_set_mode(struct spi_device *spi, enum mode mode)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: mode");
+ #endif
+
+ switch (mode){
+ case transmit: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_TRANSMIT);
+ case receive: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_RECEIVE);
+ case synthesizer: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SYNTHESIZER);
+ case standby: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_STANDBY);
+ case mode_sleep: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SLEEP);
+ default: INVALID_PARAM;
+ }
+
+ // we are using packet mode, so this check is not really needed
+ // but waiting for mode ready is necessary when going from sleep because the FIFO may not be immediately available from previous mode
+ //while (_mode == RF69_MODE_SLEEP && (READ_REG(REG_IRQFLAGS1) & RF_IRQFLAGS1_MODEREADY) == 0x00); // Wait for ModeReady
+
+}
+
+int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: data mode");
+ #endif
+
+ switch (dataMode) {
+ case packet: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_PACKET);
+ case continuous: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS);
+ case continuousNoSync: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS_NOSYNC);
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: modulation");
+ #endif
+
+ switch (modulation) {
+ case OOK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_OOK);
+ case FSK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_FSK);
+ default: INVALID_PARAM;
+ }
+}
+
+enum modulation rf69_get_modulation(struct spi_device *spi)
+{
+ u8 currentValue;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "get: mode");
+ #endif
+
+ currentValue = READ_REG(REG_DATAMODUL);
+
+ switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
+ case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
+ case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
+ default: return undefined;
+ }
+}
+
+int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: mod shaping");
+ #endif
+
+ if (rf69_get_modulation(spi) == FSK)
+ {
+ switch (modShaping) {
+ case shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
+ case shaping1_0: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_1_0);
+ case shaping0_5: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_5);
+ case shaping0_3: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_3);
+ default: INVALID_PARAM;
+ }
+ }
+ else
+ {
+ switch (modShaping) {
+ case shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
+ case shapingBR: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_BR);
+ case shaping2BR: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_2BR);
+ default: INVALID_PARAM;
+ }
+ }
+}
+
+int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
+{
+ int retval;
+ u32 bitRate_min;
+ u32 bitRate_reg;
+ u8 msb;
+ u8 lsb;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: bit rate");
+ #endif
+
+ // check input value
+ bitRate_min = F_OSC / 8388608; // 8388608 = 2^23;
+ if (bitRate < bitRate_min)
+ {
+ dev_dbg(&spi->dev, "setBitRate: illegal input param");
+ INVALID_PARAM;
+ }
+
+ // calculate reg settings
+ bitRate_reg = (F_OSC / bitRate);
+
+ msb = (bitRate_reg&0xff00) >> 8;
+ lsb = (bitRate_reg&0xff);
+
+ // transmit to RF 69
+ retval = WRITE_REG(REG_BITRATE_MSB, msb);
+ if (retval) return retval;
+ retval = WRITE_REG(REG_BITRATE_LSB, lsb);
+ if (retval) return retval;
+
+ return 0;
+}
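As a worked example of the conversion above (example values, F_OSC = 32 MHz as defined in this file):

/*
 * bit rate 4800 bit/s:
 *   bitRate_reg = 32000000 / 4800 = 6666 = 0x1a0a
 *   -> REG_BITRATE_MSB = 0x1a, REG_BITRATE_LSB = 0x0a
 * bitRate_min = 32000000 / 2^23 = 3 bit/s (integer division),
 *   so the range check only rejects degenerate values.
 */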
+
+int rf69_set_deviation(struct spi_device *spi, u32 deviation)
+{
+ int retval;
+// u32 f_max; TODO: take the dependency on bit rate into account!!
+ u64 f_reg;
+ u64 f_step;
+ u8 msb;
+ u8 lsb;
+ u64 factor = 1000000; // to improve precision of calculation
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: deviation");
+ #endif
+
+ if (deviation < 600 || deviation > 500000) // TODO: take the dependency on bit rate into account
+ {
+ dev_dbg(&spi->dev, "set_deviation: illegal input param");
+ INVALID_PARAM;
+ }
+
+ // calculate f step
+ f_step = F_OSC * factor;
+ do_div(f_step, 524288); // 524288 = 2^19
+
+ // calculate register settings
+ f_reg = deviation * factor;
+ do_div(f_reg , f_step);
+
+ msb = (f_reg&0xff00) >> 8;
+ lsb = (f_reg&0xff);
+
+ // check msb
+ if (msb & ~FDEVMASB_MASK)
+ {
+ dev_dbg(&spi->dev, "set_deviation: err in calc of msb");
+ INVALID_PARAM;
+ }
+
+ // write to chip
+ retval = WRITE_REG(REG_FDEV_MSB, msb);
+ if (retval) return retval;
+ retval = WRITE_REG(REG_FDEV_LSB, lsb);
+ if (retval) return retval;
+
+ return 0;
+}
+
+int rf69_set_frequency(struct spi_device *spi, u32 frequency)
+{
+ int retval;
+ u32 f_max;
+ u64 f_reg;
+ u64 f_step;
+ u8 msb;
+ u8 mid;
+ u8 lsb;
+ u64 factor = 1000000; // to improve precision of calculation
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: frequency");
+ #endif
+
+ // calculate f step
+ f_step = F_OSC * factor;
+ do_div(f_step, 524288); // 524288 = 2^19
+
+ // check input value
+ f_max = f_step * 8388608 / factor;
+ if (frequency > f_max)
+ {
+ dev_dbg(&spi->dev, "setFrequency: illegal input param");
+ INVALID_PARAM;
+ }
+
+ // calculate reg settings
+ f_reg = frequency * factor;
+ do_div(f_reg , f_step);
+
+ msb = (f_reg&0xff0000) >> 16;
+ mid = (f_reg&0xff00) >> 8;
+ lsb = (f_reg&0xff);
+
+ // write to chip
+ retval = WRITE_REG(REG_FRF_MSB, msb);
+ if (retval) return retval;
+ retval = WRITE_REG(REG_FRF_MID, mid);
+ if (retval) return retval;
+ retval = WRITE_REG(REG_FRF_LSB, lsb);
+ if (retval) return retval;
+
+ return 0;
+}
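For reference, a worked example of the register calculation above: the synthesizer step is F_OSC / 2^19 (about 61.035 Hz), so for the 433.92 MHz carrier this module is built for

/*
 * f_step = 32000000 * 1000000 / 524288     = 61035156  (61.035 Hz, scaled by factor)
 * f_reg  = 433920000 * 1000000 / 61035156 ~= 7109345   = 0x6c7ae1
 *   -> REG_FRF_MSB = 0x6c, REG_FRF_MID = 0x7a, REG_FRF_LSB = 0xe1
 */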
+
+int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: amp #0");
+ #endif
+
+ switch(optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0) );
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: amp #1");
+ #endif
+
+ switch(optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1) );
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: amp #2");
+ #endif
+
+ switch(optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2) );
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: power level");
+ #endif
+
+ powerLevel += 18; // TODO: depends on the PA0/1/2 setting
+
+ // check input value
+ if (powerLevel > 0x1f)
+ INVALID_PARAM;
+
+ // write value
+ return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_OUTPUT_POWER) | powerLevel);
+}
+
+int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: pa ramp");
+ #endif
+
+ switch(paRamp) {
+ case ramp3400: return WRITE_REG(REG_PARAMP, PARAMP_3400);
+ case ramp2000: return WRITE_REG(REG_PARAMP, PARAMP_2000);
+ case ramp1000: return WRITE_REG(REG_PARAMP, PARAMP_1000);
+ case ramp500: return WRITE_REG(REG_PARAMP, PARAMP_500);
+ case ramp250: return WRITE_REG(REG_PARAMP, PARAMP_250);
+ case ramp125: return WRITE_REG(REG_PARAMP, PARAMP_125);
+ case ramp100: return WRITE_REG(REG_PARAMP, PARAMP_100);
+ case ramp62: return WRITE_REG(REG_PARAMP, PARAMP_62);
+ case ramp50: return WRITE_REG(REG_PARAMP, PARAMP_50);
+ case ramp40: return WRITE_REG(REG_PARAMP, PARAMP_40);
+ case ramp31: return WRITE_REG(REG_PARAMP, PARAMP_31);
+ case ramp25: return WRITE_REG(REG_PARAMP, PARAMP_25);
+ case ramp20: return WRITE_REG(REG_PARAMP, PARAMP_20);
+ case ramp15: return WRITE_REG(REG_PARAMP, PARAMP_15);
+ case ramp12: return WRITE_REG(REG_PARAMP, PARAMP_12);
+ case ramp10: return WRITE_REG(REG_PARAMP, PARAMP_10);
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: antenna impedance");
+ #endif
+
+ switch(antennaImpedance) {
+ case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN) );
+ case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: lna gain");
+ #endif
+
+ switch(lnaGain) {
+ case automatic: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_AUTO) );
+ case max: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX) );
+ case maxMinus6: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX_MINUS_6) );
+ case maxMinus12: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX_MINUS_12) );
+ case maxMinus24: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX_MINUS_24) );
+ case maxMinus36: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX_MINUS_36) );
+ case maxMinus48: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) | LNA_GAIN_MAX_MINUS_48) );
+ default: INVALID_PARAM;
+ }
+}
+
+enum lnaGain rf69_get_lna_gain(struct spi_device *spi)
+{
+ u8 currentValue;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "get: lna gain");
+ #endif
+
+ currentValue = READ_REG(REG_LNA);
+
+ switch ((currentValue & MASK_LNA_CURRENT_GAIN) >> 3) { // improvement: change 3 to define
+ case LNA_GAIN_AUTO: return automatic;
+ case LNA_GAIN_MAX: return max;
+ case LNA_GAIN_MAX_MINUS_6: return maxMinus6;
+ case LNA_GAIN_MAX_MINUS_12: return maxMinus12;
+ case LNA_GAIN_MAX_MINUS_24: return maxMinus24;
+ case LNA_GAIN_MAX_MINUS_36: return maxMinus36;
+ case LNA_GAIN_MAX_MINUS_48: return maxMinus48;
+ default: return undefined;
+ }
+}
+
+int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent)
+{
+ switch (dccPercent) {
+ case dcc16Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT) );
+ case dcc8Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT) );
+ case dcc4Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT) );
+ case dcc2Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT) );
+ case dcc1Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT) );
+ case dcc0_5Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT) );
+ case dcc0_25Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT) );
+ case dcc0_125Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: cut off freq");
+ #endif
+
+ return rf69_set_dc_cut_off_frequency_intern(spi, REG_RXBW, dccPercent);
+}
+
+int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: cut off freq during afc");
+ #endif
+
+ return rf69_set_dc_cut_off_frequency_intern(spi, REG_AFCBW, dccPercent);
+}
+
+int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg, enum mantisse mantisse, u8 exponent)
+{
+ u8 newValue;
+
+ // check value for mantisse and exponent
+ if (exponent > 7) INVALID_PARAM;
+ if ( (mantisse!=mantisse16) &&
+ (mantisse!=mantisse20) &&
+ (mantisse!=mantisse24) ) INVALID_PARAM;
+
+ // read old value
+ newValue = READ_REG(reg);
+
+ // "delete" mantisse and exponent = just keep the DCC setting
+ newValue = newValue & MASK_BW_DCC_FREQ;
+
+ // add new mantisse
+ switch(mantisse) {
+ case mantisse16: newValue = newValue | BW_MANT_16; break;
+ case mantisse20: newValue = newValue | BW_MANT_20; break;
+ case mantisse24: newValue = newValue | BW_MANT_24; break;
+ }
+
+ // add new exponent
+ newValue = newValue | exponent;
+
+ // write back
+ return WRITE_REG(reg, newValue);
+}
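The mantisse/exponent pair written above selects the receiver channel filter bandwidth. Per the RF69/SX1231 datasheet the resulting bandwidth in FSK mode is RxBw = F_OSC / (mantisse * 2^(exponent + 2)); in OOK mode one further division by two applies. An illustrative check (values chosen as examples):

/*
 * mantisse16, exponent 2:  RxBw(FSK) = 32000000 / (16 * 2^4) = 125 kHz
 * mantisse24, exponent 5:  RxBw(FSK) = 32000000 / (24 * 2^7) ~ 10.4 kHz
 */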
+
+int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: band width");
+ #endif
+
+ return rf69_set_bandwidth_intern(spi, REG_RXBW, mantisse, exponent);
+}
+
+int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: band width during afc");
+ #endif
+
+ return rf69_set_bandwidth_intern(spi, REG_AFCBW, mantisse, exponent);
+}
+
+int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: threshold type");
+ #endif
+
+ switch (thresholdType)
+ {
+ case fixed: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED) );
+ case peak: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK) );
+ case average: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: threshold step");
+ #endif
+
+ switch (thresholdStep) {
+ case step_0_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB) );
+ case step_1_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB) );
+ case step_1_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB) );
+ case step_2_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB) );
+ case step_3_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB) );
+ case step_4_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB) );
+ case step_5_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB) );
+ case step_6_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: threshold decrement");
+ #endif
+
+ switch (thresholdDecrement) {
+ case dec_every8th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH) );
+ case dec_every4th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH) );
+ case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND) );
+ case dec_once: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE) );
+ case dec_twice: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE) );
+ case dec_4times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES) );
+ case dec_8times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES) );
+ case dec_16times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
+{
+ u8 mask;
+ u8 shift;
+ u8 regaddr;
+ u8 regValue;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: DIO mapping");
+ #endif
+
+ // check DIO number
+ if (DIONumber > 5) INVALID_PARAM;
+
+ switch (DIONumber) {
+ case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
+ case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
+ case 2: mask=MASK_DIO2; shift=SHIFT_DIO2; regaddr=REG_DIOMAPPING1; break;
+ case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
+ case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
+ case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
+ }
+
+ // read reg
+ regValue=READ_REG(regaddr);
+ // delete old value
+ regValue = regValue & ~mask;
+ // add new value
+ regValue = regValue | value << shift;
+ // write back
+ return WRITE_REG(regaddr,regValue);
+}
+
+bool rf69_get_flag(struct spi_device *spi, enum flag flag)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "get: flag");
+ #endif
+
+ switch(flag) {
+ case modeSwitchCompleted: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY);
+ case readyToReceive: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY);
+ case readyToSend: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY);
+ case pllLocked: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_PLL_LOCK);
+ case rssiExceededThreshold: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RSSI);
+ case timeout: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TIMEOUT);
+ case automode: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_AUTOMODE);
+ case syncAddressMatch: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
+ case fifoFull: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_FULL);
+/* case fifoNotEmpty: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY); */
+ case fifoEmpty: return !(READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY);
+ case fifoLevelBelowThreshold: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_LEVEL);
+ case fifoOverrun: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_OVERRUN);
+ case packetSent: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PACKET_SENT);
+ case payloadReady: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PAYLOAD_READY);
+ case crcOk: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_CRC_OK);
+ case batteryLow: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_LOW_BAT);
+ default: return false;
+ }
+}
+
+int rf69_reset_flag(struct spi_device *spi, enum flag flag)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "reset: flag");
+ #endif
+
+ switch(flag) {
+ case rssiExceededThreshold: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_RSSI);
+ case syncAddressMatch: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
+ case fifoOverrun: return WRITE_REG(REG_IRQFLAGS2, MASK_IRQFLAGS2_FIFO_OVERRUN);
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: rssi threshold");
+ #endif
+
+ /* no value check needed - u8 exactly matches register size */
+
+ return WRITE_REG(REG_RSSITHRESH, threshold);
+}
+
+int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: start timeout");
+ #endif
+
+ /* no value check needed - u8 exactly matches register size */
+
+ return WRITE_REG(REG_RXTIMEOUT1, timeout);
+}
+
+int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: rssi timeout");
+ #endif
+
+ /* no value check needed - u8 exactly matches register size */
+
+ return WRITE_REG(REG_RXTIMEOUT2, timeout);
+}
+
+int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength)
+{
+ int retval;
+ u8 msb, lsb;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: preample length");
+ #endif
+
+ /* no value check needed - u16 exactly matches register size */
+
+ /* calculate reg settings */
+ msb = (preambleLength&0xff00) >> 8;
+ lsb = (preambleLength&0xff);
+
+ /* transmit to chip */
+ retval = WRITE_REG(REG_PREAMBLE_MSB, msb);
+ if (retval) return retval;
+ retval = WRITE_REG(REG_PREAMBLE_LSB, lsb);
+
+ return retval;
+}
+
+int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: sync enable");
+ #endif
+
+ switch(optionOnOff) {
+ case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON) );
+ case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: fifo fill condition");
+ #endif
+
+ switch(fifoFillCondition) {
+ case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
+ case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_sync_size(struct spi_device *spi, u8 syncSize)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: sync size");
+ #endif
+
+ // check input value
+ if (syncSize > 0x07)
+ INVALID_PARAM;
+
+ // write value
+ return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3) );
+}
+
+int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: sync tolerance");
+ #endif
+
+ // check input value
+ if (syncTolerance > 0x07)
+ INVALID_PARAM;
+
+ // write value
+ return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | syncTolerance);
+}
+
+int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8])
+{
+ int retval = 0;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: sync values");
+ #endif
+
+ retval += WRITE_REG(REG_SYNCVALUE1, syncValues[0]);
+ retval += WRITE_REG(REG_SYNCVALUE2, syncValues[1]);
+ retval += WRITE_REG(REG_SYNCVALUE3, syncValues[2]);
+ retval += WRITE_REG(REG_SYNCVALUE4, syncValues[3]);
+ retval += WRITE_REG(REG_SYNCVALUE5, syncValues[4]);
+ retval += WRITE_REG(REG_SYNCVALUE6, syncValues[5]);
+ retval += WRITE_REG(REG_SYNCVALUE7, syncValues[6]);
+ retval += WRITE_REG(REG_SYNCVALUE8, syncValues[7]);
+
+ return retval;
+}
+
+int rf69_set_packet_format(struct spi_device * spi, enum packetFormat packetFormat)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: packet format");
+ #endif
+
+ switch(packetFormat) {
+ case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
+ case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: crc enable");
+ #endif
+
+ switch(optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON) );
+ case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: address filtering");
+ #endif
+
+ switch (addressFiltering) {
+ case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF) );
+ case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE) );
+ case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: payload length");
+ #endif
+
+ return WRITE_REG(REG_PAYLOAD_LENGTH, payloadLength);
+}
+
+u8 rf69_get_payload_length(struct spi_device *spi)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "get: payload length");
+ #endif
+
+ return (u8) READ_REG(REG_PAYLOAD_LENGTH);
+}
+
+int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: node address");
+ #endif
+
+ return WRITE_REG(REG_NODEADRS, nodeAddress);
+}
+
+int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: broadcast address");
+ #endif
+
+ return WRITE_REG(REG_BROADCASTADRS, broadcastAddress);
+}
+
+int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: start condition");
+ #endif
+
+ switch(txStartCondition) {
+ case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART) );
+ case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART) );
+ default: INVALID_PARAM;
+ }
+}
+
+int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
+{
+ int retval;
+
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: fifo threshold");
+ #endif
+
+ // check input value
+ if (threshold & 0x80)
+ INVALID_PARAM;
+
+ // write value
+ retval = WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_VALUE) | threshold);
+ if (retval)
+ return retval;
+
+ // access the fifo to activate new threshold
+ return rf69_read_fifo (spi, (u8*) &retval, 1); // retval used as buffer
+}
+
+int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
+{
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "set: dagc");
+ #endif
+
+ switch(dagc) {
+ case normalMode: return WRITE_REG(REG_TESTDAGC, DAGC_NORMAL);
+ case improve: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
+ case improve4LowModulationIndex: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
+ default: INVALID_PARAM;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size)
+{
+ #ifdef DEBUG_FIFO_ACCESS
+ int i;
+ #endif
+ struct spi_transfer transfer;
+ u8 local_buffer[FIFO_SIZE + 1];
+ int retval;
+
+ if (size > FIFO_SIZE)
+ {
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n");
+ #endif
+ return -EMSGSIZE;
+ }
+
+ /* prepare a bidirectional transfer */
+ local_buffer[0] = REG_FIFO;
+ memset(&transfer, 0, sizeof(transfer));
+ transfer.tx_buf = local_buffer;
+ transfer.rx_buf = local_buffer;
+ transfer.len = size+1;
+
+ retval = spi_sync_transfer(spi, &transfer, 1);
+
+ #ifdef DEBUG_FIFO_ACCESS
+ for (i=0; i<size; i++)
+ dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i+1]);
+ #endif
+
+ memcpy(buffer, &local_buffer[1], size); // TODO: would be nicer without the memcpy
+
+ return retval;
+}
+
+int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
+{
+ #ifdef DEBUG_FIFO_ACCESS
+ int i;
+ #endif
+ char spi_address = REG_FIFO | WRITE_BIT;
+ u8 local_buffer[FIFO_SIZE + 1];
+
+ if (size > FIFO_SIZE)
+ {
+ #ifdef DEBUG
+ dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n");
+ #endif
+ return -EMSGSIZE;
+ }
+
+ local_buffer[0] = spi_address;
+ memcpy(&local_buffer[1], buffer, size); // TODO: would be nicer without the memcpy
+
+ #ifdef DEBUG_FIFO_ACCESS
+ for (i=0; i<size; i++)
+ dev_dbg(&spi->dev, "0x%x\n",buffer[i]);
+ #endif
+
+ return spi_write (spi, local_buffer, size + 1);
+}
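Both FIFO helpers put the FIFO register address on the wire first (with WRITE_BIT set for a write) and then clock the payload in the same transfer; on a read, the byte received while the address is shifted out is discarded, which is why the payload is copied from local_buffer[1] onwards. Illustration of the resulting framing (example payload):

/*
 * rf69_write_fifo(spi, "AB", 2) sends   { 0x80, 'A', 'B' }   (0x80 = REG_FIFO | WRITE_BIT)
 * rf69_read_fifo(spi, buf, 2)   sends   { 0x00,  x ,  x  }   (x = don't care)
 *   and the two bytes clocked back during the don't-care slots end up in buf[0], buf[1].
 */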
+
+/*-------------------------------------------------------------------------*/
+
+u8 rf69_read_reg(struct spi_device *spi, u8 addr)
+{
+ int retval;
+
+ retval = spi_w8r8(spi, addr);
+
+ #ifdef DEBUG_VALUES
+ if (retval < 0)
+ /* should never happen, since we already checked
+ that the module is connected; therefore no error
+ handling, just an optional error message... */
+ dev_dbg(&spi->dev, "read 0x%x FAILED\n",
+ addr);
+ else
+ dev_dbg(&spi->dev, "read 0x%x from reg 0x%x\n",
+ retval,
+ addr);
+ #endif
+
+ return retval;
+}
+
+int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value)
+{
+ int retval;
+ char buffer[2];
+
+ buffer[0] = addr | WRITE_BIT;
+ buffer[1] = value;
+
+ retval = spi_write(spi, buffer, 2);
+
+ #ifdef DEBUG_VALUES
+ if (retval < 0)
+ /* should never happen, since we already checked
+ that the module is connected; therefore no error
+ handling, just an optional error message... */
+ dev_dbg(&spi->dev, "write 0x%x to 0x%x FAILED\n",
+ value,
+ addr);
+ else
+ dev_dbg(&spi->dev, "wrote 0x%x to reg 0x%x\n",
+ value,
+ addr);
+ #endif
+
+ return retval;
+}
+
+
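Register access, as implemented above, is a plain two-byte SPI exchange: a read uses spi_w8r8() with the register address, a write sends the address with WRITE_BIT (0x80) set, followed by the value. For example (illustrative values):

/*
 * rf69_write_reg(spi, REG_PAYLOAD_LENGTH, 0x40)
 *   -> SPI out: { 0x38 | 0x80, 0x40 } = { 0xb8, 0x40 }
 * rf69_read_reg(spi, REG_VERSION)
 *   -> SPI out: { 0x10 }; the byte clocked back is the chip version
 *      (0x24 is what pi433_probe() accepts).
 */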
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
new file mode 100644
index 000000000000..b81e0762032e
--- /dev/null
+++ b/drivers/staging/pi433/rf69.h
@@ -0,0 +1,82 @@
+/*
+ * hardware abstraction/register access for HopeRf rf69 radio module
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RF69_H
+#define RF69_H
+
+#include "rf69_enum.h"
+#include "rf69_registers.h"
+
+#define F_OSC 32000000 /* in Hz */
+#define FREQUENCY 433920000 /* in Hz, modifying this value impacts CE certification */
+#define FIFO_SIZE 66 /* in byte */
+#define FIFO_THRESHOLD 15 /* in byte */
+
+int rf69_set_mode(struct spi_device *spi, enum mode mode);
+int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode);
+int rf69_set_modulation(struct spi_device *spi, enum modulation modulation);
+enum modulation rf69_get_modulation(struct spi_device *spi);
+int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping);
+int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate);
+int rf69_set_deviation(struct spi_device *spi, u32 deviation);
+int rf69_set_frequency(struct spi_device *spi, u32 frequency);
+int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel);
+int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp);
+int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance);
+int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain);
+enum lnaGain rf69_get_lna_gain(struct spi_device *spi);
+int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent);
+int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent);
+int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent);
+int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
+int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
+int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType);
+int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep);
+int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement);
+int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value);
+bool rf69_get_flag(struct spi_device *spi, enum flag flag);
+int rf69_reset_flag(struct spi_device *spi, enum flag flag);
+int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold);
+int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout);
+int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout);
+int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength);
+int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition);
+int rf69_set_sync_size(struct spi_device *spi, u8 sync_size);
+int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance);
+int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8]);
+int rf69_set_packet_format(struct spi_device * spi, enum packetFormat packetFormat);
+int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering);
+int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength);
+u8 rf69_get_payload_length(struct spi_device *spi);
+int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress);
+int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress);
+int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition);
+int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold);
+int rf69_set_dagc(struct spi_device *spi, enum dagc dagc);
+
+int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size);
+int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size);
+
+u8 rf69_read_reg (struct spi_device *spi, u8 addr);
+int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value);
+
+
+#endif
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
new file mode 100644
index 000000000000..fbfb59bd3f3d
--- /dev/null
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -0,0 +1,201 @@
+/*
+ * enumerations for HopeRf rf69 radio module
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef RF69_ENUM_H
+#define RF69_ENUM_H
+
+enum optionOnOff
+{
+ optionOff,
+ optionOn
+};
+
+enum mode
+{
+ mode_sleep,
+ standby,
+ synthesizer,
+ transmit,
+ receive
+};
+
+enum dataMode
+{
+ packet,
+ continuous,
+ continuousNoSync
+};
+
+enum modulation
+{
+ OOK,
+ FSK
+};
+
+enum modShaping
+{
+ shapingOff,
+ shaping1_0,
+ shaping0_5,
+ shaping0_3,
+ shapingBR,
+ shaping2BR
+};
+
+enum paRamp
+{
+ ramp3400,
+ ramp2000,
+ ramp1000,
+ ramp500,
+ ramp250,
+ ramp125,
+ ramp100,
+ ramp62,
+ ramp50,
+ ramp40,
+ ramp31,
+ ramp25,
+ ramp20,
+ ramp15,
+ ramp12,
+ ramp10
+};
+
+enum antennaImpedance
+{
+ fiftyOhm,
+ twohundretOhm
+};
+
+enum lnaGain
+{
+ automatic,
+ max,
+ maxMinus6,
+ maxMinus12,
+ maxMinus24,
+ maxMinus36,
+ maxMinus48,
+ undefined
+};
+
+enum dccPercent
+{
+ dcc16Percent,
+ dcc8Percent,
+ dcc4Percent,
+ dcc2Percent,
+ dcc1Percent,
+ dcc0_5Percent,
+ dcc0_25Percent,
+ dcc0_125Percent
+};
+
+enum mantisse
+{
+ mantisse16,
+ mantisse20,
+ mantisse24
+};
+
+enum thresholdType
+{
+ fixed,
+ peak,
+ average
+};
+
+enum thresholdStep
+{
+ step_0_5db,
+ step_1_0db,
+ step_1_5db,
+ step_2_0db,
+ step_3_0db,
+ step_4_0db,
+ step_5_0db,
+ step_6_0db
+};
+
+enum thresholdDecrement
+{
+ dec_every8th,
+ dec_every4th,
+ dec_every2nd,
+ dec_once,
+ dec_twice,
+ dec_4times,
+ dec_8times,
+ dec_16times
+};
+
+enum flag
+{
+ modeSwitchCompleted,
+ readyToReceive,
+ readyToSend,
+ pllLocked,
+ rssiExceededThreshold,
+ timeout,
+ automode,
+ syncAddressMatch,
+ fifoFull,
+// fifoNotEmpty, collision with next enum; replaced by following enum...
+ fifoEmpty,
+ fifoLevelBelowThreshold,
+ fifoOverrun,
+ packetSent,
+ payloadReady,
+ crcOk,
+ batteryLow
+};
+
+enum fifoFillCondition
+{
+ afterSyncInterrupt,
+ always
+};
+
+enum packetFormat
+{
+ packetLengthFix,
+ packetLengthVar
+};
+
+enum txStartCondition
+{
+ fifoLevel,
+ fifoNotEmpty
+};
+
+enum addressFiltering
+{
+ filteringOff,
+ nodeAddress,
+ nodeOrBroadcastAddress
+};
+
+enum dagc
+{
+ normalMode,
+ improve,
+ improve4LowModulationIndex
+};
+
+
+#endif
diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h
new file mode 100644
index 000000000000..d0c4992b0778
--- /dev/null
+++ b/drivers/staging/pi433/rf69_registers.h
@@ -0,0 +1,489 @@
+/*
+ * register description for HopeRf rf69 radio module
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ * Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*******************************************/
+/* RF69 register addresses */
+/*******************************************/
+#define REG_FIFO 0x00
+#define REG_OPMODE 0x01
+#define REG_DATAMODUL 0x02
+#define REG_BITRATE_MSB 0x03
+#define REG_BITRATE_LSB 0x04
+#define REG_FDEV_MSB 0x05
+#define REG_FDEV_LSB 0x06
+#define REG_FRF_MSB 0x07
+#define REG_FRF_MID 0x08
+#define REG_FRF_LSB 0x09
+#define REG_OSC1 0x0A
+#define REG_AFCCTRL 0x0B
+#define REG_LOWBAT 0x0C
+#define REG_LISTEN1 0x0D
+#define REG_LISTEN2 0x0E
+#define REG_LISTEN3 0x0F
+#define REG_VERSION 0x10
+#define REG_PALEVEL 0x11
+#define REG_PARAMP 0x12
+#define REG_OCP 0x13
+#define REG_AGCREF 0x14 /* not available on RF69 */
+#define REG_AGCTHRESH1 0x15 /* not available on RF69 */
+#define REG_AGCTHRESH2 0x16 /* not available on RF69 */
+#define REG_AGCTHRESH3 0x17 /* not available on RF69 */
+#define REG_LNA 0x18
+#define REG_RXBW 0x19
+#define REG_AFCBW 0x1A
+#define REG_OOKPEAK 0x1B
+#define REG_OOKAVG 0x1C
+#define REG_OOKFIX 0x1D
+#define REG_AFCFEI 0x1E
+#define REG_AFCMSB 0x1F
+#define REG_AFCLSB 0x20
+#define REG_FEIMSB 0x21
+#define REG_FEILSB 0x22
+#define REG_RSSICONFIG 0x23
+#define REG_RSSIVALUE 0x24
+#define REG_DIOMAPPING1 0x25
+#define REG_DIOMAPPING2 0x26
+#define REG_IRQFLAGS1 0x27
+#define REG_IRQFLAGS2 0x28
+#define REG_RSSITHRESH 0x29
+#define REG_RXTIMEOUT1 0x2A
+#define REG_RXTIMEOUT2 0x2B
+#define REG_PREAMBLE_MSB 0x2C
+#define REG_PREAMBLE_LSB 0x2D
+#define REG_SYNC_CONFIG 0x2E
+#define REG_SYNCVALUE1 0x2F
+#define REG_SYNCVALUE2 0x30
+#define REG_SYNCVALUE3 0x31
+#define REG_SYNCVALUE4 0x32
+#define REG_SYNCVALUE5 0x33
+#define REG_SYNCVALUE6 0x34
+#define REG_SYNCVALUE7 0x35
+#define REG_SYNCVALUE8 0x36
+#define REG_PACKETCONFIG1 0x37
+#define REG_PAYLOAD_LENGTH 0x38
+#define REG_NODEADRS 0x39
+#define REG_BROADCASTADRS 0x3A
+#define REG_AUTOMODES 0x3B
+#define REG_FIFO_THRESH 0x3C
+#define REG_PACKETCONFIG2 0x3D
+#define REG_AESKEY1 0x3E
+#define REG_AESKEY2 0x3F
+#define REG_AESKEY3 0x40
+#define REG_AESKEY4 0x41
+#define REG_AESKEY5 0x42
+#define REG_AESKEY6 0x43
+#define REG_AESKEY7 0x44
+#define REG_AESKEY8 0x45
+#define REG_AESKEY9 0x46
+#define REG_AESKEY10 0x47
+#define REG_AESKEY11 0x48
+#define REG_AESKEY12 0x49
+#define REG_AESKEY13 0x4A
+#define REG_AESKEY14 0x4B
+#define REG_AESKEY15 0x4C
+#define REG_AESKEY16 0x4D
+#define REG_TEMP1 0x4E
+#define REG_TEMP2 0x4F
+#define REG_TESTPA1 0x5A /* only present on RFM69HW */
+#define REG_TESTPA2 0x5C /* only present on RFM69HW */
+#define REG_TESTDAGC 0x6F
+
+/******************************************************/
+/* RF69/SX1231 bit definition */
+/******************************************************/
+/* write bit */
+#define WRITE_BIT 0x80
+
+/* RegOpMode */
+#define MASK_OPMODE_SEQUENCER_OFF 0x80
+#define MASK_OPMODE_LISTEN_ON 0x40
+#define MASK_OPMODE_LISTEN_ABORT 0x20
+#define MASK_OPMODE_MODE 0x1C
+
+#define OPMODE_MODE_SLEEP 0x00
+#define OPMODE_MODE_STANDBY 0x04 /* default */
+#define OPMODE_MODE_SYNTHESIZER 0x08
+#define OPMODE_MODE_TRANSMIT 0x0C
+#define OPMODE_MODE_RECEIVE 0x10
+
+/* RegDataModul */
+#define MASK_DATAMODUL_MODE 0x06
+#define MASK_DATAMODUL_MODULATION_TYPE 0x18
+#define MASK_DATAMODUL_MODULATION_SHAPE 0x03
+
+#define DATAMODUL_MODE_PACKET 0x00 /* default */
+#define DATAMODUL_MODE_CONTINUOUS 0x40
+#define DATAMODUL_MODE_CONTINUOUS_NOSYNC 0x60
+
+#define DATAMODUL_MODULATION_TYPE_FSK 0x00 /* default */
+#define DATAMODUL_MODULATION_TYPE_OOK 0x08
+
+#define DATAMODUL_MODULATION_SHAPE_NONE 0x00 /* default */
+#define DATAMODUL_MODULATION_SHAPE_1_0 0x01
+#define DATAMODUL_MODULATION_SHAPE_0_5 0x02
+#define DATAMODUL_MODULATION_SHAPE_0_3 0x03
+#define DATAMODUL_MODULATION_SHAPE_BR 0x01
+#define DATAMODUL_MODULATION_SHAPE_2BR 0x02
+
+/* RegFDevMsb (0x05) */
+#define FDEVMASB_MASK 0x3f
+
+/*
+// RegOsc1
+#define OSC1_RCCAL_START 0x80
+#define OSC1_RCCAL_DONE 0x40
+
+// RegLowBat
+#define LOWBAT_MONITOR 0x10
+#define LOWBAT_ON 0x08
+#define LOWBAT_OFF 0x00 // Default
+
+#define LOWBAT_TRIM_1695 0x00
+#define LOWBAT_TRIM_1764 0x01
+#define LOWBAT_TRIM_1835 0x02 // Default
+#define LOWBAT_TRIM_1905 0x03
+#define LOWBAT_TRIM_1976 0x04
+#define LOWBAT_TRIM_2045 0x05
+#define LOWBAT_TRIM_2116 0x06
+#define LOWBAT_TRIM_2185 0x07
+
+
+// RegListen1
+#define LISTEN1_RESOL_64 0x50
+#define LISTEN1_RESOL_4100 0xA0 // Default
+#define LISTEN1_RESOL_262000 0xF0
+
+#define LISTEN1_CRITERIA_RSSI 0x00 // Default
+#define LISTEN1_CRITERIA_RSSIANDSYNC 0x08
+
+#define LISTEN1_END_00 0x00
+#define LISTEN1_END_01 0x02 // Default
+#define LISTEN1_END_10 0x04
+
+
+// RegListen2
+#define LISTEN2_COEFIDLE_VALUE 0xF5 // Default
+
+// RegListen3
+#define LISTEN3_COEFRX_VALUE 0x20 // Default
+*/
+
+/* RegPaLevel */
+#define MASK_PALEVEL_PA0 0x80
+#define MASK_PALEVEL_PA1 0x40
+#define MASK_PALEVEL_PA2 0x20
+#define MASK_PALEVEL_OUTPUT_POWER 0x1F
+
+/* RegPaRamp */
+#define PARAMP_3400 0x00
+#define PARAMP_2000 0x01
+#define PARAMP_1000 0x02
+#define PARAMP_500 0x03
+#define PARAMP_250 0x04
+#define PARAMP_125 0x05
+#define PARAMP_100 0x06
+#define PARAMP_62 0x07
+#define PARAMP_50 0x08
+#define PARAMP_40 0x09 /* default */
+#define PARAMP_31 0x0A
+#define PARAMP_25 0x0B
+#define PARAMP_20 0x0C
+#define PARAMP_15 0x0D
+#define PARAMP_12 0x0E
+#define PARAMP_10 0x0F
+
+#define MASK_PARAMP 0x0F
+
+/*
+// RegOcp
+#define OCP_OFF 0x0F
+#define OCP_ON 0x1A // Default
+
+#define OCP_TRIM_45 0x00
+#define OCP_TRIM_50 0x01
+#define OCP_TRIM_55 0x02
+#define OCP_TRIM_60 0x03
+#define OCP_TRIM_65 0x04
+#define OCP_TRIM_70 0x05
+#define OCP_TRIM_75 0x06
+#define OCP_TRIM_80 0x07
+#define OCP_TRIM_85 0x08
+#define OCP_TRIM_90 0x09
+#define OCP_TRIM_95 0x0A
+#define OCP_TRIM_100 0x0B // Default
+#define OCP_TRIM_105 0x0C
+#define OCP_TRIM_110 0x0D
+#define OCP_TRIM_115 0x0E
+#define OCP_TRIM_120 0x0F
+*/
+
+/* RegLna (0x18) */
+#define MASK_LNA_ZIN 0x80
+#define MASK_LNA_CURRENT_GAIN 0x38
+#define MASK_LNA_GAIN 0x07
+
+#define LNA_GAIN_AUTO 0x00 /* default */
+#define LNA_GAIN_MAX 0x01
+#define LNA_GAIN_MAX_MINUS_6 0x02
+#define LNA_GAIN_MAX_MINUS_12 0x03
+#define LNA_GAIN_MAX_MINUS_24 0x04
+#define LNA_GAIN_MAX_MINUS_36 0x05
+#define LNA_GAIN_MAX_MINUS_48 0x06
+
+/* RegRxBw (0x19) and RegAfcBw (0x1A) */
+#define MASK_BW_DCC_FREQ 0xE0
+#define MASK_BW_MANTISSE 0x18
+#define MASK_BW_EXPONENT 0x07
+
+#define BW_DCC_16_PERCENT 0x00
+#define BW_DCC_8_PERCENT 0x20
+#define BW_DCC_4_PERCENT 0x40 /* default */
+#define BW_DCC_2_PERCENT 0x60
+#define BW_DCC_1_PERCENT 0x80
+#define BW_DCC_0_5_PERCENT 0xA0
+#define BW_DCC_0_25_PERCENT 0xC0
+#define BW_DCC_0_125_PERCENT 0xE0
+
+#define BW_MANT_16 0x00
+#define BW_MANT_20 0x08
+#define BW_MANT_24 0x10 /* default */
+
+/* RegOokPeak (0x1B) */
+#define MASK_OOKPEAK_THRESTYPE 0xc0
+#define MASK_OOKPEAK_THRESSTEP 0x38
+#define MASK_OOKPEAK_THRESDEC 0x07
+
+#define OOKPEAK_THRESHTYPE_FIXED 0x00
+#define OOKPEAK_THRESHTYPE_PEAK 0x40 /* default */
+#define OOKPEAK_THRESHTYPE_AVERAGE 0x80
+
+#define OOKPEAK_THRESHSTEP_0_5_DB 0x00 /* default */
+#define OOKPEAK_THRESHSTEP_1_0_DB 0x08
+#define OOKPEAK_THRESHSTEP_1_5_DB 0x10
+#define OOKPEAK_THRESHSTEP_2_0_DB 0x18
+#define OOKPEAK_THRESHSTEP_3_0_DB 0x20
+#define OOKPEAK_THRESHSTEP_4_0_DB 0x28
+#define OOKPEAK_THRESHSTEP_5_0_DB 0x30
+#define OOKPEAK_THRESHSTEP_6_0_DB 0x38
+
+#define OOKPEAK_THRESHDEC_ONCE 0x00 /* default */
+#define OOKPEAK_THRESHDEC_EVERY_2ND 0x01
+#define OOKPEAK_THRESHDEC_EVERY_4TH 0x02
+#define OOKPEAK_THRESHDEC_EVERY_8TH 0x03
+#define OOKPEAK_THRESHDEC_TWICE 0x04
+#define OOKPEAK_THRESHDEC_4_TIMES 0x05
+#define OOKPEAK_THRESHDEC_8_TIMES 0x06
+#define OOKPEAK_THRESHDEC_16_TIMES 0x07
+
+/*
+// RegOokAvg
+#define OOKAVG_AVERAGETHRESHFILT_00 0x00
+#define OOKAVG_AVERAGETHRESHFILT_01 0x40
+#define OOKAVG_AVERAGETHRESHFILT_10 0x80 // Default
+#define OOKAVG_AVERAGETHRESHFILT_11 0xC0
+
+
+// RegAfcFei
+#define AFCFEI_FEI_DONE 0x40
+#define AFCFEI_FEI_START 0x20
+#define AFCFEI_AFC_DONE 0x10
+#define AFCFEI_AFCAUTOCLEAR_ON 0x08
+#define AFCFEI_AFCAUTOCLEAR_OFF 0x00 // Default
+
+#define AFCFEI_AFCAUTO_ON 0x04
+#define AFCFEI_AFCAUTO_OFF 0x00 // Default
+
+#define AFCFEI_AFC_CLEAR 0x02
+#define AFCFEI_AFC_START 0x01
+
+// RegRssiConfig
+#define RSSI_FASTRX_ON 0x08
+#define RSSI_FASTRX_OFF 0x00 // Default
+#define RSSI_DONE 0x02
+#define RSSI_START 0x01
+*/
+
+/* RegDioMapping1 */
+#define MASK_DIO0 0xC0
+#define MASK_DIO1 0x30
+#define MASK_DIO2 0x0C
+#define MASK_DIO3 0x03
+#define SHIFT_DIO0 6
+#define SHIFT_DIO1 4
+#define SHIFT_DIO2 2
+#define SHIFT_DIO3 0
+
+/* RegDioMapping2 */
+#define MASK_DIO4 0xC0
+#define MASK_DIO5 0x30
+#define SHIFT_DIO4 6
+#define SHIFT_DIO5 4
+
+/* DIO numbers */
+#define DIO0 0
+#define DIO1 1
+#define DIO2 2
+#define DIO3 3
+#define DIO4 4
+#define DIO5 5
+
+/* DIO Mapping values (packet mode) */
+#define DIO_ModeReady_DIO4 0x00
+#define DIO_ModeReady_DIO5 0x03
+#define DIO_ClkOut 0x00
+#define DIO_Data 0x01
+#define DIO_TimeOut_DIO1 0x03
+#define DIO_TimeOut_DIO4 0x00
+#define DIO_Rssi_DIO0 0x03
+#define DIO_Rssi_DIO3_4 0x01
+#define DIO_RxReady 0x02
+#define DIO_PLLLock 0x03
+#define DIO_TxReady 0x01
+#define DIO_FifoFull_DIO1 0x01
+#define DIO_FifoFull_DIO3 0x00
+#define DIO_SyncAddress 0x02
+#define DIO_FifoNotEmpty_DIO1 0x02
+#define DIO_FifoNotEmpty_FIO2 0x00
+#define DIO_Automode 0x04
+#define DIO_FifoLevel 0x00
+#define DIO_CrcOk 0x00
+#define DIO_PayloadReady 0x01
+#define DIO_PacketSent 0x00
+#define DIO_Dclk 0x00
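+
+/*
+ * Illustrative usage sketch (not part of the original header); to route
+ * PayloadReady to DIO0 in packet mode, shift the mapping value into the
+ * DIO0 field of RegDioMapping1 (val is a hypothetical local):
+ *   val = (val & ~MASK_DIO0) | (DIO_PayloadReady << SHIFT_DIO0);
+ */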
+
+/* RegDioMapping2 CLK_OUT part */
+#define MASK_DIOMAPPING2_CLK_OUT 0x07
+
+#define DIOMAPPING2_CLK_OUT_NO_DIV 0x00
+#define DIOMAPPING2_CLK_OUT_DIV_2 0x01
+#define DIOMAPPING2_CLK_OUT_DIV_4 0x02
+#define DIOMAPPING2_CLK_OUT_DIV_8 0x03
+#define DIOMAPPING2_CLK_OUT_DIV_16 0x04
+#define DIOMAPPING2_CLK_OUT_DIV_32 0x05
+#define DIOMAPPING2_CLK_OUT_RC 0x06
+#define DIOMAPPING2_CLK_OUT_OFF 0x07 /* default */
+
+/* RegIrqFlags1 */
+#define MASK_IRQFLAGS1_MODE_READY 0x80
+#define MASK_IRQFLAGS1_RX_READY 0x40
+#define MASK_IRQFLAGS1_TX_READY 0x20
+#define MASK_IRQFLAGS1_PLL_LOCK 0x10
+#define MASK_IRQFLAGS1_RSSI 0x08
+#define MASK_IRQFLAGS1_TIMEOUT 0x04
+#define MASK_IRQFLAGS1_AUTOMODE 0x02
+#define MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH 0x01
+
+/* RegIrqFlags2 */
+#define MASK_IRQFLAGS2_FIFO_FULL 0x80
+#define MASK_IRQFLAGS2_FIFO_NOT_EMPTY 0x40
+#define MASK_IRQFLAGS2_FIFO_LEVEL 0x20
+#define MASK_IRQFLAGS2_FIFO_OVERRUN 0x10
+#define MASK_IRQFLAGS2_PACKET_SENT 0x08
+#define MASK_IRQFLAGS2_PAYLOAD_READY 0x04
+#define MASK_IRQFLAGS2_CRC_OK 0x02
+#define MASK_IRQFLAGS2_LOW_BAT 0x01
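+
+/*
+ * Illustrative usage sketch (not part of the original header); flags2 is a
+ * hypothetical local holding the value read from RegIrqFlags2:
+ *   if (flags2 & MASK_IRQFLAGS2_PAYLOAD_READY)
+ *           ... a complete packet is waiting in the FIFO ...
+ */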
+
+/* RegSyncConfig */
+#define MASK_SYNC_CONFIG_SYNC_ON 0x80 /* default */
+#define MASK_SYNC_CONFIG_FIFO_FILL_CONDITION 0x40
+#define MASK_SYNC_CONFIG_SYNC_SIZE 0x38
+#define MASK_SYNC_CONFIG_SYNC_TOLERANCE 0x07
+
+/* RegPacketConfig1 */
+#define MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE 0x80
+#define MASK_PACKETCONFIG1_DCFREE 0x60
+#define MASK_PACKETCONFIG1_CRC_ON 0x10 /* default */
+#define MASK_PACKETCONFIG1_CRCAUTOCLEAR_OFF 0x08
+#define MASK_PACKETCONFIG1_ADDRESSFILTERING 0x06
+
+#define PACKETCONFIG1_DCFREE_OFF 0x00 /* default */
+#define PACKETCONFIG1_DCFREE_MANCHESTER 0x20
+#define PACKETCONFIG1_DCFREE_WHITENING 0x40
+#define PACKETCONFIG1_ADDRESSFILTERING_OFF 0x00 /* default */
+#define PACKETCONFIG1_ADDRESSFILTERING_NODE 0x02
+#define PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST 0x04
+
+/*
+// RegAutoModes
+#define AUTOMODES_ENTER_OFF 0x00 // Default
+#define AUTOMODES_ENTER_FIFONOTEMPTY 0x20
+#define AUTOMODES_ENTER_FIFOLEVEL 0x40
+#define AUTOMODES_ENTER_CRCOK 0x60
+#define AUTOMODES_ENTER_PAYLOADREADY 0x80
+#define AUTOMODES_ENTER_SYNCADRSMATCH 0xA0
+#define AUTOMODES_ENTER_PACKETSENT 0xC0
+#define AUTOMODES_ENTER_FIFOEMPTY 0xE0
+
+#define AUTOMODES_EXIT_OFF 0x00 // Default
+#define AUTOMODES_EXIT_FIFOEMPTY 0x04
+#define AUTOMODES_EXIT_FIFOLEVEL 0x08
+#define AUTOMODES_EXIT_CRCOK 0x0C
+#define AUTOMODES_EXIT_PAYLOADREADY 0x10
+#define AUTOMODES_EXIT_SYNCADRSMATCH 0x14
+#define AUTOMODES_EXIT_PACKETSENT 0x18
+#define AUTOMODES_EXIT_RXTIMEOUT 0x1C
+
+#define AUTOMODES_INTERMEDIATE_SLEEP 0x00 // Default
+#define AUTOMODES_INTERMEDIATE_STANDBY 0x01
+#define AUTOMODES_INTERMEDIATE_RECEIVER 0x02
+#define AUTOMODES_INTERMEDIATE_TRANSMITTER 0x03
+
+*/
+/* RegFifoThresh (0x3c) */
+#define MASK_FIFO_THRESH_TXSTART 0x80
+#define MASK_FIFO_THRESH_VALUE 0x7F
+
+/*
+
+// RegPacketConfig2
+#define PACKET2_RXRESTARTDELAY_1BIT 0x00 // Default
+#define PACKET2_RXRESTARTDELAY_2BITS 0x10
+#define PACKET2_RXRESTARTDELAY_4BITS 0x20
+#define PACKET2_RXRESTARTDELAY_8BITS 0x30
+#define PACKET2_RXRESTARTDELAY_16BITS 0x40
+#define PACKET2_RXRESTARTDELAY_32BITS 0x50
+#define PACKET2_RXRESTARTDELAY_64BITS 0x60
+#define PACKET2_RXRESTARTDELAY_128BITS 0x70
+#define PACKET2_RXRESTARTDELAY_256BITS 0x80
+#define PACKET2_RXRESTARTDELAY_512BITS 0x90
+#define PACKET2_RXRESTARTDELAY_1024BITS 0xA0
+#define PACKET2_RXRESTARTDELAY_2048BITS 0xB0
+#define PACKET2_RXRESTARTDELAY_NONE 0xC0
+#define PACKET2_RXRESTART 0x04
+
+#define PACKET2_AUTORXRESTART_ON 0x02 // Default
+#define PACKET2_AUTORXRESTART_OFF 0x00
+
+#define PACKET2_AES_ON 0x01
+#define PACKET2_AES_OFF 0x00 // Default
+
+
+// RegTemp1
+#define TEMP1_MEAS_START 0x08
+#define TEMP1_MEAS_RUNNING 0x04
+#define TEMP1_ADCLOWPOWER_ON 0x01 // Default
+#define TEMP1_ADCLOWPOWER_OFF 0x00
+*/
+
+/* RegTestDagc (0x6F) */
+#define DAGC_NORMAL 0x00 /* Reset value */
+#define DAGC_IMPROVED_LOWBETA1 0x20
+#define DAGC_IMPROVED_LOWBETA0 0x30 /* Recommended val */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index a4aedb489e92..cbf8eb4a049d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -2385,7 +2385,7 @@ static inline void ieee80211_process_probe_response(
struct ieee80211_probe_response *beacon,
struct ieee80211_rx_stats *stats)
{
- struct ieee80211_network network;
+ struct ieee80211_network *network;
struct ieee80211_network *target;
struct ieee80211_network *oldest = NULL;
#ifdef CONFIG_IEEE80211_DEBUG
@@ -2397,7 +2397,10 @@ static inline void ieee80211_process_probe_response(
u16 capability;
//u8 wmm_info;
- memset(&network, 0, sizeof(struct ieee80211_network));
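+	/*
+	 * Heap allocation replaces the large on-stack struct; GFP_ATOMIC is
+	 * used because this receive path must not sleep.
+	 */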
+ network = kzalloc(sizeof(*network), GFP_ATOMIC);
+ if (!network)
+ goto out;
+
capability = le16_to_cpu(beacon->capability);
IEEE80211_DEBUG_SCAN(
"'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
@@ -2420,14 +2423,14 @@ static inline void ieee80211_process_probe_response(
(capability & (1 << 0x1)) ? '1' : '0',
(capability & (1 << 0x0)) ? '1' : '0');
- if (ieee80211_network_init(ieee, beacon, &network, stats)) {
+ if (ieee80211_network_init(ieee, beacon, network, stats)) {
IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
escape_essid(info_element->data,
info_element->len),
beacon->header.addr3,
fc == IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
- return;
+ goto out;
}
// For Asus EeePc request,
@@ -2437,8 +2440,8 @@ static inline void ieee80211_process_probe_response(
// then wireless adapter should do active scan from ch1~11 and
// passive scan from ch12~14
- if (!IsLegalChannel(ieee, network.channel))
- return;
+ if (!IsLegalChannel(ieee, network->channel))
+ goto out;
if (ieee->bGlobalDomain)
{
if (fc == IEEE80211_STYPE_PROBE_RESP)
@@ -2446,19 +2449,19 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if (!IsLegalChannel(ieee, network.channel)) {
- printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
- return;
+ if (!IsLegalChannel(ieee, network->channel)) {
+ printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
+ goto out;
}
}
// Case 2: No any country code.
else
{
// Filter over channel ch12~14
- if (network.channel > 11)
+ if (network->channel > 11)
{
- printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel);
- return;
+ printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
+ goto out;
}
}
}
@@ -2467,19 +2470,19 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if (!IsLegalChannel(ieee, network.channel)) {
- printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel);
- return;
+ if (!IsLegalChannel(ieee, network->channel)) {
+ printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network->channel);
+ goto out;
}
}
// Case 2: No any country code.
else
{
// Filter over channel ch12~14
- if (network.channel > 14)
+ if (network->channel > 14)
{
- printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel);
- return;
+ printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network->channel);
+ goto out;
}
}
}
@@ -2497,8 +2500,8 @@ static inline void ieee80211_process_probe_response(
spin_lock_irqsave(&ieee->lock, flags);
- if (is_same_network(&ieee->current_network, &network, ieee)) {
- update_network(&ieee->current_network, &network);
+ if (is_same_network(&ieee->current_network, network, ieee)) {
+ update_network(&ieee->current_network, network);
if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
&& ieee->current_network.berp_info_valid){
if(ieee->current_network.erp_value& ERP_UseProtection)
@@ -2512,11 +2515,11 @@ static inline void ieee80211_process_probe_response(
ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
}
else //hidden AP
- network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
+ network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
}
list_for_each_entry(target, &ieee->network_list, list) {
- if (is_same_network(target, &network, ieee))
+ if (is_same_network(target, network, ieee))
break;
if ((oldest == NULL) ||
(target->last_scanned < oldest->last_scanned))
@@ -2545,16 +2548,16 @@ static inline void ieee80211_process_probe_response(
#ifdef CONFIG_IEEE80211_DEBUG
IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
- escape_essid(network.ssid,
- network.ssid_len),
- network.bssid,
+ escape_essid(network->ssid,
+ network->ssid_len),
+ network->bssid,
fc == IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
#endif
- memcpy(target, &network, sizeof(*target));
+ memcpy(target, network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
- ieee80211_softmac_new_net(ieee,&network);
+ ieee80211_softmac_new_net(ieee,network);
} else {
IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
escape_essid(target->ssid,
@@ -2570,27 +2573,30 @@ static inline void ieee80211_process_probe_response(
renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
//YJ,add,080819,for hidden ap
if(is_beacon(beacon->header.frame_ctl) == 0)
- network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags);
- //if(strncmp(network.ssid, "linksys-c",9) == 0)
- // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags);
- if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
- && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\
- ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK))))
+ network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & target->flags);
+ //if(strncmp(network->ssid, "linksys-c",9) == 0)
+ // printk("====>2 network->ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network->ssid, network->flags, target->ssid, target->flags);
+ if(((network->flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
+ && (((network->ssid_len > 0) && (strncmp(target->ssid, network->ssid, network->ssid_len)))\
+ ||((ieee->current_network.ssid_len == network->ssid_len)&&(strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK))))
renew = 1;
//YJ,add,080819,for hidden ap,end
- update_network(target, &network);
+ update_network(target, network);
if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
- ieee80211_softmac_new_net(ieee,&network);
+ ieee80211_softmac_new_net(ieee,network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
- if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\
+ if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, network, ieee)&&\
(ieee->state == IEEE80211_LINKED)) {
if (ieee->handle_beacon != NULL) {
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network);
}
}
+
+out:
+ kfree(network);
}
void ieee80211_rx_mgt(struct ieee80211_device *ieee,
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 779ecdbc4e17..46b3f19e0878 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -270,8 +270,7 @@ int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
kfree(usbdata);
if (status < 0) {
- netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n",
- status);
+ netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
}
return 0;
@@ -321,7 +320,7 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data)
kfree(usbdata);
if (status < 0) {
- netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
+ netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
}
@@ -348,7 +347,7 @@ int write_nic_word(struct net_device *dev, int indx, u16 data)
kfree(usbdata);
if (status < 0) {
- netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
+ netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
}
@@ -376,8 +375,7 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
if (status < 0) {
- netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
- status);
+ netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
}
@@ -3095,7 +3093,8 @@ static RESET_TYPE TxCheckStuck(struct net_device *dev)
if (bCheckFwTxCnt) {
if (HalTxCheckStuck819xUsb(dev)) {
RT_TRACE(COMP_RESET,
- "TxCheckStuck(): Fw indicates no Tx condition!\n");
+ "%s: Fw indicates no Tx condition!\n",
+ __func__);
return RESET_TYPE_SILENT;
}
}
@@ -3237,7 +3236,7 @@ static void CamRestoreAllEntry(struct net_device *dev)
static u8 CAM_CONST_BROAD[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- RT_TRACE(COMP_SEC, "CamRestoreAllEntry:\n");
+ RT_TRACE(COMP_SEC, "%s:\n", __func__);
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
@@ -3835,8 +3834,8 @@ static u8 HwRateToMRate90(bool bIsHT, u8 rate)
default:
ret_rate = 0xff;
RT_TRACE(COMP_RECV,
- "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",
- rate, bIsHT);
+ "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
+ __func__, rate, bIsHT);
break;
}
@@ -3897,8 +3896,8 @@ static u8 HwRateToMRate90(bool bIsHT, u8 rate)
default:
ret_rate = 0xff;
RT_TRACE(COMP_RECV,
- "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",
- rate, bIsHT);
+ "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
+ __func__, rate, bIsHT);
break;
}
}
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 174ccf618d3e..00a123d44207 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -20,25 +20,24 @@
#ifndef R8192_HW
#define R8192_HW
-typedef enum _VERSION_819xU{
+typedef enum _VERSION_819xU {
VERSION_819xU_A, // A-cut
VERSION_819xU_B, // B-cut
VERSION_819xU_C,// C-cut
} VERSION_819xU, *PVERSION_819xU;
//added for different RF type
-typedef enum _RT_RF_TYPE_DEF
-{
+typedef enum _RT_RF_TYPE_DEF {
RF_1T2R = 0,
RF_2T4R,
RF_819X_MAX_TYPE
-}RT_RF_TYPE_DEF;
+} RT_RF_TYPE_DEF;
-typedef enum _BaseBand_Config_Type{
+typedef enum _BaseBand_Config_Type {
BaseBand_Config_PHY_REG = 0, //Radio Path A
BaseBand_Config_AGC_TAB = 1, //Radio Path B
-}BaseBand_Config_Type, *PBaseBand_Config_Type;
+} BaseBand_Config_Type, *PBaseBand_Config_Type;
#define RTL8187_REQT_READ 0xc0
#define RTL8187_REQT_WRITE 0x40
#define RTL8187_REQ_GET_REGS 0x05
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index 20372659d15d..a077069d6227 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -111,8 +111,8 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter)
*/
memcpy(&backupPMKIDList[0],
- &adapter->securitypriv.PMKIDList[0],
- sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ &adapter->securitypriv.PMKIDList[0],
+ sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
backupTKIPCountermeasure =
adapter->securitypriv.btkip_countermeasure;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 5346c657485d..0104aced113e 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -385,7 +385,7 @@ _next:
if (blnPending)
wr_sz += 8; /* Append 8 bytes */
r8712_write_mem(padapter, RTL8712_DMA_H2CCMD, wr_sz,
- (u8 *)pdesc);
+ (u8 *)pdesc);
pcmdpriv->cmd_seq++;
if (pcmd->cmdcode == GEN_CMD_CODE(_CreateBss)) {
pcmd->res = H2C_SUCCESS;
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 205298e23656..d90213eb5e20 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -347,7 +347,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
ret = false;
if (value == 0xFF) /* write again */
efuse_one_byte_write(padapter, addr,
- pkt.data[i * 2]);
+ pkt.data[i * 2]);
}
if (!efuse_one_byte_read(padapter, addr + 1, &value)) {
ret = false;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 7fe626583c8a..42d014007764 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -640,7 +640,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
/* 1st frame dequeued */
pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
/* need to remember the 1st frame */
- if (pxmitframe != NULL) {
+ if (pxmitframe) {
#ifdef CONFIG_R8712_TX_AGGR
/* 1. dequeue 2nd frame
@@ -653,13 +653,13 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
return false;
}
- if (p2ndxmitframe != NULL)
+ if (p2ndxmitframe)
if (p2ndxmitframe->frame_tag != DATA_FRAMETAG) {
r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
return false;
}
r8712_xmitframe_aggr_1st(pxmitbuf, pxmitframe);
- if (p2ndxmitframe != NULL) {
+ if (p2ndxmitframe) {
u16 total_length;
total_length = r8712_xmitframe_aggr_next(
@@ -667,7 +667,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
do {
p2ndxmitframe = dequeue_xframe_ex(
pxmitpriv, phwxmits, hwentry);
- if (p2ndxmitframe != NULL)
+ if (p2ndxmitframe)
total_length =
r8712_xmitframe_aggr_next(
pxmitbuf,
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 3c5cb78b52ea..01f78d1671de 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -55,7 +55,7 @@ void rtw_btcoex_ConnectNotify(struct adapter *padapter, u8 action)
void rtw_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus)
{
- if ((RT_MEDIA_CONNECT == mediaStatus)
+ if ((mediaStatus == RT_MEDIA_CONNECT)
&& (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
rtw_hal_set_hwreg(padapter, HW_VAR_DL_RSVD_PAGE, NULL);
}
diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c
index 20eb4ebcf8c3..2195c80235a1 100644
--- a/drivers/staging/typec/tcpm.c
+++ b/drivers/staging/typec/tcpm.c
@@ -1015,8 +1015,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
if (port->data_role == TYPEC_DEVICE &&
port->nr_snk_vdo) {
for (i = 0; i < port->nr_snk_vdo; i++)
- response[i + 1]
- = cpu_to_le32(port->snk_vdo[i]);
+ response[i + 1] = port->snk_vdo[i];
rlen = port->nr_snk_vdo + 1;
}
break;
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 692efcb38245..d6d99cc33a33 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/*
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -20,87 +21,67 @@
#include <linux/io.h>
#include <linux/uuid.h>
-#define __SUPERVISOR_CHANNEL_H__
-
#define SIGNATURE_16(A, B) ((A) | ((B) << 8))
#define SIGNATURE_32(A, B, C, D) \
(SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
-#define SIGNATURE_64(A, B, C, D, E, F, G, H) \
- (SIGNATURE_32(A, B, C, D) | ((u64)(SIGNATURE_32(E, F, G, H)) << 32))
-
-#ifndef COVER
-#define COVER(v, d) ((d) * DIV_ROUND_UP(v, d))
-#endif
#define VISOR_CHANNEL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+/*
+ * enum channel_serverstate
+ * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
+ * @CHANNELSRV_READY: Channel has been initialized by server.
+ */
enum channel_serverstate {
- CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
- CHANNELSRV_READY = 1 /* channel has been initialized by server */
+ CHANNELSRV_UNINITIALIZED = 0,
+ CHANNELSRV_READY = 1
};
+/*
+ * enum channel_clientstate
+ * @CHANNELCLI_DETACHED:
+ * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
+ * unless given TBD explicit request
+ * (should actually be < DETACHED).
+ * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
+ * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
+ * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
+ * @CHANNELCLI_OWNED: "No worries" state - client can access channel
+ * anytime.
+ */
enum channel_clientstate {
CHANNELCLI_DETACHED = 0,
- CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT
- * allowed to use it unless given TBD
- * explicit request (should actually be
- * < DETACHED)
- */
- CHANNELCLI_ATTACHING = 2, /* legacy EFI client request
- * for EFI server to attach
- */
- CHANNELCLI_ATTACHED = 3, /* idle, but client may want
- * to use channel any time
- */
- CHANNELCLI_BUSY = 4, /* client either wants to use or is
- * using channel
- */
- CHANNELCLI_OWNED = 5 /* "no worries" state - client can */
- /* access channel anytime */
+ CHANNELCLI_DISABLED = 1,
+ CHANNELCLI_ATTACHING = 2,
+ CHANNELCLI_ATTACHED = 3,
+ CHANNELCLI_BUSY = 4,
+ CHANNELCLI_OWNED = 5
};
-#define VISOR_CHANNEL_SERVER_READY(ch) \
- (readl(&(ch)->srv_state) == CHANNELSRV_READY)
-
-#define VISOR_VALID_CHANNELCLI_TRANSITION(o, n) \
- (((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
- (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \
- (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \
- (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DETACHED)) || \
- (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DETACHED)) || \
- (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHING)) || \
- (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_ATTACHED)) || \
- (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHED)) || \
- (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_ATTACHED)) || \
- (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_BUSY)) || \
- (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_OWNED)) || \
- (((o) == CHANNELCLI_DISABLED) && ((n) == CHANNELCLI_OWNED)) || \
- (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_OWNED)) || \
- (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_OWNED)) || \
- (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
- ? (1) : (0))
-
/* Values for VISORA_CHANNEL_PROTOCOL.CliErrorBoot: */
-/* throttling invalid boot channel statetransition error due to client
- * disabled
+
+/*
+ * Throttling invalid boot channel state transition error due to client
+ * disabled.
*/
#define VISOR_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
-/* throttling invalid boot channel statetransition error due to client
- * not attached
+/*
+ * Throttling invalid boot channel state transition error due to client
+ * not attached.
*/
#define VISOR_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
-/* throttling invalid boot channel statetransition error due to busy channel */
+/* Throttling invalid boot channel state transition error due to busy channel */
#define VISOR_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
-/* Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so
- * that windows guest can look at the FeatureFlags in the io channel,
- * and configure the windows driver to use interrupts or not based on
- * this setting. This flag is set in uislib after the
- * VISOR_VHBA_init_channel is called. All feature bits for all
- * channels should be defined here. The io channel feature bits are
- * defined right here
+/*
+ * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that the
+ * Windows guest can look at the FeatureFlags in the IO channel, and configure
+ * the Windows driver to use interrupts or not based on this setting. This flag
+ * is set in uislib after the VISOR_VHBA_init_channel is called. All feature
+ * bits for all channels should be defined here. The IO channel feature bits
+ * are defined right here.
+ */
#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
@@ -108,151 +89,120 @@ enum channel_clientstate {
#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
-/* Common Channel Header */
+/*
+ * struct channel_header - Common Channel Header
+ * @signature: Signature.
+ * @legacy_state: DEPRECATED - being replaced by SrvState, CliStateBoot,
+ * and CliStateOS below.
+ * @header_size: sizeof(struct channel_header).
+ * @size: Total size of this channel in bytes.
+ * @features: Flags to modify behavior.
+ * @chtype: Channel type: data, bus, control, etc.
+ * @partition_handle: ID of guest partition.
+ * @handle: Device number of this channel in client.
+ * @ch_space_offset: Offset in bytes to channel specific area.
+ * @version_id: Struct channel_header Version ID.
+ * @partition_index: Index of guest partition.
+ * @zone_uuid: Guid of Channel's zone.
+ * @cli_str_offset: Offset from channel header to null-terminated
+ * ClientString (0 if ClientString not present).
+ * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
+ * channel.
+ * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
+ * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
+ * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @srv_state: CHANNEL_SERVERSTATE.
+ * @cli_error_boot: Bits to indicate err states for boot clients, so err
+ * messages can be throttled.
+ * @cli_error_os: Bits to indicate err states for OS clients, so err
+ * messages can be throttled.
+ * @filler: Pad out to 128 byte cacheline.
+ * @recover_channel: Please add all new single-byte values below here.
+ */
struct channel_header {
- u64 signature; /* Signature */
- u32 legacy_state; /* DEPRECATED - being replaced by */
- /* SrvState, CliStateBoot, and CliStateOS below */
- u32 header_size; /* sizeof(struct channel_header) */
- u64 size; /* Total size of this channel in bytes */
- u64 features; /* Flags to modify behavior */
- uuid_le chtype; /* Channel type: data, bus, control, etc. */
- u64 partition_handle; /* ID of guest partition */
- u64 handle; /* Device number of this channel in client */
- u64 ch_space_offset; /* Offset in bytes to channel specific area */
- u32 version_id; /* struct channel_header Version ID */
- u32 partition_index; /* Index of guest partition */
- uuid_le zone_uuid; /* Guid of Channel's zone */
- u32 cli_str_offset; /* offset from channel header to
- * nul-terminated ClientString (0 if
- * ClientString not present)
- */
- u32 cli_state_boot; /* CHANNEL_CLIENTSTATE of pre-boot
- * EFI client of this channel
- */
- u32 cmd_state_cli; /* CHANNEL_COMMANDSTATE (overloaded in
- * Windows drivers, see ServerStateUp,
- * ServerStateDown, etc)
- */
- u32 cli_state_os; /* CHANNEL_CLIENTSTATE of Guest OS
- * client of this channel
- */
- u32 ch_characteristic; /* CHANNEL_CHARACTERISTIC_<xxx> */
- u32 cmd_state_srv; /* CHANNEL_COMMANDSTATE (overloaded in
- * Windows drivers, see ServerStateUp,
- * ServerStateDown, etc)
- */
- u32 srv_state; /* CHANNEL_SERVERSTATE */
- u8 cli_error_boot; /* bits to indicate err states for
- * boot clients, so err messages can
- * be throttled
- */
- u8 cli_error_os; /* bits to indicate err states for OS
- * clients, so err messages can be
- * throttled
- */
- u8 filler[1]; /* Pad out to 128 byte cacheline */
- /* Please add all new single-byte values below here */
+ u64 signature;
+ u32 legacy_state;
+ /* SrvState, CliStateBoot, and CliStateOS below */
+ u32 header_size;
+ u64 size;
+ u64 features;
+ uuid_le chtype;
+ u64 partition_handle;
+ u64 handle;
+ u64 ch_space_offset;
+ u32 version_id;
+ u32 partition_index;
+ uuid_le zone_uuid;
+ u32 cli_str_offset;
+ u32 cli_state_boot;
+ u32 cmd_state_cli;
+ u32 cli_state_os;
+ u32 ch_characteristic;
+ u32 cmd_state_srv;
+ u32 srv_state;
+ u8 cli_error_boot;
+ u8 cli_error_os;
+ u8 filler[1];
u8 recover_channel;
} __packed;
#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
-/* Subheader for the Signal Type variation of the Common Channel */
+/*
+ * struct signal_queue_header - Subheader for the Signal Type variation of the
+ * Common Channel.
+ * @version: SIGNAL_QUEUE_HEADER Version ID.
+ * @chtype: Queue type: storage, network.
+ * @size: Total size of this queue in bytes.
+ * @sig_base_offset: Offset to signal queue area.
+ * @features: Flags to modify behavior.
+ * @num_sent: Total # of signals placed in this queue.
+ * @num_overflows: Total # of inserts failed due to full queue.
+ * @signal_size: Total size of a signal for this queue.
+ * @max_slots: Max # of slots in queue, 1 slot is always empty.
+ * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
+ * @head: Queue head signal #.
+ * @num_received: Total # of signals removed from this queue.
+ * @tail: Queue tail signal.
+ * @reserved1: Reserved field.
+ * @reserved2: Reserved field.
+ * @client_queue:
+ * @num_irq_received: Total # of Interrupts received. This is incremented by the
+ * ISR in the guest Windows driver.
+ * @num_empty: Number of times that visor_signal_remove is called and
+ * returned Empty Status.
+ * @errorflags: Error bits set during SignalReinit to denote trouble with
+ * client's fields.
+ * @filler: Pad out to 64 byte cacheline.
+ */
struct signal_queue_header {
/* 1st cache line */
- u32 version; /* SIGNAL_QUEUE_HEADER Version ID */
- u32 chtype; /* Queue type: storage, network */
- u64 size; /* Total size of this queue in bytes */
- u64 sig_base_offset; /* Offset to signal queue area */
- u64 features; /* Flags to modify behavior */
- u64 num_sent; /* Total # of signals placed in this queue */
- u64 num_overflows; /* Total # of inserts failed due to
- * full queue
- */
- u32 signal_size; /* Total size of a signal for this queue */
- u32 max_slots; /* Max # of slots in queue, 1 slot is
- * always empty
- */
- u32 max_signals; /* Max # of signals in queue
- * (MaxSignalSlots-1)
- */
- u32 head; /* Queue head signal # */
+ u32 version;
+ u32 chtype;
+ u64 size;
+ u64 sig_base_offset;
+ u64 features;
+ u64 num_sent;
+ u64 num_overflows;
+ u32 signal_size;
+ u32 max_slots;
+ u32 max_signals;
+ u32 head;
/* 2nd cache line */
- u64 num_received; /* Total # of signals removed from this queue */
- u32 tail; /* Queue tail signal */
- u32 reserved1; /* Reserved field */
- u64 reserved2; /* Reserved field */
+ u64 num_received;
+ u32 tail;
+ u32 reserved1;
+ u64 reserved2;
u64 client_queue;
- u64 num_irq_received; /* Total # of Interrupts received. This
- * is incremented by the ISR in the
- * guest windows driver
- */
- u64 num_empty; /* Number of times that visor_signal_remove
- * is called and returned Empty Status.
- */
- u32 errorflags; /* Error bits set during SignalReinit
- * to denote trouble with client's
- * fields
- */
- u8 filler[12]; /* Pad out to 64 byte cacheline */
+ u64 num_irq_received;
+ u64 num_empty;
+ u32 errorflags;
+ u8 filler[12];
} __packed;
-/* Generic function useful for validating any type of channel when it is
- * received by the client that will be accessing the channel.
- * Note that <logCtx> is only needed for callers in the EFI environment, and
- * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
- */
-static inline int
-visor_check_channel(struct channel_header *ch,
- uuid_le expected_uuid,
- char *chname,
- u64 expected_min_bytes,
- u32 expected_version,
- u64 expected_signature)
-{
- if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
- /* caller wants us to verify type GUID */
- if (uuid_le_cmp(ch->chtype, expected_uuid) != 0) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
- chname, &expected_uuid,
- &expected_uuid, &ch->chtype);
- return 0;
- }
- }
- if (expected_min_bytes > 0) { /* verify channel size */
- if (ch->size < expected_min_bytes) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, &expected_uuid,
- (unsigned long long)expected_min_bytes,
- ch->size);
- return 0;
- }
- }
- if (expected_version > 0) { /* verify channel version */
- if (ch->version_id != expected_version) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
- chname, &expected_uuid,
- (unsigned long)expected_version,
- ch->version_id);
- return 0;
- }
- }
- if (expected_signature > 0) { /* verify channel signature */
- if (ch->signature != expected_signature) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
- chname, &expected_uuid,
- expected_signature, ch->signature);
- return 0;
- }
- }
- return 1;
-}
-
-/*
- * CHANNEL Guids
- */
-
+/* CHANNEL Guids */
/* {414815ed-c58c-11da-95a9-00e08161165f} */
#define VISOR_VHBA_CHANNEL_UUID \
UUID_LE(0x414815ed, 0xc58c, 0x11da, \
@@ -260,7 +210,6 @@ visor_check_channel(struct channel_header *ch,
static const uuid_le visor_vhba_channel_uuid = VISOR_VHBA_CHANNEL_UUID;
#define VISOR_VHBA_CHANNEL_UUID_STR \
"414815ed-c58c-11da-95a9-00e08161165f"
-
/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
#define VISOR_VNIC_CHANNEL_UUID \
UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
@@ -268,7 +217,6 @@ static const uuid_le visor_vhba_channel_uuid = VISOR_VHBA_CHANNEL_UUID;
static const uuid_le visor_vnic_channel_uuid = VISOR_VNIC_CHANNEL_UUID;
#define VISOR_VNIC_CHANNEL_UUID_STR \
"8cd5994d-c58e-11da-95a9-00e08161165f"
-
/* {72120008-4AAB-11DC-8530-444553544200} */
#define VISOR_SIOVM_UUID \
UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index c7cb3fbde7b2..80b9ef3ceb0c 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -7,9 +7,7 @@
* Everything needed for IOPart-GuestPart communication is define in
* this file. Note: Everything is OS-independent because this file is
* used by Windows, Linux and possible EFI drivers.
- */
-
-/*
+ *
* Communication flow between the IOPart and GuestPart uses the channel headers
* channel state. The following states are currently being used:
* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
@@ -30,14 +28,9 @@
*/
#include <linux/uuid.h>
-
#include <linux/dma-direction.h>
#include "channel.h"
-#define VISOR_VHBA_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
-#define VISOR_VNIC_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
-#define VISOR_VSWITCH_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
-
/*
* Must increment these whenever you insert or delete fields within this channel
* struct. Also increment whenever you change the meaning of fields within this
@@ -47,19 +40,6 @@
*/
#define VISOR_VHBA_CHANNEL_VERSIONID 2
#define VISOR_VNIC_CHANNEL_VERSIONID 2
-#define VISOR_VSWITCH_CHANNEL_VERSIONID 1
-
-#define VISOR_VHBA_CHANNEL_OK_CLIENT(ch) \
- (visor_check_channel(ch, visor_vhba_channel_uuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, \
- VISOR_VHBA_CHANNEL_VERSIONID, \
- VISOR_VHBA_CHANNEL_SIGNATURE))
-
-#define VISOR_VNIC_CHANNEL_OK_CLIENT(ch) \
- (visor_check_channel(ch, visor_vnic_channel_uuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, \
- VISOR_VNIC_CHANNEL_VERSIONID, \
- VISOR_VNIC_CHANNEL_SIGNATURE))
/*
* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
@@ -71,58 +51,57 @@
#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
/* Define the two queues per data channel between iopart and ioguestparts. */
+
/* Used by ioguestpart to 'insert' signals to iopart. */
#define IOCHAN_TO_IOPART 0
+
/* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */
#define IOCHAN_FROM_IOPART 1
/* Size of cdb - i.e., SCSI cmnd */
#define MAX_CMND_SIZE 16
-
#define MAX_SENSE_SIZE 64
-
#define MAX_PHYS_INFO 64
-/* Various types of network packets that can be sent in cmdrsp. */
+/*
+ * enum net_types - Various types of network packets that can be sent in cmdrsp.
+ * @NET_RCV_POST: Submit buffer to hold receiving incoming packet.
+ * @NET_RCV: visornic -> uisnic. Incoming packet received.
+ * @NET_XMIT: uisnic -> visornic. For outgoing packet.
+ * @NET_XMIT_DONE: visornic -> uisnic. Outgoing packet xmitted.
+ * @NET_RCV_ENBDIS: uisnic -> visornic. Enable/Disable packet reception.
+ * @NET_RCV_ENBDIS_ACK: visornic -> uisnic. Acknowledge enable/disable packet.
+ * @NET_RCV_PROMISC: uisnic -> visornic. Enable/Disable promiscuous mode.
+ * @NET_CONNECT_STATUS: visornic -> uisnic. Indicate the loss or restoration of
+ * a network connection.
+ * @NET_MACADDR: uisnic -> visornic. Indicates the client has requested
+ * to update its MAC address.
+ * @NET_MACADDR_ACK: MAC address acknowledge.
+ */
enum net_types {
- NET_RCV_POST = 0, /*
- * Submit buffer to hold receiving
- * incoming packet
- */
- /* visornic -> uisnic */
- NET_RCV, /* incoming packet received */
- /* uisnic -> visornic */
- NET_XMIT, /* for outgoing net packets */
- /* visornic -> uisnic */
- NET_XMIT_DONE, /* outgoing packet xmitted */
- /* uisnic -> visornic */
- NET_RCV_ENBDIS, /* enable/disable packet reception */
- /* visornic -> uisnic */
- NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */
- /* reception */
- /* uisnic -> visornic */
- NET_RCV_PROMISC, /* enable/disable promiscuous mode */
- /* visornic -> uisnic */
- NET_CONNECT_STATUS, /*
- * indicate the loss or restoration of a network
- * connection
- */
- /* uisnic -> visornic */
- NET_MACADDR, /*
- * Indicates the client has requested to update
- * it's MAC address
- */
- NET_MACADDR_ACK, /* MAC address acknowledge */
-
+ NET_RCV_POST = 0,
+ NET_RCV,
+ NET_XMIT,
+ NET_XMIT_DONE,
+ NET_RCV_ENBDIS,
+ NET_RCV_ENBDIS_ACK,
+ /* Reception */
+ NET_RCV_PROMISC,
+ NET_CONNECT_STATUS,
+ NET_MACADDR,
+ NET_MACADDR_ACK,
};
-#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
+/* Minimum eth data size */
+#define ETH_MIN_DATA_SIZE 46
#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
+/* Maximum data size */
+#define VISOR_ETH_MAX_MTU 16384
#ifndef MAX_MACADDR_LEN
-#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
+/* Number of bytes in MAC address */
+#define MAX_MACADDR_LEN 6
#endif
/* Various types of scsi task mgmt commands. */
@@ -156,10 +135,16 @@ struct guest_phys_info {
#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
+/*
+ * struct uisscsi_dest
+ * @channel: Bus number.
+ * @id: Target number.
+ * @lun: Logical unit number.
+ */
struct uisscsi_dest {
- u32 channel; /* channel == bus number */
- u32 id; /* id == target number */
- u32 lun; /* lun == logical unit number */
+ u32 channel;
+ u32 id;
+ u32 lun;
} __packed;
struct vhba_wwnn {
@@ -168,61 +153,77 @@ struct vhba_wwnn {
} __packed;
/*
- * WARNING: Values stired in this structure must contain maximum counts (not
+ * struct vhba_config_max
+ * @max_channel: Maximum channel for devices attached to this bus.
+ * @max_id: Maximum SCSI ID for devices attached to bus.
+ * @max_lun: Maximum SCSI LUN for devices attached to bus.
+ * @cmd_per_lun: Maximum number of outstanding commands per LUN.
+ * @max_io_size: Maximum IO size for devices attached to this bus. Max IO size
+ * is often determined by the resources of the HBA,
+ * e.g. max scatter gather list length * page size / sector size.
+ *
+ * WARNING: Values stored in this structure must contain maximum counts (not
* maximum values).
+ *
+ * 20 bytes
*/
-struct vhba_config_max {/* 20 bytes */
- u32 max_channel;/* maximum channel for devices attached to this bus */
- u32 max_id; /* maximum SCSI ID for devices attached to bus */
- u32 max_lun; /* maximum SCSI LUN for devices attached to bus */
- u32 cmd_per_lun;/* maximum number of outstanding commands per LUN */
- u32 max_io_size;/* maximum io size for devices attached to this bus */
- /* max io size is often determined by the resource of the hba. e.g */
- /* max scatter gather list length * page size / sector size */
+struct vhba_config_max {
+ u32 max_channel;
+ u32 max_id;
+ u32 max_lun;
+ u32 cmd_per_lun;
+ u32 max_io_size;
} __packed;
+/*
+ * struct uiscmdrsp_scsi
+ *
+ * @handle: The handle to the cmd that was received. Send it back as
+ * is in the rsp packet.
+ * @cmnd: The cdb for the command.
+ * @bufflen: Length of data to be transferred out or in.
+ * @guest_phys_entries: Number of entries in scatter-gather list.
+ * @struct gpi_list: Physical address information for each fragment.
+ * @enum data_dir: Direction of the data, if any.
+ * @struct vdest: Identifies the virtual hba, id, channel, lun to which
+ * cmd was sent.
+ * @linuxstat: Original Linux status used by Linux vdisk.
+ * @scsistat: The scsi status.
+ * @addlstat: Non-scsi status.
+ * @sensebuf: Sense info in case cmd failed. sensebuf holds the
+ * sense_data struct. See sense_data struct for more
+ * details.
+ * @*vdisk: Pointer to the vdisk to clean up when IO completes.
+ * @no_disk_result: Used to return no disk inquiry result when
+ * no_disk_result is set to 1:
+ * scsi.scsistat is SAM_STAT_GOOD
+ * scsi.addlstat is 0
+ * scsi.linuxstat is SAM_STAT_GOOD
+ * That is, there is NO error.
+ */
struct uiscmdrsp_scsi {
- u64 handle; /* the handle to the cmd that was received */
- /* send it back as is in the rsp packet. */
- u8 cmnd[MAX_CMND_SIZE]; /* the cdb for the command */
- u32 bufflen; /* length of data to be transferred out or in */
- u16 guest_phys_entries; /* Number of entries in scatter-gather list */
- struct guest_phys_info gpi_list[MAX_PHYS_INFO]; /* physical address
- * information for each
- * fragment
- */
- enum dma_data_direction data_dir; /* direction of the data, if any */
- struct uisscsi_dest vdest; /* identifies the virtual hba, id, */
- /* channel, lun to which cmd was sent */
-
+ u64 handle;
+ u8 cmnd[MAX_CMND_SIZE];
+ u32 bufflen;
+ u16 guest_phys_entries;
+ struct guest_phys_info gpi_list[MAX_PHYS_INFO];
+ enum dma_data_direction data_dir;
+ struct uisscsi_dest vdest;
/* Needed to queue the rsp back to cmd originator. */
- int linuxstat; /* original Linux status used by Linux vdisk */
- u8 scsistat; /* the scsi status */
- u8 addlstat; /* non-scsi status */
+ int linuxstat;
+ u8 scsistat;
+ u8 addlstat;
#define ADDL_SEL_TIMEOUT 4
-
/* The following fields are need to determine the result of command. */
- u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
- /* sensebuf holds the sense_data struct; */
- /* See sense_data struct for more details. */
- void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
+ u8 sensebuf[MAX_SENSE_SIZE];
+ void *vdisk;
int no_disk_result;
- /*
- * Used to return no disk inquiry result
- * when no_disk_result is set to 1,
- * scsi.scsistat is SAM_STAT_GOOD
- * scsi.addlstat is 0
- * scsi.linuxstat is SAM_STAT_GOOD
- * That is, there is NO error.
- */
} __packed;
/*
* Defines to support sending correct inquiry result when no disk is
* configured.
- */
-
-/*
+ *
* From SCSI SPC2 -
*
* If the target is not capable of supporting a device on this logical unit, the
@@ -234,22 +235,35 @@ struct uiscmdrsp_scsi {
* connected to this logical unit.
*/
-#define DEV_NOT_CAPABLE 0x7f /*
- * peripheral qualifier of 0x3
- * peripheral type of 0x1f
- * specifies no device but target present
- */
-
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1
- * peripheral type of 0 - disk
- * Specifies device capable, but
- * not present
- */
+/*
+ * Peripheral qualifier of 0x3
+ * Peripheral type of 0x1f
+ * Specifies no device but target present
+ */
+#define DEV_NOT_CAPABLE 0x7f
+/*
+ * Peripheral qualifier of 0x1
+ * Peripheral type of 0 - disk
+ * Specifies device capable, but not present
+ */
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20
+/* HiSup = 1; shows support for report luns must be returned for lun 0. */
+#define DEV_HISUPPORT 0x10
-#define DEV_HISUPPORT 0x10 /*
- * HiSup = 1; shows support for report luns
- * must be returned for lun 0.
- */
/*
* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
@@ -258,11 +272,12 @@ struct uiscmdrsp_scsi {
* inquiry result.
*/
#define NO_DISK_INQUIRY_RESULT_LEN 36
-
-#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */
+/* 5 bytes minimum for inquiry result */
+#define MIN_INQUIRY_RESULT_LEN 5
/* SCSI device version for no disk inquiry result */
-#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
+/* indicates SCSI SPC2 (SPC3 is 5) */
+#define SCSI_SPC2_VER 4
/* Struct and Defines to support sense information. */
@@ -297,35 +312,48 @@ struct sense_data {
u8 sense_key_specific[3];
} __packed;
+/*
+ * struct net_pkt_xmt
+ * @len: Full length of data in the packet.
+ * @num_frags: Number of fragments in frags containing data.
+ * @struct phys_info frags: Physical page information.
+ * @ethhdr: The ethernet header.
+ * @struct lincsum: These are needed for csum at uisnic end.
+ * @valid: 1 = struct is valid - else ignore.
+ * @hrawoffv: 1 = hrawoff is valid.
+ * @nhrawoffv: 1 = nhrawoff is valid.
+ * @protocol: Specifies packet protocol.
+ * @csum: Value used to set skb->csum at IOPart.
+ * @hrawoff: Value used to set skb->h.raw at IOPart. hrawoff points to
+ * the start of the TRANSPORT LAYER HEADER.
+ * @nhrawoff: Value used to set skb->nh.raw at IOPart. nhrawoff points to
+ * the start of the NETWORK LAYER HEADER.
+ *
+ * NOTE:
+ * The full packet is described in frags but the ethernet header is
+ * separately kept in ethhdr so that uisnic doesn't have "MAP" the
+ * guest memory to get to the header. uisnic needs ethhdr to
+ * determine how to route the packet.
+ */
struct net_pkt_xmt {
- int len; /* full length of data in the packet */
- int num_frags; /* number of fragments in frags containing data */
- struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */
- char ethhdr[ETH_HLEN]; /* the ethernet header */
+ int len;
+ int num_frags;
+ struct phys_info frags[MAX_PHYS_INFO];
+ char ethhdr[ETH_HLEN];
struct {
- /* These are needed for csum at uisnic end */
- u8 valid; /* 1 = struct is valid - else ignore */
- u8 hrawoffv; /* 1 = hwrafoff is valid */
- u8 nhrawoffv; /* 1 = nhwrafoff is valid */
- __be16 protocol; /* specifies packet protocol */
- __wsum csum; /* value used to set skb->csum at IOPart */
- u32 hrawoff; /* value used to set skb->h.raw at IOPart */
- /* hrawoff points to the start of the TRANSPORT LAYER HEADER */
- u32 nhrawoff; /* value used to set skb->nh.raw at IOPart */
- /* nhrawoff points to the start of the NETWORK LAYER HEADER */
+ u8 valid;
+ u8 hrawoffv;
+ u8 nhrawoffv;
+ __be16 protocol;
+ __wsum csum;
+ u32 hrawoff;
+ u32 nhrawoff;
} lincsum;
-
- /*
- * NOTE:
- * The full packet is described in frags but the ethernet header is
- * separately kept in ethhdr so that uisnic doesn't have "MAP" the
- * guest memory to get to the header. uisnic needs ethhdr to
- * determine how to route the packet.
- */
} __packed;
struct net_pkt_xmtdone {
- u32 xmt_done_result; /* result of NET_XMIT */
+ /* Result of NET_XMIT */
+ u32 xmt_done_result;
} __packed;
/*
@@ -341,14 +369,12 @@ struct net_pkt_xmtdone {
((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
/ RCVPOST_BUF_SIZE)
-/*
- * rcv buf size must be large enough to include ethernet data len + ethernet
+/* rcv buf size must be large enough to include ethernet data len + ethernet
* header len - we are choosing 2K because it is guaranteed to be describable.
*/
struct net_pkt_rcvpost {
/* Physical page information for the single fragment 2K rcv buf */
struct phys_info frag;
-
/*
* Ensures that receive posts are returned to the adapter which we sent
* them from originally.
@@ -358,143 +384,148 @@ struct net_pkt_rcvpost {
} __packed;
/*
+ * struct net_pkt_rcv
+ * @rcv_done_len: Length of the received data.
+ * @numrcvbufs: Contains the incoming data. Guest side MUST chain these
+ * together.
+ * @*rcvbuf: List of chained rcvbufs. Each entry is a receive buffer
+ * provided by NET_RCV_POST. NOTE: First rcvbuf in the
+ * chain will also be provided in net.buf.
+ * @unique_num:
+ * @rcvs_dropped_delta:
+ *
* The number of rcvbuf that can be chained is based on max mtu and size of each
* rcvbuf.
*/
struct net_pkt_rcv {
- u32 rcv_done_len; /* length of received data */
-
- /*
- * numrcvbufs: contain the incoming data; guest side MUST chain these
- * together.
- */
+ u32 rcv_done_len;
u8 numrcvbufs;
-
- void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
-
- /* Each entry is a receive buffer provided by NET_RCV_POST. */
- /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
+ void *rcvbuf[MAX_NET_RCV_CHAIN];
u64 unique_num;
u32 rcvs_dropped_delta;
} __packed;
struct net_pkt_enbdis {
void *context;
- u16 enable; /* 1 = enable, 0 = disable */
+ /* 1 = enable, 0 = disable */
+ u16 enable;
} __packed;
struct net_pkt_macaddr {
void *context;
- u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+ /* 6 bytes */
+ u8 macaddr[MAX_MACADDR_LEN];
} __packed;
-/* cmd rsp packet used for VNIC network traffic */
+/*
+ * struct uiscmdrsp_net - cmd rsp packet used for VNIC network traffic.
+ * @enum type:
+ * @*buf:
+ * @union:
+ * @struct xmt: Used for NET_XMIT.
+ * @struct xmtdone: Used for NET_XMIT_DONE.
+ * @struct rcvpost: Used for NET_RCV_POST.
+ * @struct rcv: Used for NET_RCV.
+ * @struct enbdis: Used for NET_RCV_ENBDIS, NET_RCV_ENBDIS_ACK,
+ * NET_RCV_PROMISC, and NET_CONNECT_STATUS.
+ * @struct macaddr:
+ */
struct uiscmdrsp_net {
enum net_types type;
void *buf;
union {
- struct net_pkt_xmt xmt; /* used for NET_XMIT */
- struct net_pkt_xmtdone xmtdone; /* used for NET_XMIT_DONE */
- struct net_pkt_rcvpost rcvpost; /* used for NET_RCV_POST */
- struct net_pkt_rcv rcv; /* used for NET_RCV */
- struct net_pkt_enbdis enbdis; /* used for NET_RCV_ENBDIS, */
- /* NET_RCV_ENBDIS_ACK, */
- /* NET_RCV_PROMSIC, */
- /* and NET_CONNECT_STATUS */
+ struct net_pkt_xmt xmt;
+ struct net_pkt_xmtdone xmtdone;
+ struct net_pkt_rcvpost rcvpost;
+ struct net_pkt_rcv rcv;
+ struct net_pkt_enbdis enbdis;
struct net_pkt_macaddr macaddr;
};
} __packed;
+/*
+ * struct uiscmdrsp_scsitaskmgmt
+ * @enum tasktype: The type of task.
+ * @struct vdest: The vdisk for which this task mgmt is generated.
+ * @handle: This is a handle that the guest has saved off for its
+ * own use. The handle value is preserved by iopart and
+ * returned as is in the task mgmt rsp.
+ * @notify_handle: For Linux guests, this is a pointer to wait_queue_head
+ * that a thread is waiting on to see if the taskmgmt
+ * command has completed. When the rsp is received by
+ * guest, the thread receiving the response uses this to
+ * notify the thread waiting for taskmgmt command
+ * completion. It's value is preserved by iopart and
+ * returned as in the task mgmt rsp.
+ * @notifyresult_handle: This is a handle to the location in the guest where
+ * the result of the taskmgmt command (result field) is
+ * saved to when the response is handled. It's value is
+ * preserved by iopart and returned as is in the task mgmt
+ * rsp.
+ * @result: Result of taskmgmt command - set by IOPart.
+ */
struct uiscmdrsp_scsitaskmgmt {
- /* The type of task. */
enum task_mgmt_types tasktype;
-
- /* The vdisk for which this task mgmt is generated. */
struct uisscsi_dest vdest;
-
- /*
- * This is a handle that the guest has saved off for its own use.
- * The handle value is preserved by iopart and returned as in task
- * mgmt rsp.
- */
u64 handle;
-
- /*
- * For Linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the taskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. It's value is preserved by iopart and returned
- * as in the task mgmt rsp.
- */
u64 notify_handle;
-
- /*
- * This is a handle to the location in the guest where the result of
- * the taskmgmt command (result field) is saved to when the response
- * is handled. It's value is preserved by iopart and returned as in
- * the task mgmt rsp.
- */
u64 notifyresult_handle;
-
- /* Result of taskmgmt command - set by IOPart - values are: */
char result;
#define TASK_MGMT_FAILED 0
} __packed;
-/* Used by uissd to send disk add/remove notifications to Guest. */
-/* Note that the vHba pointer is not used by the Client/Guest side. */
+/*
+ * struct uiscmdrsp_disknotify - Used by uissd to send disk add/remove
+ * notifications to Guest.
+ * @add: 0-remove, 1-add.
+ * @*v_hba: Channel info to route msg.
+ * @channel: SCSI Path of Disk to be added or removed.
+ * @id: SCSI Path of Disk to be added or removed.
+ * @lun: SCSI Path of Disk to be added or removed.
+ *
+ * Note that the vHba pointer is not used by the Client/Guest side.
+ */
struct uiscmdrsp_disknotify {
- u8 add; /* 0-remove, 1-add */
- void *v_hba; /* channel info to route msg */
- u32 channel, id, lun; /* SCSI Path of Disk to added or removed */
+ u8 add;
+ void *v_hba;
+ u32 channel, id, lun;
} __packed;
/*
- * The following is used by virthba/vSCSI to send the Acquire/Release commands
- * to the IOVM.
+ * struct uiscmdrsp_vdiskmgmt - The following is used by virthba/vSCSI to send
+ * the Acquire/Release commands to the IOVM.
+ * @enum vdisktype: The type of task.
+ * @struct vdest: The vdisk for which this task mgmt is generated.
+ * @handle: This is a handle that the guest has saved off for its
+ * own use. Its value is preserved by iopart and returned
+ * as is in the task mgmt rsp.
+ * @notify_handle: For Linux guests, this is a pointer to wait_queue_head
+ * that a thread is waiting on to see if the taskmgmt
+ * command has completed. When the rsp is received by
+ * guest, the thread receiving the response uses this to
+ * notify the thread waiting for taskmgmt command
+ * completion. Its value is preserved by iopart and
+ * returned as is in the task mgmt rsp.
+ * @notifyresult_handle: Handle to the location in guest where the result of the
+ * taskmgmt command (result field) is saved to when the
+ * response is handled. Its value is preserved by iopart
+ * and returned as is in the task mgmt rsp.
+ * @result: Result of taskmgmt command - set by IOPart.
*/
struct uiscmdrsp_vdiskmgmt {
- /* The type of task */
enum vdisk_mgmt_types vdisktype;
-
- /* The vdisk for which this task mgmt is generated */
struct uisscsi_dest vdest;
-
- /*
- * This is a handle that the guest has saved off for its own use. It's
- * value is preserved by iopart and returned as in the task mgmt rsp.
- */
u64 handle;
-
- /*
- * For Linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the tskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. It's value is preserved by iopart and returned
- * as in the task mgmt rsp.
- */
u64 notify_handle;
-
- /*
- * Handle to the location in guest where the result of the
- * taskmgmt command (result field) is saved to when the response
- * is handled. It's value is preserved by iopart and returned as in
- * the task mgmt rsp.
- */
u64 notifyresult_handle;
-
- /* Result of taskmgmt command - set by IOPart - values are: */
char result;
} __packed;
/* Keeping cmd and rsp info in one structure for now cmd rsp packet for SCSI */
struct uiscmdrsp {
char cmdtype;
-
-/* Describes what type of information is in the struct */
+ /* Describes what type of information is in the struct */
#define CMD_SCSI_TYPE 1
#define CMD_NET_TYPE 2
#define CMD_SCSITASKMGMT_TYPE 3
@@ -509,21 +540,33 @@ struct uiscmdrsp {
};
/* Send the response when the cmd is done (scsi and scsittaskmgmt). */
void *private_data;
- struct uiscmdrsp *next; /* General Purpose Queue Link */
- struct uiscmdrsp *activeQ_next; /* Pointer to the nextactive commands */
- struct uiscmdrsp *activeQ_prev; /* Pointer to the prevactive commands */
+ /* General Purpose Queue Link */
+ struct uiscmdrsp *next;
+ /* Pointer to the next active command */
+ struct uiscmdrsp *activeQ_next;
+ /* Pointer to the previous active command */
+ struct uiscmdrsp *activeQ_prev;
} __packed;
+/* total = 28 bytes */
struct iochannel_vhba {
- struct vhba_wwnn wwnn; /* 8 bytes */
- struct vhba_config_max max; /* 20 bytes */
-} __packed; /* total = 28 bytes */
+ /* 8 bytes */
+ struct vhba_wwnn wwnn;
+ /* 20 bytes */
+ struct vhba_config_max max;
+} __packed;
+
struct iochannel_vnic {
- u8 macaddr[6]; /* 6 bytes */
- u32 num_rcv_bufs; /* 4 bytes */
- u32 mtu; /* 4 bytes */
- uuid_le zone_uuid; /* 16 bytes */
+ /* 6 bytes */
+ u8 macaddr[6];
+ /* 4 bytes */
+ u32 num_rcv_bufs;
+ /* 4 bytes */
+ u32 mtu;
+ /* 16 bytes */
+ uuid_le zone_uuid;
} __packed;
+
/*
* This is just the header of the IO channel. It is assumed that directly after
* this header there is a large region of memory which contains the command and
@@ -544,10 +587,11 @@ struct visor_io_channel {
} __packed;
/* INLINE functions for initializing and accessing I/O data channels. */
-#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
+#define SIZEOF_CMDRSP (64 * DIV_ROUND_UP(sizeof(struct uiscmdrsp), 64))
/* Use 4K page sizes when passing page info between Guest and IOPartition. */
#define PI_PAGE_SIZE 0x1000
#define PI_PAGE_MASK 0x0FFF
-#endif /* __IOCHANNEL_H__ */
+/* __IOCHANNEL_H__ */
+#endif
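The SIZEOF_CMDRSP change above swaps the driver-private COVER() helper for the standard DIV_ROUND_UP() pattern. A small user-space sketch showing the rounding it performs; DIV_ROUND_UP is re-defined locally so the example builds on its own, and the sample sizes are made up:

#include <assert.h>

/* Same definition as the kernel's DIV_ROUND_UP macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors the new SIZEOF_CMDRSP: round n up to the next multiple of 64. */
#define ROUND_UP_64(n) (64 * DIV_ROUND_UP((n), 64))

int main(void)
{
	assert(ROUND_UP_64(1)   == 64);
	assert(ROUND_UP_64(64)  == 64);
	assert(ROUND_UP_64(65)  == 128);
	assert(ROUND_UP_64(200) == 256);
	return 0;
}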
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index de0635542fbd..17c92294b4d1 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -23,7 +23,6 @@
*
* There should be nothing in this file that is private to the visorbus
* bus implementation itself.
- *
*/
#ifndef __VISORBUS_H__
@@ -45,6 +44,7 @@ extern struct bus_type visorbus_type;
typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
int status);
+
struct visorchipset_state {
u32 created:1;
u32 attached:1;
@@ -172,6 +172,10 @@ struct visor_device {
#define to_visor_device(x) container_of(x, struct visor_device, device)
+int visor_check_channel(struct channel_header *ch, uuid_le expected_uuid,
+ char *chname, u64 expected_min_bytes,
+ u32 expected_version, u64 expected_signature);
+
int visorbus_register_visor_driver(struct visor_driver *drv);
void visorbus_unregister_visor_driver(struct visor_driver *drv);
int visorbus_read_channel(struct visor_device *dev,
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index ed045eff0e33..96ac574ef1e7 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,56 +24,51 @@
UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-#define VISOR_CONTROLVM_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
#define CONTROLVM_MESSAGE_MAX 64
-/* Must increment this whenever you insert or delete fields within
- * this channel struct. Also increment whenever you change the meaning
- * of fields within this channel struct so as to break pre-existing
- * software. Note that you can usually add fields to the END of the
- * channel struct withOUT needing to increment this.
+/*
+ * Must increment this whenever you insert or delete fields within this channel
+ * struct. Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software. Note that you can
+ * usually add fields to the END of the channel struct withOUT needing to
+ * increment this.
*/
#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
-#define VISOR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
- (visor_check_channel(ch, \
- VISOR_CONTROLVM_CHANNEL_UUID, \
- "controlvm", \
- sizeof(struct visor_controlvm_channel), \
- VISOR_CONTROLVM_CHANNEL_VERSIONID, \
- VISOR_CONTROLVM_CHANNEL_SIGNATURE))
-
/* Defines for various channel queues */
-#define CONTROLVM_QUEUE_REQUEST 0
-#define CONTROLVM_QUEUE_RESPONSE 1
-#define CONTROLVM_QUEUE_EVENT 2
-#define CONTROLVM_QUEUE_ACK 3
+#define CONTROLVM_QUEUE_REQUEST 0
+#define CONTROLVM_QUEUE_RESPONSE 1
+#define CONTROLVM_QUEUE_EVENT 2
+#define CONTROLVM_QUEUE_ACK 3
/* Max num of messages stored during IOVM creation to be reused after crash */
#define CONTROLVM_CRASHMSG_MAX 2
+/*
+ * struct visor_segment_state
+ * @enabled: May enter other states.
+ * @active: Assigned to active partition.
+ * @alive: Configure message sent to service/server.
+ * @revoked: Similar to partition state ShuttingDown.
+ * @allocated: Memory (device/port number) has been selected by Command.
+ * @known: Has been introduced to the service/guest partition.
+ * @ready: Service/Guest partition has responded to introduction.
+ * @operating: Resource is configured and operating.
+ * @reserved: Natural alignment.
+ *
+ * Note: Don't use high bit unless we need to switch to ushort which is
+ * non-compliant.
+ */
struct visor_segment_state {
- /* Bit 0: May enter other states */
u16 enabled:1;
- /* Bit 1: Assigned to active partition */
u16 active:1;
- /* Bit 2: Configure message sent to service/server */
u16 alive:1;
- /* Bit 3: similar to partition state ShuttingDown */
u16 revoked:1;
- /* Bit 4: memory (device/port number) has been selected by Command */
u16 allocated:1;
- /* Bit 5: has been introduced to the service/guest partition */
u16 known:1;
- /* Bit 6: service/Guest partition has responded to introduction */
u16 ready:1;
- /* Bit 7: resource is configured and operating */
u16 operating:1;
- /* Natural alignment*/
u16 reserved:8;
-/* Note: don't use high bit unless we need to switch to ushort
- * which is non-compliant
- */
} __packed;
static const struct visor_segment_state segment_state_running = {
@@ -87,74 +83,101 @@ static const struct visor_segment_state segment_state_standby = {
1, 1, 0, 0, 1, 1, 1, 0
};
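With the bit meanings now documented in the struct comment, the positional initializer for segment_state_standby above reads as follows. An equivalent sketch using designated initializers (illustrative only, not part of the patch):

/* Equivalent to the positional { 1, 1, 0, 0, 1, 1, 1, 0 } initializer above. */
static const struct visor_segment_state example_segment_state_standby = {
	.enabled   = 1,	/* may enter other states */
	.active    = 1,	/* assigned to active partition */
	.alive     = 0,
	.revoked   = 0,
	.allocated = 1,	/* device/port number selected by Command */
	.known     = 1,	/* introduced to the service/guest partition */
	.ready     = 1,	/* partition has responded to introduction */
	.operating = 0,	/* not yet configured and operating */
};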
-/* Ids for commands that may appear in either queue of a ControlVm channel.
+/*
+ * enum controlvm_id
+ * @CONTROLVM_INVALID:
+ * @CONTROLVM_BUS_CREATE: CP --> SP, GP.
+ * @CONTROLVM_BUS_DESTROY: CP --> SP, GP.
+ * @CONTROLVM_BUS_CONFIGURE: CP --> SP.
+ * @CONTROLVM_BUS_CHANGESTATE: CP --> SP, GP.
+ * @CONTROLVM_BUS_CHANGESTATE_EVENT: SP, GP --> CP.
+ * @CONTROLVM_DEVICE_CREATE: CP --> SP, GP.
+ * @CONTROLVM_DEVICE_DESTROY: CP --> SP, GP.
+ * @CONTROLVM_DEVICE_CONFIGURE: CP --> SP.
+ * @CONTROLVM_DEVICE_CHANGESTATE: CP --> SP, GP.
+ * @CONTROLVM_DEVICE_CHANGESTATE_EVENT: SP, GP --> CP.
+ * @CONTROLVM_DEVICE_RECONFIGURE: CP --> Boot.
+ * @CONTROLVM_CHIPSET_INIT: CP --> SP, GP.
+ * @CONTROLVM_CHIPSET_STOP: CP --> SP, GP.
+ * @CONTROLVM_CHIPSET_READY: CP --> SP.
+ * @CONTROLVM_CHIPSET_SELFTEST: CP --> SP.
*
- * Commands that are initiated by the command partition (CP), by an IO or
- * console service partition (SP), or by a guest partition (GP)are:
- * - issued on the RequestQueue queue (q #0) in the ControlVm channel
- * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ * Ids for commands that may appear in either queue of a ControlVm channel.
*
- * Events that are initiated by an IO or console service partition (SP) or
- * by a guest partition (GP) are:
- * - issued on the EventQueue queue (q #2) in the ControlVm channel
- * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
+ * Commands that are initiated by the command partition (CP), by an IO or
+ * console service partition (SP), or by a guest partition (GP) are:
+ * - issued on the RequestQueue queue (q #0) in the ControlVm channel
+ * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ *
+ * Events that are initiated by an IO or console service partition (SP) or
+ * by a guest partition (GP) are:
+ * - issued on the EventQueue queue (q #2) in the ControlVm channel
+ * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
*/
enum controlvm_id {
CONTROLVM_INVALID = 0,
- /* SWITCH commands required Parameter: SwitchNumber */
- /* BUS commands required Parameter: BusNumber */
- CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */
- CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */
- CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */
- CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */
- CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */
-/* DEVICE commands required Parameter: BusNumber, DeviceNumber */
-
- CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */
- CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */
- CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */
- CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */
- CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */
- CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */
-/* CHIPSET commands */
- CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */
- CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */
- CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
- CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
-
+ /*
+ * SWITCH commands require the parameter SwitchNumber.
+ * BUS commands require the parameter BusNumber.
+ */
+ CONTROLVM_BUS_CREATE = 0x101,
+ CONTROLVM_BUS_DESTROY = 0x102,
+ CONTROLVM_BUS_CONFIGURE = 0x104,
+ CONTROLVM_BUS_CHANGESTATE = 0x105,
+ CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106,
+ /* DEVICE commands require the parameters BusNumber and DeviceNumber */
+ CONTROLVM_DEVICE_CREATE = 0x201,
+ CONTROLVM_DEVICE_DESTROY = 0x202,
+ CONTROLVM_DEVICE_CONFIGURE = 0x203,
+ CONTROLVM_DEVICE_CHANGESTATE = 0x204,
+ CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205,
+ CONTROLVM_DEVICE_RECONFIGURE = 0x206,
+ /* CHIPSET commands */
+ CONTROLVM_CHIPSET_INIT = 0x301,
+ CONTROLVM_CHIPSET_STOP = 0x302,
+ CONTROLVM_CHIPSET_READY = 0x304,
+ CONTROLVM_CHIPSET_SELFTEST = 0x305,
};
+/*
+ * struct irq_info
+ * @reserved1: Natural alignment purposes
+ * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the
+ * corresponding interrupt pin from Monitor; and the interrupt
+ * pin is used to connect to the corresponding interrupt.
+ * Used by IOPart-GP only.
+ * @recv_irq_vector: Specifies interrupt vector. It, interrupt pin, and shared
+ * are used to connect to the corresponding interrupt.
+ * Used by IOPart-GP only.
+ * @recv_irq_shared: Specifies if the recvInterrupt is shared (0 = not shared;
+ * 1 = shared). It, interrupt pin, and vector are used to
+ * connect to the corresponding interrupt.
+ * Used by IOPart-GP only.
+ * @reserved: Natural alignment purposes
+ */
struct irq_info {
u64 reserved1;
-
- /* specifies interrupt handle. It is used to retrieve the
- * corresponding interrupt pin from Monitor; and the
- * interrupt pin is used to connect to the corresponding
- * interrupt. Used by IOPart-GP only.
- */
u64 recv_irq_handle;
-
- /* specifies interrupt vector. It, interrupt pin, and shared are
- * used to connect to the corresponding interrupt. Used by
- * IOPart-GP only.
- */
u32 recv_irq_vector;
-
- /* specifies if the recvInterrupt is shared. It, interrupt pin
- * and vector are used to connect to 0 = not shared; 1 = shared.
- * the corresponding interrupt. Used by IOPart-GP only.
- */
u8 recv_irq_shared;
- u8 reserved[3]; /* Natural alignment purposes */
+ u8 reserved[3];
} __packed;
+/*
+ * struct efi_visor_indication
+ * @boot_to_fw_ui: Stop in UEFI UI
+ * @clear_nvram: Clear NVRAM
+ * @clear_cmos: Clear CMOS
+ * @boot_to_tool: Run install tool
+ * @reserved: Natural alignment
+ */
struct efi_visor_indication {
- u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */
- u64 clear_nvram:1; /* Bit 1: Clear NVRAM */
- u64 clear_cmos:1; /* Bit 2: Clear CMOS */
- u64 boot_to_tool:1; /* Bit 3: Run install tool */
- /* remaining bits are available */
- u64 reserved:60; /* Natural alignment */
+ u64 boot_to_fw_ui:1;
+ u64 clear_nvram:1;
+ u64 clear_cmos:1;
+ u64 boot_to_tool:1;
+ /* Remaining bits are available */
+ u64 reserved:60;
} __packed;
enum visor_chipset_feature {
@@ -162,182 +185,249 @@ enum visor_chipset_feature {
VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
};
-/* This is the common structure that is at the beginning of every
- * ControlVm message (both commands and responses) in any ControlVm
- * queue. Commands are easily distinguished from responses by
- * looking at the flags.response field.
+/*
+ * struct controlvm_message_header
+ * @id: See CONTROLVM_ID.
+ * @message_size: Includes size of this struct + size of message.
+ * @segment_index: Index of segment containing Vm message/information.
+ * @completion_status: Error status code or result of message completion.
+ * @struct flags:
+ * @failed: =1 in a response to signify failure.
+ * @response_expected: =1 in all messages that expect a response.
+ * @server: =1 in all bus & device-related messages where the
+ * message receiver is to act as the bus or device
+ * server.
+ * @test_message: =1 for testing use only (Control and Command
+ * ignore this).
+ * @partial_completion: =1 if there are forthcoming responses/acks
+ * associated with this message.
+ * @preserve: =1 this is to let us know to preserve channel
+ * contents.
+ * @writer_in_diag: =1 the DiagWriter is active in the Diagnostic
+ * Partition.
+ * @reserve: Natural alignment.
+ * @reserved: Natural alignment.
+ * @message_handle: Identifies the particular message instance.
+ * @payload_vm_offset: Offset of payload area from start of this instance.
+ * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm
+ * segment.
+ * @payload_bytes: Actual number of bytes of payload area to copy between
+ * IO/Command. If non-zero, there is a payload to copy.
+ *
+ * This is the common structure that is at the beginning of every
+ * ControlVm message (both commands and responses) in any ControlVm
+ * queue. Commands are easily distinguished from responses by
+ * looking at the flags.response field.
*/
struct controlvm_message_header {
- u32 id; /* See CONTROLVM_ID. */
- /* For requests, indicates the message type. */
- /* For responses, indicates the type of message we are responding to. */
-
- /* Includes size of this struct + size of message */
+ u32 id;
+ /*
+ * For requests, indicates the message type. For responses, indicates
+ * the type of message we are responding to.
+ */
u32 message_size;
- /* Index of segment containing Vm message/information */
u32 segment_index;
- /* Error status code or result of message completion */
u32 completion_status;
struct {
- /* =1 in a response to signify failure */
u32 failed:1;
- /* =1 in all messages that expect a response */
u32 response_expected:1;
- /* =1 in all bus & device-related messages where the message
- * receiver is to act as the bus or device server
- */
u32 server:1;
- /* =1 for testing use only (Control and Command ignore this */
u32 test_message:1;
- /* =1 if there are forthcoming responses/acks associated
- * with this message
- */
u32 partial_completion:1;
- /* =1 this is to let us know to preserve channel contents */
u32 preserve:1;
- /* =1 the DiagWriter is active in the Diagnostic Partition */
u32 writer_in_diag:1;
- /* Natural alignment */
u32 reserve:25;
} __packed flags;
- /* Natural alignment */
u32 reserved;
- /* Identifies the particular message instance */
u64 message_handle;
- /* request instances with the corresponding response instance. */
- /* Offset of payload area from start of this instance */
u64 payload_vm_offset;
- /* Maximum bytes allocated in payload area of ControlVm segment */
u32 payload_max_bytes;
- /* Actual number of bytes of payload area to copy between IO/Command */
u32 payload_bytes;
- /* if non-zero, there is a payload to copy. */
} __packed;
+/*
+ * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE
+ * @bus_no: Bus # (0..n-1) from the msg receiver's end.
+ * @dev_no: Bus-relative (0..n-1) device number.
+ * @channel_addr: Guest physical address of the channel, which can be
+ * dereferenced by the receiver of this ControlVm command.
+ * @channel_bytes: Specifies size of the channel in bytes.
+ * @data_type_uuid: Specifies format of data in channel.
+ * @dev_inst_uuid: Instance guid for the device.
+ * @irq_info intr: Specifies interrupt information.
+ */
struct controlvm_packet_device_create {
- u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
- u32 dev_no; /* bus-relative (0..n-1) device number */
- /* Guest physical address of the channel, which can be dereferenced by
- * the receiver of this ControlVm command
- */
+ u32 bus_no;
+ u32 dev_no;
u64 channel_addr;
- u64 channel_bytes; /* specifies size of the channel in bytes */
- uuid_le data_type_uuid; /* specifies format of data in channel */
- uuid_le dev_inst_uuid; /* instance guid for the device */
- struct irq_info intr; /* specifies interrupt information */
-} __packed; /* for CONTROLVM_DEVICE_CREATE */
+ u64 channel_bytes;
+ uuid_le data_type_uuid;
+ uuid_le dev_inst_uuid;
+ struct irq_info intr;
+} __packed;
+/*
+ * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE
+ * @bus_no: Bus number (0..n-1) from the msg receiver's perspective.
+ * @dev_no: Bus-relative (0..n-1) device number.
+ */
struct controlvm_packet_device_configure {
- /* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
- /* Control uses header SegmentIndex field to access bus number... */
- u32 dev_no; /* bus-relative (0..n-1) device number */
-} __packed; /* for CONTROLVM_DEVICE_CONFIGURE */
+ /* Control uses header SegmentIndex field to access bus number. */
+ u32 dev_no;
+} __packed;
+/* Total 128 bytes */
struct controlvm_message_device_create {
struct controlvm_message_header header;
struct controlvm_packet_device_create packet;
-} __packed; /* total 128 bytes */
+} __packed;
+/* Total 56 bytes */
struct controlvm_message_device_configure {
struct controlvm_message_header header;
struct controlvm_packet_device_configure packet;
-} __packed; /* total 56 bytes */
+} __packed;
-/* This is the format for a message in any ControlVm queue. */
+/*
+ * struct controlvm_message_packet - This is the format for a message in any
+ * ControlVm queue.
+ * @struct create_bus: For CONTROLVM_BUS_CREATE.
+ * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ * @dev_count: Indicates the max number of devices on this bus.
+ * @channel_addr: Guest physical address of the channel, which can be
+ * dereferenced by the receiver of this ControlVM
+ * command.
+ * @channel_bytes: Size of the channel.
+ * @bus_data_type_uuid: Indicates format of data in bus channel.
+ * @bus_inst_uuid: Instance uuid for the bus.
+ *
+ * @struct destroy_bus: For CONTROLVM_BUS_DESTROY.
+ * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ * @reserved: Natural alignment purposes.
+ *
+ * @struct configure_bus: For CONTROLVM_BUS_CONFIGURE.
+ * @bus_no: Bus # (0..n-1) from the receiver's perspective.
+ * @reserved1: For alignment purposes.
+ * @guest_handle: This is used to convert guest physical address to
+ * physical address.
+ * @recv_bus_irq_handle: Specifies interrupt info. It is used by SP to
+ * register to receive interrupts from the CP. This
+ * interrupt is used for bus level notifications.
+ * The corresponding sendBusInterruptHandle is kept
+ * in CP.
+ *
+ * @struct create_device: For CONTROLVM_DEVICE_CREATE.
+ *
+ * @struct destroy_device: For CONTROLVM_DEVICE_DESTROY.
+ * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ * @dev_no: Bus-relative (0..n-1) device number.
+ *
+ * @struct configure_device: For CONTROLVM_DEVICE_CONFIGURE.
+ *
+ * @struct reconfigure_device: For CONTROLVM_DEVICE_RECONFIGURE.
+ * @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ * @dev_no: Bus-relative (0..n-1) device number.
+ *
+ * @struct bus_change_state: For CONTROLVM_BUS_CHANGESTATE.
+ * @bus_no:
+ * @struct state:
+ * @reserved: Natural alignment purposes.
+ *
+ * @struct device_change_state: For CONTROLVM_DEVICE_CHANGESTATE.
+ * @bus_no:
+ * @dev_no:
+ * @struct state:
+ * @struct flags:
+ * @phys_device: =1 if message is for a physical device.
+ * @reserved: Natural alignment.
+ * @reserved1: Natural alignment.
+ * @reserved: Natural alignment purposes.
+ *
+ * @struct device_change_state_event: For CONTROLVM_DEVICE_CHANGESTATE_EVENT.
+ * @bus_no:
+ * @dev_no:
+ * @struct state:
+ * @reserved: Natural alignment purposes.
+ *
+ * @struct init_chipset: For CONTROLVM_CHIPSET_INIT.
+ * @bus_count: Indicates the max number of busses.
+ * @switch_count: Indicates the max number of switches.
+ * @enum features:
+ * @platform_number:
+ *
+ * @struct chipset_selftest: For CONTROLVM_CHIPSET_SELFTEST.
+ * @options: Reserved.
+ * @test: Bit 0 set to run embedded selftest.
+ *
+ * @addr: A physical address of something that can be dereferenced by the
+ * receiver of this ControlVm command.
+ *
+ * @handle: A handle of something (depends on command id).
+ */
struct controlvm_message_packet {
union {
struct {
- /* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
- /* indicates the max number of devices on this bus */
u32 dev_count;
- /* Guest physical address of the channel, which can be
- * dereferenced by the receiver of this ControlVm command
- */
u64 channel_addr;
- u64 channel_bytes; /* size of the channel */
- /* indicates format of data in bus channel*/
+ u64 channel_bytes;
uuid_le bus_data_type_uuid;
- uuid_le bus_inst_uuid; /* instance uuid for the bus */
- } __packed create_bus; /* for CONTROLVM_BUS_CREATE */
+ uuid_le bus_inst_uuid;
+ } __packed create_bus;
struct {
- /* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
- u32 reserved; /* Natural alignment purposes */
- } __packed destroy_bus; /* for CONTROLVM_BUS_DESTROY */
+ u32 reserved;
+ } __packed destroy_bus;
struct {
- /* bus # (0..n-1) from the receiver's perspective */
u32 bus_no;
- u32 reserved1; /* for alignment purposes */
- /* This is used to convert guest physical address to physical address */
+ u32 reserved1;
u64 guest_handle;
u64 recv_bus_irq_handle;
- /* specifies interrupt info. It is used by SP
- * to register to receive interrupts from the
- * CP. This interrupt is used for bus level
- * notifications. The corresponding
- * sendBusInterruptHandle is kept in CP.
- */
- } __packed configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
- /* for CONTROLVM_DEVICE_CREATE */
+ } __packed configure_bus;
struct controlvm_packet_device_create create_device;
struct {
- /* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
- u32 dev_no; /* bus-relative (0..n-1) device # */
- } __packed destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
- /* for CONTROLVM_DEVICE_CONFIGURE */
+ u32 dev_no;
+ } __packed destroy_device;
struct controlvm_packet_device_configure configure_device;
struct {
- /* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
- u32 dev_no; /* bus-relative (0..n-1) device # */
+ u32 dev_no;
} __packed reconfigure_device;
- /* for CONTROLVM_DEVICE_RECONFIGURE */
struct {
u32 bus_no;
struct visor_segment_state state;
- u8 reserved[2]; /* Natural alignment purposes */
- } __packed bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
+ u8 reserved[2];
+ } __packed bus_change_state;
struct {
u32 bus_no;
u32 dev_no;
struct visor_segment_state state;
struct {
- /* =1 if message is for a physical device */
u32 phys_device:1;
- u32 reserved:31; /* Natural alignment */
- u32 reserved1; /* Natural alignment */
+ u32 reserved:31;
+ u32 reserved1;
} __packed flags;
- u8 reserved[2]; /* Natural alignment purposes */
+ u8 reserved[2];
} __packed device_change_state;
- /* for CONTROLVM_DEVICE_CHANGESTATE */
struct {
u32 bus_no;
u32 dev_no;
struct visor_segment_state state;
- u8 reserved[6]; /* Natural alignment purposes */
+ u8 reserved[6];
} __packed device_change_state_event;
- /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
struct {
- /* indicates the max number of busses */
u32 bus_count;
- /* indicates the max number of switches */
u32 switch_count;
enum visor_chipset_feature features;
- u32 platform_number; /* Platform Number */
- } __packed init_chipset; /* for CONTROLVM_CHIPSET_INIT */
+ u32 platform_number;
+ } __packed init_chipset;
struct {
- u32 options; /* reserved */
- u32 test; /* bit 0 set to run embedded selftest */
+ u32 options;
+ u32 test;
} __packed chipset_selftest;
- /* for CONTROLVM_CHIPSET_SELFTEST */
- /* a physical address of something, that can be dereferenced
- * by the receiver of this ControlVm command
- */
u64 addr;
- /* a handle of something (depends on command id) */
u64 handle;
};
} __packed;
@@ -348,93 +438,139 @@ struct controlvm_message {
struct controlvm_message_packet cmd;
} __packed;
+/*
+ * struct visor_controlvm_channel
+ * @struct header:
+ * @gp_controlvm: Guest phys addr of this channel.
+ * @gp_partition_tables: Guest phys addr of partition tables.
+ * @gp_diag_guest: Guest phys addr of diagnostic channel.
+ * @gp_boot_romdisk: Guest phys addr of (read-only) Boot
+ * ROM disk.
+ * @gp_boot_ramdisk: Guest phys addr of writable Boot RAM
+ * disk.
+ * @gp_acpi_table: Guest phys addr of acpi table.
+ * @gp_control_channel: Guest phys addr of control channel.
+ * @gp_diag_romdisk: Guest phys addr of diagnostic ROM disk.
+ * @gp_nvram: Guest phys addr of NVRAM channel.
+ * @request_payload_offset: Offset to request payload area.
+ * @event_payload_offset: Offset to event payload area.
+ * @request_payload_bytes: Bytes available in request payload area.
+ * @event_payload_bytes: Bytes available in event payload area.
+ * @control_channel_bytes:
+ * @nvram_channel_bytes: Bytes in PartitionNvram segment.
+ * @message_bytes: sizeof(CONTROLVM_MESSAGE).
+ * @message_count: CONTROLVM_MESSAGE_MAX.
+ * @gp_smbios_table: Guest phys addr of SMBIOS tables.
+ * @gp_physical_smbios_table: Guest phys addr of SMBIOS table.
+ * @gp_reserved: VISOR_MAX_GUESTS_PER_SERVICE.
+ * @virtual_guest_firmware_image_base: Guest physical address of EFI firmware
+ * image base.
+ * @virtual_guest_firmware_entry_point: Guest physical address of EFI firmware
+ * entry point.
+ * @virtual_guest_firmware_image_size: Guest EFI firmware image size.
+ * @virtual_guest_firmware_boot_base: GPA = 1MB where EFI firmware image is
+ * copied to.
+ * @virtual_guest_image_base:
+ * @virtual_guest_image_size:
+ * @prototype_control_channel_offset:
+ * @virtual_guest_partition_handle:
+ * @restore_action: Restore Action field to restore the
+ * guest partition.
+ * @dump_action: For Windows guests it shows if the
+ * visordisk is in dump mode.
+ * @nvram_fail_count:
+ * @saved_crash_message_count: = CONTROLVM_CRASHMSG_MAX.
+ * @saved_crash_message_offset: Offset to request payload area needed
+ * for crash dump.
+ * @installation_error: Type of error encountered during
+ * installation.
+ * @installation_text_id: Id of string to display.
+ * @installation_remaining_steps: Number of remaining installation steps
+ * (for progress bars).
+ * @tool_action: VISOR_TOOL_ACTIONS Installation Action
+ * field.
+ * @reserved: Alignment.
+ * @struct efi_visor_ind:
+ * @sp_reserved:
+ * @reserved2: Force signals to begin on 128-byte
+ * cache line.
+ * @struct request_queue: Guest partition uses this queue to send
+ * requests to Control.
+ * @struct response_queue: Control uses this queue to respond to
+ * service or guest partition request.
+ * @struct event_queue: Control uses this queue to send events
+ * to guest partition.
+ * @struct event_ack_queue: Service or guest partition uses this
+ * queue to ack Control events.
+ * @struct request_msg: Request fixed-size message pool -
+ * does not include payload.
+ * @struct response_msg: Response fixed-size message pool -
+ * does not include payload.
+ * @struct event_msg: Event fixed-size message pool -
+ * does not include payload.
+ * @struct event_ack_msg: Ack fixed-size message pool -
+ * does not include payload.
+ * @struct saved_crash_msg: Message stored during IOVM creation to
+ * be reused after crash.
+ */
struct visor_controlvm_channel {
struct channel_header header;
- u64 gp_controlvm; /* guest phys addr of this channel */
- u64 gp_partition_tables;/* guest phys addr of partition tables */
- u64 gp_diag_guest; /* guest phys addr of diagnostic channel */
- u64 gp_boot_romdisk;/* guest phys addr of (read* only) Boot ROM disk */
- u64 gp_boot_ramdisk;/* guest phys addr of writable Boot RAM disk */
- u64 gp_acpi_table; /* guest phys addr of acpi table */
- u64 gp_control_channel;/* guest phys addr of control channel */
- u64 gp_diag_romdisk;/* guest phys addr of diagnostic ROM disk */
- u64 gp_nvram; /* guest phys addr of NVRAM channel */
- u64 request_payload_offset; /* Offset to request payload area */
- u64 event_payload_offset; /* Offset to event payload area */
- /* Bytes available in request payload area */
+ u64 gp_controlvm;
+ u64 gp_partition_tables;
+ u64 gp_diag_guest;
+ u64 gp_boot_romdisk;
+ u64 gp_boot_ramdisk;
+ u64 gp_acpi_table;
+ u64 gp_control_channel;
+ u64 gp_diag_romdisk;
+ u64 gp_nvram;
+ u64 request_payload_offset;
+ u64 event_payload_offset;
u32 request_payload_bytes;
- u32 event_payload_bytes;/* Bytes available in event payload area */
+ u32 event_payload_bytes;
u32 control_channel_bytes;
- u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */
- u32 message_bytes; /* sizeof(CONTROLVM_MESSAGE) */
- u32 message_count; /* CONTROLVM_MESSAGE_MAX */
- u64 gp_smbios_table; /* guest phys addr of SMBIOS tables */
- u64 gp_physical_smbios_table; /* guest phys addr of SMBIOS table */
- /* VISOR_MAX_GUESTS_PER_SERVICE */
+ u32 nvram_channel_bytes;
+ u32 message_bytes;
+ u32 message_count;
+ u64 gp_smbios_table;
+ u64 gp_physical_smbios_table;
char gp_reserved[2688];
-
- /* guest physical address of EFI firmware image base */
u64 virtual_guest_firmware_image_base;
-
- /* guest physical address of EFI firmware entry point */
u64 virtual_guest_firmware_entry_point;
-
- /* guest EFI firmware image size */
u64 virtual_guest_firmware_image_size;
-
- /* GPA = 1MB where EFI firmware image is copied to */
u64 virtual_guest_firmware_boot_base;
u64 virtual_guest_image_base;
u64 virtual_guest_image_size;
u64 prototype_control_channel_offset;
u64 virtual_guest_partition_handle;
- /* Restore Action field to restore the guest partition */
u16 restore_action;
- /* For Windows guests it shows if the visordisk is in dump mode */
u16 dump_action;
u16 nvram_fail_count;
- u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */
- /* Offset to request payload area needed for crash dump */
+ u16 saved_crash_message_count;
u32 saved_crash_message_offset;
- /* Type of error encountered during installation */
u32 installation_error;
- u32 installation_text_id; /* Id of string to display */
- /* Number of remaining installation steps (for progress bars) */
+ u32 installation_text_id;
u16 installation_remaining_steps;
- /* VISOR_TOOL_ACTIONS Installation Action field */
u8 tool_action;
- u8 reserved; /* alignment */
+ u8 reserved;
struct efi_visor_indication efi_visor_ind;
u32 sp_reserved;
- /* Force signals to begin on 128-byte cache line */
u8 reserved2[28];
- /* guest partition uses this queue to send requests to Control */
struct signal_queue_header request_queue;
- /* Control uses this queue to respond to service or guest
- * partition requests
- */
struct signal_queue_header response_queue;
- /* Control uses this queue to send events to guest partition */
struct signal_queue_header event_queue;
- /* Service or guest partition uses this queue to ack Control events */
struct signal_queue_header event_ack_queue;
- /* Request fixed-size message pool - does not include payload */
- struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
-
- /* Response fixed-size message pool - does not include payload */
- struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
-
- /* Event fixed-size message pool - does not include payload */
- struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
-
- /* Ack fixed-size message pool - does not include payload */
- struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
-
- /* Message stored during IOVM creation to be reused after crash */
- struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
+ struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
} __packed;
-/* The following header will be located at the beginning of PayloadVmOffset for
+/*
+ * struct visor_controlvm_parameters_header
+ *
+ * The following header will be located at the beginning of PayloadVmOffset for
* various ControlVm commands. The receiver of a ControlVm command with a
* PayloadVmOffset will dereference this address and then use connection_offset,
* initiator_offset, and target_offset to get the location of UTF-8 formatted
@@ -457,7 +593,8 @@ struct visor_controlvm_parameters_header {
u32 name_length;
uuid_le id;
u32 revision;
- u32 reserved; /* Natural alignment */
+ /* Natural alignment */
+ u32 reserved;
} __packed;
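A hedged sketch of the lookup the comment above describes: dereference the payload at payload_vm_offset, then follow one of the string offsets. It assumes connection_offset is a member of struct visor_controlvm_parameters_header, as the comment implies; error and bounds checking are omitted:

/* Sketch only; assumes connection_offset exists in the parameters header. */
static const char *example_connection_name(const struct controlvm_message_header *hdr,
					   const void *channel_base)
{
	const struct visor_controlvm_parameters_header *parm;

	parm = (const struct visor_controlvm_parameters_header *)
		((const u8 *)channel_base + hdr->payload_vm_offset);
	/* UTF-8 string lives connection_offset bytes past the parameters header. */
	return (const char *)parm + parm->connection_offset;
}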
/* General Errors------------------------------------------------------[0-99] */
@@ -467,72 +604,57 @@ struct visor_controlvm_parameters_header {
#define CONTROLVM_RESP_KMALLOC_FAILED 3
#define CONTROLVM_RESP_ID_UNKNOWN 4
#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5
-
/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100
#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101
-
/* Maximum Limit----------------------------------------------------[200-299] */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+/* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201
+/* DEVICE_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202
/* Payload and Parameter Related------------------------------------[400-499] */
-#define CONTROLVM_RESP_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
- * DEVICE_CONFIGURE
- */
-#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
-#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
-/* Specified[Packet Structure] Value-------------------------------[500-599] */
-#define CONTROLVM_RESP_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
- * BUS_CONFIGURE,
- * DEVICE_CREATE,
- * DEVICE_CONFIG
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT*/
- /* DEVICE_CREATE,
- * DEVICE_CONFIGURE,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_CHANNEL_INVALID 502 /* DEVICE_CREATE,
- * DEVICE_CONFIGURE
- */
-/* Partition Driver Callback Interface----------------------[600-699] */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
-/* Unable to invoke VIRTPCI callback */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
-/* VIRTPCI Callback returned error */
+/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_PAYLOAD_INVALID 400
+/* Multiple */
+#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401
+/* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402
+/* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403
+/* Specified[Packet Structure] Value--------------------------------[500-599] */
+/* SWITCH_ATTACHINTPORT */
+/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */
+#define CONTROLVM_RESP_BUS_INVALID 500
+/* SWITCH_ATTACHINTPORT */
+/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_DEVICE_INVALID 501
+/* DEVICE_CREATE, DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_CHANNEL_INVALID 502
+/* Partition Driver Callback Interface------------------------------[600-699] */
+/* Unable to invoke VIRTPCI callback. */
+/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604
+/* VIRTPCI callback returned error. */
+/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605
+/* Generic device callback returned error. */
+/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */
#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606
- /* SWITCH_ATTACHEXTPORT,
- * SWITCH_DETACHEXTPORT
- * DEVICE_CONFIGURE
- */
-
-/* generic device callback returned error */
/* Bus Related------------------------------------------------------[700-799] */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+/* BUS_DESTROY */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700
/* Channel Related--------------------------------------------------[800-899] */
-#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+/* GET_CHANNELINFO, DEVICE_DESTROY */
+#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800
+/* DEVICE_CREATE */
+#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801
/* Chipset Shutdown Related---------------------------------------[1000-1099] */
#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000
#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
-
/* Chipset Stop Related-------------------------------------------[1100-1199] */
#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100
#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101
-
/* Device Related-------------------------------------------------[1400-1499] */
#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400
-#endif /* __CONTROLVMCHANNEL_H__ */
+/* __CONTROLVMCHANNEL_H__ */
+#endif
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 01d7d517dba7..2c820e21f1b7 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -15,13 +16,15 @@
#ifndef __VBUSCHANNEL_H__
#define __VBUSCHANNEL_H__
-/* The vbus channel is the channel area provided via the BUS_CREATE controlvm
- * message for each virtual bus. This channel area is provided to both server
- * and client ends of the bus. The channel header area is initialized by
- * the server, and the remaining information is filled in by the client.
- * We currently use this for the client to provide various information about
- * the client devices and client drivers for the server end to see.
+/*
+ * The vbus channel is the channel area provided via the BUS_CREATE controlvm
+ * message for each virtual bus. This channel area is provided to both server
+ * and client ends of the bus. The channel header area is initialized by
+ * the server, and the remaining information is filled in by the client.
+ * We currently use this for the client to provide various information about
+ * the client devices and client drivers for the server end to see.
*/
+
#include <linux/uuid.h>
#include <linux/ctype.h>
#include "channel.h"
@@ -30,11 +33,9 @@
#define VISOR_VBUS_CHANNEL_UUID \
UUID_LE(0x193b331b, 0xc58f, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID;
-#define VISOR_VBUS_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
-
-/* Must increment this whenever you insert or delete fields within this channel
+/*
+ * Must increment this whenever you insert or delete fields within this channel
* struct. Also increment whenever you change the meaning of fields within this
* channel struct so as to break pre-existing software. Note that you can
* usually add fields to the END of the channel struct withOUT needing to
@@ -42,43 +43,65 @@ static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID;
*/
#define VISOR_VBUS_CHANNEL_VERSIONID 1
-/*
+static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID;
+
+/*
+ * struct visor_vbus_deviceinfo
+ * @devtype: Short string identifying the device type.
+ * @drvname: Driver .sys file name.
+ * @infostrs: Kernel version.
+ * @reserved: Pad size to 256 bytes.
+ *
* An array of this struct is present in the channel area for each vbus.
- * (See vbuschannel.h.)
- * It is filled in by the client side to provide info about the device
- * and driver from the client's perspective.
+ * (See vbuschannel.h.) It is filled in by the client side to provide info
+ * about the device and driver from the client's perspective.
*/
struct visor_vbus_deviceinfo {
- u8 devtype[16]; /* short string identifying the device type */
- u8 drvname[16]; /* driver .sys file name */
- u8 infostrs[96]; /* kernel version */
- u8 reserved[128]; /* pad size to 256 bytes */
+ u8 devtype[16];
+ u8 drvname[16];
+ u8 infostrs[96];
+ u8 reserved[128];
} __packed;
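The @reserved member exists purely to pad each entry to 256 bytes (16 + 16 + 96 + 128). A build-time check of that invariant could look like the sketch below (illustrative only, not part of this patch):

/* Illustrative only: fails the build if the 256-byte layout ever changes. */
static inline void visor_vbus_deviceinfo_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct visor_vbus_deviceinfo) != 256);
}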
+/*
+ * struct visor_vbus_headerinfo
+ * @struct_bytes: Size of this struct in bytes.
+ * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO.
+ * @dev_info_count: Num of items in DevInfo member. This is the
+ * allocated size.
+ * @chp_info_offset: Byte offset from beginning of this struct to the
+ * ChpInfo struct.
+ * @bus_info_offset: Byte offset from beginning of this struct to the
+ * BusInfo struct.
+ * @dev_info_offset: Byte offset from beginning of this struct to the
+ * DevInfo array.
+ * @reserved: Natural Alignment
+ */
struct visor_vbus_headerinfo {
- u32 struct_bytes; /* size of this struct in bytes */
- u32 device_info_struct_bytes; /* sizeof(VISOR_VBUS_DEVICEINFO) */
- u32 dev_info_count; /* num of items in DevInfo member */
- /* (this is the allocated size) */
- u32 chp_info_offset; /* byte offset from beginning of this struct */
- /* to the ChpInfo struct (below) */
- u32 bus_info_offset; /* byte offset from beginning of this struct */
- /* to the BusInfo struct (below) */
- u32 dev_info_offset; /* byte offset from beginning of this struct */
- /* to the DevInfo array (below) */
+ u32 struct_bytes;
+ u32 device_info_struct_bytes;
+ u32 dev_info_count;
+ u32 chp_info_offset;
+ u32 bus_info_offset;
+ u32 dev_info_offset;
u8 reserved[104];
} __packed;
+/*
+ * struct visor_vbus_channel
+ * @channel_header: Initialized by server.
+ * @hdr_info: Initialized by server.
+ * @chp_info: Describes client chipset device and driver.
+ * @bus_info: Describes client bus device and driver.
+ * @dev_info: Describes client device and driver for each device on the
+ * bus.
+ */
struct visor_vbus_channel {
- struct channel_header channel_header; /* initialized by server */
- struct visor_vbus_headerinfo hdr_info; /* initialized by server */
- /* the remainder of this channel is filled in by the client */
+ struct channel_header channel_header;
+ struct visor_vbus_headerinfo hdr_info;
+ /* The remainder of this channel is filled in by the client */
struct visor_vbus_deviceinfo chp_info;
- /* describes client chipset device and driver */
struct visor_vbus_deviceinfo bus_info;
- /* describes client bus device and driver */
struct visor_vbus_deviceinfo dev_info[0];
- /* describes client device and driver for each device on the bus */
} __packed;
#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 1c785dd19ddd..9012cc1a8902 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -1,4 +1,5 @@
-/* visorbus_main.c
+/*
+ * visorbus_main.c
*
 * Copyright © 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
@@ -20,15 +21,12 @@
#include "visorbus.h"
#include "visorbus_private.h"
-#define MYDRVNAME "visorbus"
-
-/* Display string that is guaranteed to be no longer the 99 characters*/
+/* Display string that is guaranteed to be no longer than 99 characters */
#define LINESIZE 99
-
-#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
#define POLLJIFFIES_NORMALCHANNEL 10
-static bool initialized; /* stores whether bus_registration was successful */
+/* Stores whether bus_registration was successful. */
+static bool initialized;
static struct dentry *visorbus_debugfs_dir;
/*
@@ -73,8 +71,62 @@ static LIST_HEAD(list_all_bus_instances);
/* list of visor_device structs, linked via .list_all */
static LIST_HEAD(list_all_device_instances);
-static int
-visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
+/*
+ * Generic function useful for validating any type of channel when it is
+ * received by the client that will be accessing the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+int visor_check_channel(struct channel_header *ch,
+ uuid_le expected_uuid,
+ char *chname,
+ u64 expected_min_bytes,
+ u32 expected_version,
+ u64 expected_signature)
+{
+ if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
+ /* caller wants us to verify type GUID */
+ if (uuid_le_cmp(ch->chtype, expected_uuid) != 0) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
+ chname, &expected_uuid,
+ &expected_uuid, &ch->chtype);
+ return 0;
+ }
+ }
+ /* verify channel size */
+ if (expected_min_bytes > 0) {
+ if (ch->size < expected_min_bytes) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+ chname, &expected_uuid,
+ (unsigned long long)expected_min_bytes,
+ ch->size);
+ return 0;
+ }
+ }
+ /* verify channel version */
+ if (expected_version > 0) {
+ if (ch->version_id != expected_version) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
+ chname, &expected_uuid,
+ (unsigned long)expected_version,
+ ch->version_id);
+ return 0;
+ }
+ }
+ /* verify channel signature */
+ if (expected_signature > 0) {
+ if (ch->signature != expected_signature) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+ chname, &expected_uuid,
+ expected_signature, ch->signature);
+ return 0;
+ }
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(visor_check_channel);
+
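Since visor_check_channel() is now exported and the wrapper macro in controlvmchannel.h is removed, callers invoke it directly. A minimal sketch following the argument pattern of the removed VISOR_CONTROLVM_CHANNEL_OK_CLIENT macro; VISOR_CHANNEL_SIGNATURE stands in for the dropped VISOR_CONTROLVM_CHANNEL_SIGNATURE alias, which was defined to that value before this patch:

/* Sketch only, not part of this patch. Assumes the visorbus.h and
 * controlvmchannel.h definitions above are in scope.
 */
static int example_validate_controlvm_channel(struct channel_header *ch)
{
	if (!visor_check_channel(ch, VISOR_CONTROLVM_CHANNEL_UUID,
				 "controlvm",
				 sizeof(struct visor_controlvm_channel),
				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
				 VISOR_CHANNEL_SIGNATURE))
		return -EINVAL; /* mismatch details already logged via pr_err() */
	return 0;
}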
+static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
{
struct visor_device *dev;
uuid_le guid;
@@ -94,8 +146,7 @@ visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
*
* Return: 1 iff the provided driver can control the specified device
*/
-static int
-visorbus_match(struct device *xdev, struct device_driver *xdrv)
+static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
{
uuid_le channel_type;
int i;
@@ -103,9 +154,8 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
struct visor_driver *drv;
dev = to_visor_device(xdev);
- drv = to_visor_driver(xdrv);
channel_type = visorchannel_get_uuid(dev->visorchannel);
-
+ drv = to_visor_driver(xdrv);
if (!drv->channel_types)
return 0;
@@ -122,7 +172,7 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
/*
* This describes the TYPE of bus.
- * (Don't confuse this with an INSTANCE of the bus.)
+ * (Don't confuse this with an INSTANCE of the bus.)
*/
struct bus_type visorbus_type = {
.name = "visorbus",
@@ -137,8 +187,7 @@ struct bus_type visorbus_type = {
* involved with destroying the dev are complete
* @xdev: struct device for the bus being released
*/
-static void
-visorbus_release_busdevice(struct device *xdev)
+static void visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
@@ -152,15 +201,11 @@ visorbus_release_busdevice(struct device *xdev)
* each child device instance
* @xdev: struct device for the visor device being released
*/
-static void
-visorbus_release_device(struct device *xdev)
+static void visorbus_release_device(struct device *xdev)
{
struct visor_device *dev = to_visor_device(xdev);
- if (dev->visorchannel) {
- visorchannel_destroy(dev->visorchannel);
- dev->visorchannel = NULL;
- }
+ visorchannel_destroy(dev->visorchannel);
kfree(dev);
}
@@ -229,7 +274,7 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
struct device_driver *xdrv = dev->driver;
struct visor_driver *drv = NULL;
- if (!xbus || !xdrv)
+ if (!xdrv)
return 0;
i = xbus->match(dev, xdrv);
if (!i)
@@ -240,23 +285,23 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(typename);
static struct attribute *channel_attrs[] = {
- &dev_attr_physaddr.attr,
- &dev_attr_nbytes.attr,
- &dev_attr_clientpartition.attr,
- &dev_attr_typeguid.attr,
- &dev_attr_zoneguid.attr,
- &dev_attr_typename.attr,
- NULL
+ &dev_attr_physaddr.attr,
+ &dev_attr_nbytes.attr,
+ &dev_attr_clientpartition.attr,
+ &dev_attr_typeguid.attr,
+ &dev_attr_zoneguid.attr,
+ &dev_attr_typename.attr,
+ NULL
};
static struct attribute_group channel_attr_grp = {
- .name = "channel",
- .attrs = channel_attrs,
+ .name = "channel",
+ .attrs = channel_attrs,
};
static const struct attribute_group *visorbus_channel_groups[] = {
- &channel_attr_grp,
- NULL
+ &channel_attr_grp,
+ NULL
};
/* end implementation of specific channel attributes */
@@ -270,7 +315,8 @@ static const struct attribute_group *visorbus_channel_groups[] = {
static ssize_t partition_handle_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
@@ -280,7 +326,8 @@ static DEVICE_ATTR_RO(partition_handle);
static ssize_t partition_guid_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
return sprintf(buf, "{%pUb}\n", &vdev->partition_uuid);
@@ -289,7 +336,8 @@ static DEVICE_ATTR_RO(partition_guid);
static ssize_t partition_name_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
return sprintf(buf, "%s\n", vdev->name);
@@ -298,7 +346,8 @@ static DEVICE_ATTR_RO(partition_name);
static ssize_t channel_addr_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
@@ -308,7 +357,8 @@ static DEVICE_ATTR_RO(channel_addr);
static ssize_t channel_bytes_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
@@ -318,7 +368,8 @@ static DEVICE_ATTR_RO(channel_bytes);
static ssize_t channel_id_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct visor_device *vdev = to_visor_device(dev);
int len = 0;
@@ -331,22 +382,22 @@ static ssize_t channel_id_show(struct device *dev,
static DEVICE_ATTR_RO(channel_id);
static struct attribute *dev_attrs[] = {
- &dev_attr_partition_handle.attr,
- &dev_attr_partition_guid.attr,
- &dev_attr_partition_name.attr,
- &dev_attr_channel_addr.attr,
- &dev_attr_channel_bytes.attr,
- &dev_attr_channel_id.attr,
- NULL
+ &dev_attr_partition_handle.attr,
+ &dev_attr_partition_guid.attr,
+ &dev_attr_partition_name.attr,
+ &dev_attr_channel_addr.attr,
+ &dev_attr_channel_bytes.attr,
+ &dev_attr_channel_id.attr,
+ NULL
};
static struct attribute_group dev_attr_grp = {
- .attrs = dev_attrs,
+ .attrs = dev_attrs,
};
static const struct attribute_group *visorbus_groups[] = {
- &dev_attr_grp,
- NULL
+ &dev_attr_grp,
+ NULL
};
/*
@@ -355,6 +406,7 @@ static const struct attribute_group *visorbus_groups[] = {
* define & implement display of debugfs attributes under
* /sys/kernel/debug/visorbus/visorbus<n>.
*/
+
/*
* vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
* and write it to a seq_file
@@ -365,12 +417,12 @@ static const struct attribute_group *visorbus_groups[] = {
*
* Reads @devInfo, and writes it in human-readable notation to @seq.
*/
-static void
-vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
- struct seq_file *seq, int devix)
+static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
+ struct seq_file *seq, int devix)
{
+ /* uninitialized vbus device entry */
if (!isprint(devinfo->devtype[0]))
- return; /* uninitialized vbus device entry */
+ return;
if (devix >= 0)
seq_printf(seq, "[%d]", devix);
@@ -392,12 +444,11 @@ vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
{
- struct visor_device *vdev = seq->private;
- struct visorchannel *channel = vdev->visorchannel;
-
- int i;
+ int i = 0;
unsigned long off;
struct visor_vbus_deviceinfo dev_info;
+ struct visor_device *vdev = seq->private;
+ struct visorchannel *channel = vdev->visorchannel;
if (!channel)
return 0;
@@ -406,6 +457,7 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
"Client device / client driver info for %s partition (vbus #%u):\n",
((vdev->name) ? (char *)(vdev->name) : ""),
vdev->chipset_bus_no);
+
if (visorchannel_read(channel,
offsetof(struct visor_vbus_channel, chp_info),
&dev_info, sizeof(dev_info)) >= 0)
@@ -414,8 +466,8 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
offsetof(struct visor_vbus_channel, bus_info),
&dev_info, sizeof(dev_info)) >= 0)
vbuschannel_print_devinfo(&dev_info, seq, -1);
+
off = offsetof(struct visor_vbus_channel, dev_info);
- i = 0;
while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
if (visorchannel_read(channel, off, &dev_info,
sizeof(dev_info)) >= 0)
@@ -441,8 +493,7 @@ static const struct file_operations client_bus_info_debugfs_fops = {
.release = single_release,
};
-static void
-dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(unsigned long __opaque)
{
struct visor_device *dev = (struct visor_device *)__opaque;
struct visor_driver *drv = to_visor_driver(dev->device.driver);
@@ -451,8 +502,7 @@ dev_periodic_work(unsigned long __opaque)
mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
}
-static int
-dev_start_periodic_work(struct visor_device *dev)
+static int dev_start_periodic_work(struct visor_device *dev)
{
if (dev->being_removed || dev->timer_active)
return -EINVAL;
@@ -464,8 +514,7 @@ dev_start_periodic_work(struct visor_device *dev)
return 0;
}
-static void
-dev_stop_periodic_work(struct visor_device *dev)
+static void dev_stop_periodic_work(struct visor_device *dev)
{
if (!dev->timer_active)
return;
@@ -484,40 +533,39 @@ dev_stop_periodic_work(struct visor_device *dev)
*
* Return: 0 iff successful
*/
-static int
-visordriver_remove_device(struct device *xdev)
+static int visordriver_remove_device(struct device *xdev)
{
struct visor_device *dev;
struct visor_driver *drv;
dev = to_visor_device(xdev);
drv = to_visor_driver(xdev->driver);
+
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = true;
- if (drv->remove)
- drv->remove(dev);
+ drv->remove(dev);
mutex_unlock(&dev->visordriver_callback_lock);
- dev_stop_periodic_work(dev);
+ dev_stop_periodic_work(dev);
put_device(&dev->device);
+
return 0;
}
-/**
+/*
* visorbus_unregister_visor_driver() - unregisters the provided driver
* @drv: the driver to unregister
*
* A visor function driver calls this function to unregister the driver,
* i.e., within its module_exit function.
*/
-void
-visorbus_unregister_visor_driver(struct visor_driver *drv)
+void visorbus_unregister_visor_driver(struct visor_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
-/**
+/*
* visorbus_read_channel() - reads from the designated channel into
* the provided buffer
* @dev: the device whose channel is read from
@@ -530,15 +578,14 @@ EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
-int
-visorbus_read_channel(struct visor_device *dev, unsigned long offset,
- void *dest, unsigned long nbytes)
+int visorbus_read_channel(struct visor_device *dev, unsigned long offset,
+ void *dest, unsigned long nbytes)
{
return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
}
EXPORT_SYMBOL_GPL(visorbus_read_channel);
-/**
+/*
* visorbus_write_channel() - writes the provided buffer into the designated
* channel
* @dev: the device whose channel is written to
@@ -551,15 +598,14 @@ EXPORT_SYMBOL_GPL(visorbus_read_channel);
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
-int
-visorbus_write_channel(struct visor_device *dev, unsigned long offset,
- void *src, unsigned long nbytes)
+int visorbus_write_channel(struct visor_device *dev, unsigned long offset,
+ void *src, unsigned long nbytes)
{
return visorchannel_write(dev->visorchannel, offset, src, nbytes);
}
EXPORT_SYMBOL_GPL(visorbus_write_channel);
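For reference, a minimal sketch of how a visor function driver might use the two exported accessors above. The payload struct and offset below are hypothetical, invented only for illustration; the accessor signatures are the ones shown in this patch.

/* Illustrative only: my_payload and MY_STATE_OFFSET are made-up names. */
struct my_payload {
	u32 state;
	u32 flags;
};

#define MY_STATE_OFFSET 0x100

static int my_sync_state(struct visor_device *dev)
{
	struct my_payload p;
	int err;

	/* Read the current payload out of the device's channel... */
	err = visorbus_read_channel(dev, MY_STATE_OFFSET, &p, sizeof(p));
	if (err)
		return err;

	/* ...update it, and write it back. */
	p.flags |= 0x1;
	return visorbus_write_channel(dev, MY_STATE_OFFSET, &p, sizeof(p));
}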
-/**
+/*
* visorbus_enable_channel_interrupts() - enables interrupts on the
* designated device
* @dev: the device on which to enable interrupts
@@ -567,8 +613,7 @@ EXPORT_SYMBOL_GPL(visorbus_write_channel);
* Currently we don't yet have a real interrupt, so for now we just call the
* interrupt function periodically via a timer.
*/
-int
-visorbus_enable_channel_interrupts(struct visor_device *dev)
+int visorbus_enable_channel_interrupts(struct visor_device *dev)
{
struct visor_driver *drv = to_visor_driver(dev->device.driver);
@@ -581,13 +626,12 @@ visorbus_enable_channel_interrupts(struct visor_device *dev)
}
EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
-/**
+/*
* visorbus_disable_channel_interrupts() - disables interrupts on the
* designated device
* @dev: the device on which to disable interrupts
*/
-void
-visorbus_disable_channel_interrupts(struct visor_device *dev)
+void visorbus_disable_channel_interrupts(struct visor_device *dev)
{
dev_stop_periodic_work(dev);
}
@@ -616,8 +660,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
* Return: 0 if successful, otherwise the negative value returned by
* device_add() indicating the reason for failure
*/
-static int
-create_visor_device(struct visor_device *dev)
+static int create_visor_device(struct visor_device *dev)
{
int err;
u32 chipset_bus_no = dev->chipset_bus_no;
@@ -664,7 +707,8 @@ create_visor_device(struct visor_device *dev)
goto err_put;
list_add_tail(&dev->list_all, &list_all_device_instances);
- return 0; /* success: reference kept via unmatched get_device() */
+ /* success: reference kept via unmatched get_device() */
+ return 0;
err_put:
put_device(&dev->device);
@@ -672,17 +716,15 @@ err_put:
return err;
}
-static void
-remove_visor_device(struct visor_device *dev)
+static void remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
put_device(&dev->device);
device_unregister(&dev->device);
}
-static int
-get_vbus_header_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info)
+static int get_vbus_header_info(struct visorchannel *chan,
+ struct visor_vbus_headerinfo *hdr_info)
{
int err;
@@ -691,7 +733,7 @@ get_vbus_header_info(struct visorchannel *chan,
"vbus",
sizeof(struct visor_vbus_channel),
VISOR_VBUS_CHANNEL_VERSIONID,
- VISOR_VBUS_CHANNEL_SIGNATURE))
+ VISOR_CHANNEL_SIGNATURE))
return -EINVAL;
err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
@@ -722,10 +764,9 @@ get_vbus_header_info(struct visorchannel *chan,
* Returns no value since this is debug information and not needed for
* device functionality.
*/
-static void
-write_vbus_chp_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
+static void write_vbus_chp_info(struct visorchannel *chan,
+ struct visor_vbus_headerinfo *hdr_info,
+ struct visor_vbus_deviceinfo *info)
{
int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
@@ -748,10 +789,9 @@ write_vbus_chp_info(struct visorchannel *chan,
* Returns no value since this is debug information and not needed for
* device functionality.
*/
-static void
-write_vbus_bus_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info)
+static void write_vbus_bus_info(struct visorchannel *chan,
+ struct visor_vbus_headerinfo *hdr_info,
+ struct visor_vbus_deviceinfo *info)
{
int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
@@ -775,10 +815,10 @@ write_vbus_bus_info(struct visorchannel *chan,
* Returns no value since this is debug information and not needed for
* device functionality.
*/
-static void
-write_vbus_dev_info(struct visorchannel *chan,
- struct visor_vbus_headerinfo *hdr_info,
- struct visor_vbus_deviceinfo *info, unsigned int devix)
+static void write_vbus_dev_info(struct visorchannel *chan,
+ struct visor_vbus_headerinfo *hdr_info,
+ struct visor_vbus_deviceinfo *info,
+ unsigned int devix)
{
int off =
(sizeof(struct channel_header) + hdr_info->dev_info_offset) +
@@ -807,14 +847,13 @@ static void bus_device_info_init(
}
/*
- * fix_vbus_dev_info() - for a child device just created on a client bus, fill
- * in information about the driver that is controlling
- * this device into the appropriate slot within the
- * vbus channel of the bus instance
+ * publish_vbus_dev_info() - for a child device just created on a client bus,
+ * fill in information about the driver that is
+ * controlling this device into the appropriate slot
+ * within the vbus channel of the bus instance
* @visordev: struct visor_device for the desired device
*/
-static void
-fix_vbus_dev_info(struct visor_device *visordev)
+static void publish_vbus_dev_info(struct visor_device *visordev)
{
int i;
struct visor_device *bdev;
@@ -853,7 +892,6 @@ fix_vbus_dev_info(struct visor_device *visordev)
bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
-
write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
write_vbus_bus_info(bdev->visorchannel, hdr_info,
&clientbus_driverinfo);
@@ -874,18 +912,14 @@ fix_vbus_dev_info(struct visor_device *visordev)
* was successful with this device, otherwise a negative errno
* value indicating failure reason
*/
-static int
-visordriver_probe_device(struct device *xdev)
+static int visordriver_probe_device(struct device *xdev)
{
int res;
struct visor_driver *drv;
struct visor_device *dev;
- drv = to_visor_driver(xdev->driver);
dev = to_visor_device(xdev);
-
- if (!drv->probe)
- return -ENODEV;
+ drv = to_visor_driver(xdev->driver);
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = false;
@@ -894,14 +928,14 @@ visordriver_probe_device(struct device *xdev)
if (res >= 0) {
/* success: reference kept via unmatched get_device() */
get_device(&dev->device);
- fix_vbus_dev_info(dev);
+ publish_vbus_dev_info(dev);
}
mutex_unlock(&dev->visordriver_callback_lock);
return res;
}
-/**
+/*
* visorbus_register_visor_driver() - registers the provided visor driver
* for handling one or more visor device
* types (channel_types)
@@ -952,8 +986,21 @@ visordriver_probe_device(struct device *xdev)
*/
int visorbus_register_visor_driver(struct visor_driver *drv)
{
+ /* can't register on a nonexistent bus */
if (!initialized)
- return -ENODEV; /* can't register on a nonexistent bus */
+ return -ENODEV;
+
+ if (!drv->probe)
+ return -ENODEV;
+
+ if (!drv->remove)
+ return -ENODEV;
+
+ if (!drv->pause)
+ return -ENODEV;
+
+ if (!drv->resume)
+ return -ENODEV;
drv->driver.name = drv->name;
drv->driver.bus = &visorbus_type;
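Since this patch rejects registration when any of probe/remove/pause/resume is NULL, a minimal function driver must now supply all four callbacks. The following is a purely illustrative skeleton, not part of the patch: the callback signatures are inferred from the calls visible in this diff, the completion-callback type is written out long-hand, and the channel_types table plus any real work are omitted.

/* Illustrative only: skeletal visor function driver. */
static int my_probe(struct visor_device *dev)
{
	return 0;
}

static void my_remove(struct visor_device *dev)
{
}

static int my_pause(struct visor_device *dev,
		    void (*complete_func)(struct visor_device *dev,
					  int status))
{
	complete_func(dev, 0);
	return 0;
}

static int my_resume(struct visor_device *dev,
		     void (*complete_func)(struct visor_device *dev,
					   int status))
{
	complete_func(dev, 0);
	return 0;
}

static struct visor_driver my_driver = {
	.name = "my_visor_driver",
	/* .channel_types would list the channel GUIDs handled here */
	.probe = my_probe,
	.remove = my_remove,
	.pause = my_pause,
	.resume = my_resume,
};

static int __init my_driver_init(void)
{
	/* Fails with -ENODEV if any of the four callbacks above is NULL. */
	return visorbus_register_visor_driver(&my_driver);
}

static void __exit my_driver_exit(void)
{
	visorbus_unregister_visor_driver(&my_driver);
}

module_init(my_driver_init);
module_exit(my_driver_exit);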
@@ -985,8 +1032,7 @@ EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
* Return: 0 for success, otherwise negative errno value indicating reason for
* failure
*/
-static int
-visorbus_create_instance(struct visor_device *dev)
+static int visorbus_create_instance(struct visor_device *dev)
{
int id = dev->chipset_bus_no;
int err;
@@ -1030,7 +1076,7 @@ visorbus_create_instance(struct visor_device *dev)
err_debugfs_dir:
debugfs_remove_recursive(dev->debugfs_dir);
kfree(hdr_info);
- dev_err(&dev->device, "visorbus_create_instance failed: %d\n", err);
+ dev_err(&dev->device, "%s failed: %d\n", __func__, err);
return err;
}
@@ -1038,8 +1084,7 @@ err_debugfs_dir:
* visorbus_remove_instance() - remove a device instance for the visorbus itself
 * @dev: struct visor_device identifying the bus to remove
*/
-static void
-visorbus_remove_instance(struct visor_device *dev)
+static void visorbus_remove_instance(struct visor_device *dev)
{
/*
* Note that this will result in the release method for
@@ -1049,10 +1094,7 @@ visorbus_remove_instance(struct visor_device *dev)
* successfully been able to trace thru the code to see where/how
* release() gets called. But I know it does.
*/
- if (dev->visorchannel) {
- visorchannel_destroy(dev->visorchannel);
- dev->visorchannel = NULL;
- }
+ visorchannel_destroy(dev->visorchannel);
kfree(dev->vbus_hdr_info);
list_del(&dev->list_all);
device_unregister(&dev->device);
@@ -1061,8 +1103,7 @@ visorbus_remove_instance(struct visor_device *dev)
/*
* remove_all_visor_devices() - remove all child visorbus device instances
*/
-static void
-remove_all_visor_devices(void)
+static void remove_all_visor_devices(void)
{
struct list_head *listentry, *listtmp;
@@ -1074,13 +1115,11 @@ remove_all_visor_devices(void)
}
}
-int
-visorchipset_bus_create(struct visor_device *dev)
+int visorchipset_bus_create(struct visor_device *dev)
{
int err;
err = visorbus_create_instance(dev);
-
if (err < 0)
return err;
@@ -1089,15 +1128,13 @@ visorchipset_bus_create(struct visor_device *dev)
return 0;
}
-void
-visorchipset_bus_destroy(struct visor_device *dev)
+void visorchipset_bus_destroy(struct visor_device *dev)
{
visorbus_remove_instance(dev);
visorbus_destroy_response(dev, 0);
}
-int
-visorchipset_device_create(struct visor_device *dev_info)
+int visorchipset_device_create(struct visor_device *dev_info)
{
int err;
@@ -1110,11 +1147,9 @@ visorchipset_device_create(struct visor_device *dev_info)
return 0;
}
-void
-visorchipset_device_destroy(struct visor_device *dev_info)
+void visorchipset_device_destroy(struct visor_device *dev_info)
{
remove_visor_device(dev_info);
-
visorbus_device_destroy_response(dev_info, 0);
}
@@ -1127,14 +1162,12 @@ visorchipset_device_destroy(struct visor_device *dev_info)
* @status: 0 iff the pause state change completed successfully, otherwise
* a negative errno value indicating the reason for failure
*/
-static void
-pause_state_change_complete(struct visor_device *dev, int status)
+static void pause_state_change_complete(struct visor_device *dev, int status)
{
if (!dev->pausing)
return;
dev->pausing = false;
-
visorbus_device_pause_response(dev, status);
}
@@ -1147,8 +1180,7 @@ pause_state_change_complete(struct visor_device *dev, int status)
* @status: 0 iff the resume state change completed successfully, otherwise
* a negative errno value indicating the reason for failure
*/
-static void
-resume_state_change_complete(struct visor_device *dev, int status)
+static void resume_state_change_complete(struct visor_device *dev, int status)
{
if (!dev->resuming)
return;
@@ -1174,9 +1206,8 @@ resume_state_change_complete(struct visor_device *dev, int status)
* via a callback function; see pause_state_change_complete() and
* resume_state_change_complete().
*/
-static int
-visorchipset_initiate_device_pause_resume(struct visor_device *dev,
- bool is_pause)
+static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
+ bool is_pause)
{
int err;
struct visor_driver *drv = NULL;
@@ -1189,19 +1220,14 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev,
return -EBUSY;
if (is_pause) {
- if (!drv->pause)
- return -EINVAL;
-
dev->pausing = true;
err = drv->pause(dev, pause_state_change_complete);
} else {
- /* The vbus_dev_info structure in the channel was been
- * cleared, make sure it is valid.
+ /*
+			 * The vbus_dev_info structure in the channel has been cleared,
+			 * so make sure it is valid.
*/
- fix_vbus_dev_info(dev);
- if (!drv->resume)
- return -EINVAL;
-
+ publish_vbus_dev_info(dev);
dev->resuming = true;
err = drv->resume(dev, resume_state_change_complete);
}
@@ -1209,7 +1235,7 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev,
return err;
}
-/**
+/*
* visorchipset_device_pause() - start a pause operation for a visor device
* @dev_info: struct visor_device identifying the device being paused
*
@@ -1217,13 +1243,11 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev,
* that device. Success/failure result is returned asynchronously
* via a callback function; see pause_state_change_complete().
*/
-int
-visorchipset_device_pause(struct visor_device *dev_info)
+int visorchipset_device_pause(struct visor_device *dev_info)
{
int err;
err = visorchipset_initiate_device_pause_resume(dev_info, true);
-
if (err < 0) {
dev_info->pausing = false;
return err;
@@ -1232,7 +1256,7 @@ visorchipset_device_pause(struct visor_device *dev_info)
return 0;
}
-/**
+/*
* visorchipset_device_resume() - start a resume operation for a visor device
* @dev_info: struct visor_device identifying the device being resumed
*
@@ -1240,13 +1264,11 @@ visorchipset_device_pause(struct visor_device *dev_info)
* that device. Success/failure result is returned asynchronously
* via a callback function; see resume_state_change_complete().
*/
-int
-visorchipset_device_resume(struct visor_device *dev_info)
+int visorchipset_device_resume(struct visor_device *dev_info)
{
int err;
err = visorchipset_initiate_device_pause_resume(dev_info, false);
-
if (err < 0) {
dev_info->resuming = false;
return err;
@@ -1255,8 +1277,7 @@ visorchipset_device_resume(struct visor_device *dev_info)
return 0;
}
-int
-visorbus_init(void)
+int visorbus_init(void)
{
int err;
@@ -1271,14 +1292,12 @@ visorbus_init(void)
return err;
initialized = true;
-
bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
return 0;
}
-void
-visorbus_exit(void)
+void visorbus_exit(void)
{
struct list_head *listentry, *listtmp;
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 98a5af19189d..7ccf7565eb2c 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -23,10 +23,6 @@
#include "controlvmchannel.h"
#include "vbuschannel.h"
-/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the
- * command line
- */
-
int visorchipset_bus_create(struct visor_device *bus_info);
void visorchipset_bus_destroy(struct visor_device *bus_info);
int visorchipset_device_create(struct visor_device *dev_info);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 6885c2cb7135..c7eea655a86e 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -26,7 +26,7 @@
#include "visorbus_private.h"
#include "controlvmchannel.h"
-#define MYDRVNAME "visorchannel"
+#define VISOR_DRV_NAME "visorchannel"
#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
@@ -41,17 +41,20 @@ struct visorchannel {
bool requested;
struct channel_header chan_hdr;
uuid_le guid;
- bool needs_lock; /* channel creator knows if more than one */
- /* thread will be inserting or removing */
- spinlock_t insert_lock; /* protect head writes in chan_hdr */
- spinlock_t remove_lock; /* protect tail writes in chan_hdr */
-
+ /*
+ * channel creator knows if more than one
+ * thread will be inserting or removing
+ */
+ bool needs_lock;
+ /* protect head writes in chan_hdr */
+ spinlock_t insert_lock;
+ /* protect tail writes in chan_hdr */
+ spinlock_t remove_lock;
uuid_le type;
uuid_le inst;
};
-void
-visorchannel_destroy(struct visorchannel *channel)
+void visorchannel_destroy(struct visorchannel *channel)
{
if (!channel)
return;
@@ -63,46 +66,39 @@ visorchannel_destroy(struct visorchannel *channel)
kfree(channel);
}
-u64
-visorchannel_get_physaddr(struct visorchannel *channel)
+u64 visorchannel_get_physaddr(struct visorchannel *channel)
{
return channel->physaddr;
}
-ulong
-visorchannel_get_nbytes(struct visorchannel *channel)
+ulong visorchannel_get_nbytes(struct visorchannel *channel)
{
return channel->nbytes;
}
-char *
-visorchannel_uuid_id(uuid_le *guid, char *s)
+char *visorchannel_uuid_id(uuid_le *guid, char *s)
{
sprintf(s, "%pUL", guid);
return s;
}
-char *
-visorchannel_id(struct visorchannel *channel, char *s)
+char *visorchannel_id(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->guid, s);
}
-char *
-visorchannel_zoneid(struct visorchannel *channel, char *s)
+char *visorchannel_zoneid(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}
-u64
-visorchannel_get_clientpartition(struct visorchannel *channel)
+u64 visorchannel_get_clientpartition(struct visorchannel *channel)
{
return channel->chan_hdr.partition_handle;
}
-int
-visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle)
+int visorchannel_set_clientpartition(struct visorchannel *channel,
+ u64 partition_handle)
{
channel->chan_hdr.partition_handle = partition_handle;
return 0;
@@ -114,16 +110,14 @@ visorchannel_set_clientpartition(struct visorchannel *channel,
*
* Return: the UUID of the provided channel
*/
-uuid_le
-visorchannel_get_uuid(struct visorchannel *channel)
+uuid_le visorchannel_get_uuid(struct visorchannel *channel)
{
return channel->guid;
}
EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
-int
-visorchannel_read(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes)
+int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
+ ulong nbytes)
{
if (offset + nbytes > channel->nbytes)
return -EIO;
@@ -133,9 +127,8 @@ visorchannel_read(struct visorchannel *channel, ulong offset,
return 0;
}
-int
-visorchannel_write(struct visorchannel *channel, ulong offset,
- void *dest, ulong nbytes)
+int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
+ ulong nbytes)
{
size_t chdr_size = sizeof(struct channel_header);
size_t copy_size;
@@ -154,8 +147,7 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
return 0;
}
-void *
-visorchannel_get_header(struct visorchannel *channel)
+void *visorchannel_get_header(struct visorchannel *channel)
{
return &channel->chan_hdr;
}
@@ -187,9 +179,8 @@ visorchannel_get_header(struct visorchannel *channel)
&((sig_hdr)->FIELD), \
sizeof((sig_hdr)->FIELD))
-static int
-sig_read_header(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr)
+static int sig_read_header(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr)
{
if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
return -EINVAL;
@@ -200,9 +191,9 @@ sig_read_header(struct visorchannel *channel, u32 queue,
sig_hdr, sizeof(struct signal_queue_header));
}
-static int
-sig_read_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot, void *data)
+static int sig_read_data(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr, u32 slot,
+ void *data)
{
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
sig_hdr, slot);
@@ -211,9 +202,9 @@ sig_read_data(struct visorchannel *channel, u32 queue,
data, sig_hdr->signal_size);
}
-static int
-sig_write_data(struct visorchannel *channel, u32 queue,
- struct signal_queue_header *sig_hdr, u32 slot, void *data)
+static int sig_write_data(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr, u32 slot,
+ void *data)
{
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
sig_hdr, slot);
@@ -222,8 +213,8 @@ sig_write_data(struct visorchannel *channel, u32 queue,
data, sig_hdr->signal_size);
}
-static int
-signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
+static int signalremove_inner(struct visorchannel *channel, u32 queue,
+ void *msg)
{
struct signal_queue_header sig_hdr;
int error;
@@ -246,9 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
/*
* For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory.
+ * update host memory. Required for channel sync.
*/
- mb(); /* required for channel synch */
+ mb();
error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
if (error)
@@ -269,8 +260,8 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
*
* Return: integer error code indicating the status of the removal
*/
-int
-visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
+int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+ void *msg)
{
int rc;
unsigned long flags;
@@ -287,8 +278,7 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-static bool
-queue_empty(struct visorchannel *channel, u32 queue)
+static bool queue_empty(struct visorchannel *channel, u32 queue)
{
struct signal_queue_header sig_hdr;
@@ -307,8 +297,7 @@ queue_empty(struct visorchannel *channel, u32 queue)
* Return: boolean indicating whether any messages in the designated
* channel/queue are present
*/
-bool
-visorchannel_signalempty(struct visorchannel *channel, u32 queue)
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
bool rc;
unsigned long flags;
@@ -324,8 +313,8 @@ visorchannel_signalempty(struct visorchannel *channel, u32 queue)
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);
-static int
-signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
+static int signalinsert_inner(struct visorchannel *channel, u32 queue,
+ void *msg)
{
struct signal_queue_header sig_hdr;
int err;
@@ -351,9 +340,9 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
/*
* For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory.
+ * update host memory. Required for channel sync.
*/
- mb(); /* required for channel synch */
+ mb();
err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
if (err)
@@ -388,9 +377,11 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
* Return: pointer to visorchannel that was created if successful,
* otherwise NULL
*/
-static struct visorchannel *
-visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid, bool needs_lock)
+static struct visorchannel *visorchannel_create_guts(
+ u64 physaddr,
+ unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid,
+ bool needs_lock)
{
struct visorchannel *channel;
int err;
@@ -414,7 +405,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
* this. Remember that we haven't requested it so we don't try to
* release later on.
*/
- channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
+ channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
@@ -444,7 +435,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
release_mem_region(channel->physaddr, channel->nbytes);
channel->mapped = NULL;
channel->requested = request_mem_region(channel->physaddr,
- channel_bytes, MYDRVNAME);
+ channel_bytes, VISOR_DRV_NAME);
if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
@@ -465,17 +456,17 @@ err_destroy_channel:
return NULL;
}
-struct visorchannel *
-visorchannel_create(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid)
+struct visorchannel *visorchannel_create(u64 physaddr,
+ unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
{
return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
false);
}
-struct visorchannel *
-visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid)
+struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
+ unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
{
return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
true);
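A short usage note on the two constructors above: the locked variant is intended for channels where more than one thread will insert into or remove from the signal queues (the needs_lock field documented earlier in this file); single-threaded users can stick with visorchannel_create(). A purely illustrative sketch, with placeholder arguments supplied by the caller:

/* Illustrative only: physaddr, nbytes and guid are placeholders. */
static struct visorchannel *my_open_shared_channel(u64 physaddr,
						   unsigned long nbytes,
						   uuid_le guid)
{
	/*
	 * Multiple threads will call visorchannel_signalinsert() and
	 * visorchannel_signalremove() on this channel, so create it with
	 * the per-queue spinlocks enabled.
	 */
	return visorchannel_create_with_lock(physaddr, nbytes, GFP_KERNEL,
					     guid);
}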
@@ -490,8 +481,8 @@ visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
*
* Return: integer error code indicating the status of the insertion
*/
-int
-visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
+int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+ void *msg)
{
int rc;
unsigned long flags;
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 22150564b4fb..87ea852bab01 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -15,11 +15,7 @@
*/
#include <linux/acpi.h>
-#include <linux/ctype.h>
-#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/nls.h>
-#include <linux/netdevice.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>
@@ -27,8 +23,6 @@
#include "visorbus_private.h"
#include "vmcallinterface.h"
-#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
-
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -124,7 +118,6 @@ static ssize_t toolaction_store(struct device *dev,
offsetof(struct visor_controlvm_channel,
tool_action),
&tool_action, sizeof(u8));
-
if (err)
return err;
return count;
@@ -143,7 +136,6 @@ static ssize_t boottotool_show(struct device *dev,
efi_visor_ind),
&efi_visor_indication,
sizeof(struct efi_visor_indication));
-
if (err)
return err;
return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
@@ -165,7 +157,6 @@ static ssize_t boottotool_store(struct device *dev,
efi_visor_ind),
&(efi_visor_indication),
sizeof(struct efi_visor_indication));
-
if (err)
return err;
return count;
@@ -302,18 +293,21 @@ parser_string_get(struct parser_context *ctx)
int i;
pscan = ctx->curr;
+ if (!pscan)
+ return NULL;
nscan = ctx->bytes_remaining;
if (nscan == 0)
return NULL;
- if (!pscan)
- return NULL;
+
for (i = 0, value_length = -1; i < nscan; i++)
if (pscan[i] == '\0') {
value_length = i;
break;
}
- if (value_length < 0) /* '\0' was not included in the length */
+ /* '\0' was not included in the length */
+ if (value_length < 0)
value_length = nscan;
+
value = kmalloc(value_length + 1, GFP_KERNEL);
if (!value)
return NULL;
@@ -364,9 +358,9 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
struct device *dev_start = NULL;
struct visor_device *vdev = NULL;
struct visor_busdev id = {
- .bus_no = bus_no,
- .dev_no = dev_no
- };
+ .bus_no = bus_no,
+ .dev_no = dev_no
+ };
if (from)
dev_start = &from->device;
@@ -580,7 +574,7 @@ visorbus_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
dev_err(&chipset_dev->acpi_device->dev,
- "failed visorbus_create: already exists\n");
+ "failed %s: already exists\n", __func__);
err = -EEXIST;
goto err_respond;
}
@@ -618,11 +612,11 @@ visorbus_create(struct controlvm_message *inmsg)
cmd->create_bus.channel_bytes,
GFP_KERNEL,
cmd->create_bus.bus_data_type_uuid);
-
if (!visorchannel) {
err = -ENOMEM;
goto err_free_pending_msg;
}
+
bus_info->visorchannel = visorchannel;
/* Response will be handled by visorchipset_bus_create */
@@ -757,7 +751,6 @@ visorbus_device_create(struct controlvm_message *inmsg)
err = -ENODEV;
goto err_respond;
}
-
if (bus_info->state.created == 0) {
dev_err(&chipset_dev->acpi_device->dev,
"bus not created, id: %d\n", bus_no);
@@ -791,7 +784,6 @@ visorbus_device_create(struct controlvm_message *inmsg)
cmd->create_device.channel_bytes,
GFP_KERNEL,
cmd->create_device.data_type_uuid);
-
if (!visorchannel) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to create visorchannel: %d/%d\n",
@@ -918,7 +910,6 @@ visorbus_device_destroy(struct controlvm_message *inmsg)
err = -EINVAL;
goto err_respond;
}
-
if (dev_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
err = -EIO;
@@ -936,6 +927,7 @@ visorbus_device_destroy(struct controlvm_message *inmsg)
dev_info->pending_msg_hdr = pmsg_hdr;
}
+ kfree(dev_info->name);
visorchipset_device_destroy(dev_info);
return 0;
@@ -954,8 +946,7 @@ err_respond:
* disable the specified device. The udev script then writes to
* /sys/devices/platform/visorchipset/parahotplug, which causes the
* parahotplug store functions to get called, at which point the
- * appropriate CONTROLVM message is retrieved from the list and responded
- * to.
+ * appropriate CONTROLVM message is retrieved from the list and responded to.
*/
#define PARAHOTPLUG_TIMEOUT_MS 2000
@@ -1023,7 +1014,8 @@ parahotplug_request_destroy(struct parahotplug_request *req)
}
static LIST_HEAD(parahotplug_request_list);
-static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
+/* lock for above */
+static DEFINE_SPINLOCK(parahotplug_request_list_lock);
/*
* parahotplug_request_complete() - mark request as complete
@@ -1201,7 +1193,6 @@ parahotplug_process_message(struct controlvm_message *inmsg)
int err;
req = parahotplug_request_create(inmsg);
-
if (!req)
return -ENOMEM;
@@ -1295,10 +1286,9 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
static int
chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
- int res;
-
- res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+ int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
KOBJ_OFFLINE);
+
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
@@ -1321,13 +1311,12 @@ static int unisys_vmcall(unsigned long tuple, unsigned long param)
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
-
if (result)
goto error;
return 0;
-
-error: /* Need to convert from VMCALL error codes to Linux */
+/* Need to convert from VMCALL error codes to Linux */
+error:
switch (result) {
case VMCALL_RESULT_INVALID_PARAM:
return -EINVAL;
@@ -1513,10 +1502,11 @@ visorbus_device_resume_response(struct visor_device *dev_info, int response)
}
static struct parser_context *
-parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
+parser_init_byte_stream(u64 addr, u32 bytes, bool *retry)
{
int allocbytes = sizeof(struct parser_context) + bytes;
struct parser_context *ctx;
+ void *mapping;
*retry = false;
@@ -1541,22 +1531,11 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
ctx->curr = NULL;
ctx->bytes_remaining = 0;
ctx->byte_stream = false;
- if (local) {
- void *p;
-
- if (addr > virt_to_phys(high_memory - 1))
- goto err_finish_ctx;
- p = __va((unsigned long)(addr));
- memcpy(ctx->data, p, bytes);
- } else {
- void *mapping = memremap(addr, bytes, MEMREMAP_WB);
-
- if (!mapping)
- goto err_finish_ctx;
- memcpy(ctx->data, mapping, bytes);
- memunmap(mapping);
- }
-
+ mapping = memremap(addr, bytes, MEMREMAP_WB);
+ if (!mapping)
+ goto err_finish_ctx;
+ memcpy(ctx->data, mapping, bytes);
+ memunmap(mapping);
ctx->byte_stream = true;
chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
@@ -1587,12 +1566,10 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
u64 parm_addr;
u32 parm_bytes;
struct parser_context *parser_ctx = NULL;
- bool local_addr;
struct controlvm_message ackmsg;
int err = 0;
/* create parsing context if necessary */
- local_addr = (inmsg.hdr.flags.test_message == 1);
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
@@ -1605,21 +1582,16 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
bool retry = false;
parser_ctx =
- parser_init_byte_stream(parm_addr, parm_bytes,
- local_addr, &retry);
+ parser_init_byte_stream(parm_addr, parm_bytes, &retry);
if (!parser_ctx && retry)
return -EAGAIN;
}
+ controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_ACK, &ackmsg);
+ if (err)
+ return err;
- if (!local_addr) {
- controlvm_init_response(&ackmsg, &inmsg.hdr,
- CONTROLVM_RESP_SUCCESS);
- err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_ACK,
- &ackmsg);
- if (err)
- return err;
- }
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
err = chipset_init(&inmsg);
@@ -1692,9 +1664,7 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
static int
read_controlvm_event(struct controlvm_message *msg)
{
- int err;
-
- err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+ int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_EVENT, msg);
if (err)
return err;
@@ -1829,12 +1799,10 @@ visorchipset_init(struct acpi_device *acpi_device)
goto error;
acpi_device->driver_data = chipset_dev;
-
chipset_dev->acpi_device = acpi_device;
chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
controlvm_channel = visorchannel_create_with_lock(addr,
0, GFP_KERNEL, uuid);
-
if (!controlvm_channel)
goto error_free_chipset_dev;
@@ -1845,8 +1813,12 @@ visorchipset_init(struct acpi_device *acpi_device)
if (err < 0)
goto error_destroy_channel;
- if (!VISOR_CONTROLVM_CHANNEL_OK_CLIENT(
- visorchannel_get_header(controlvm_channel)))
+ if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
+ VISOR_CONTROLVM_CHANNEL_UUID,
+ "controlvm",
+ sizeof(struct visor_controlvm_channel),
+ VISOR_CONTROLVM_CHANNEL_VERSIONID,
+ VISOR_CHANNEL_SIGNATURE))
goto error_delete_groups;
/* if booting in a crash kernel */
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index cc70e1b16bda..541911bcb6f0 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -15,20 +16,19 @@
#ifndef __VMCALLINTERFACE_H__
#define __VMCALLINTERFACE_H__
-enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
- /* Note: when a new VMCALL is added:
- * - the 1st 2 hex digits correspond to one of the
- * VMCALL_MONITOR_INTERFACE types and
- * - the next 2 hex digits are the nth relative instance of within a
- * type
- * E.G. for VMCALL_VIRTPART_RECYCLE_PART,
- * - the 0x02 identifies it as a VMCALL_VIRTPART type and
- * - the 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART
- * type of VMCALL
- */
- /* used by all Guests, not just IO */
- VMCALL_CONTROLVM_ADDR = 0x0501,
-};
+/*
+ * VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO.
+ *
+ * Note: When a new VMCALL is added:
+ * - The 1st 2 hex digits correspond to one of the VMCALL_MONITOR_INTERFACE
+ * types.
+ *   - The next 2 hex digits are the nth relative instance within a type.
+ * E.G. for VMCALL_VIRTPART_RECYCLE_PART,
+ * - The 0x02 identifies it as a VMCALL_VIRTPART type.
+ * - The 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART type of
+ * VMCALL.
+ */
+#define VMCALL_CONTROLVM_ADDR 0x0501
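To make the tuple layout in the comment above concrete: for VMCALL_CONTROLVM_ADDR (0x0501), the first two hex digits give the interface type (0x05) and the last two give the instance (0x01). A tiny illustrative helper, not part of the patch, that splits a tuple accordingly:

/* Illustrative only: decompose a VMCALL tuple into (type, instance). */
static inline u8 vmcall_tuple_type(u16 tuple)
{
	return tuple >> 8;	/* 0x0501 -> 0x05 */
}

static inline u8 vmcall_tuple_instance(u16 tuple)
{
	return tuple & 0xff;	/* 0x0501 -> 0x01 */
}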
enum vmcall_result {
VMCALL_RESULT_SUCCESS = 0,
@@ -39,16 +39,23 @@ enum vmcall_result {
VMCALL_RESULT_DEVICE_NOT_READY = 5
};
-/* Structures for IO VMCALLs */
-/* Parameters to VMCALL_CONTROLVM_ADDR interface */
+/*
+ * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLs. Holds the
+ *                                          parameters for the
+ *                                          VMCALL_CONTROLVM_ADDR interface.
+ * @address: The Guest-relative physical address of the ControlVm channel.
+ * This VMCall fills this in with the appropriate address.
+ * Contents provided by this VMCALL (OUT).
+ * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall fills
+ *		    this in with the appropriate size. Contents provided by
+ * this VMCALL (OUT).
+ * @unused: Unused Bytes in the 64-Bit Aligned Struct.
+ */
struct vmcall_io_controlvm_addr_params {
- /* The Guest-relative physical address of the ControlVm channel. */
- /* This VMCall fills this in with the appropriate address. */
- u64 address; /* contents provided by this VMCALL (OUT) */
- /* the size of the ControlVm channel in bytes This VMCall fills this */
- /* in with the appropriate address. */
- u32 channel_bytes; /* contents provided by this VMCALL (OUT) */
- u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+ u64 address;
+ u32 channel_bytes;
+ u8 unused[4];
} __packed;
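A hedged sketch of how this OUT structure is typically consumed: the caller hands the VMCALL the guest physical address of the struct and reads back address and channel_bytes. Here unisys_vmcall() refers to the static helper in visorchipset.c touched earlier in this diff, and the virt_to_phys() conversion is an assumption for illustration, not something this header defines.

/* Illustrative only: fetch the ControlVm channel location and size. */
static int my_get_controlvm_channel(u64 *addr, u32 *bytes)
{
	struct vmcall_io_controlvm_addr_params params = {};
	int err;

	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, virt_to_phys(&params));
	if (err)
		return err;

	*addr = params.address;		/* filled in by the VMCALL (OUT) */
	*bytes = params.channel_bytes;	/* filled in by the VMCALL (OUT) */
	return 0;
}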
-#endif /* __VMCALLINTERFACE_H__ */
+/* __VMCALLINTERFACE_H__ */
+#endif
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index a6e7a6bbc428..178d0227aa80 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -48,7 +48,8 @@ MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_UUID_STR);
struct visordisk_info {
u32 valid;
- u32 channel, id, lun; /* Disk Path */
+ /* Disk Path */
+ u32 channel, id, lun;
atomic_t ios_threshold;
atomic_t error_count;
struct visordisk_info *next;
@@ -56,8 +57,10 @@ struct visordisk_info {
struct scsipending {
struct uiscmdrsp cmdrsp;
- void *sent; /* The Data being tracked */
- char cmdtype; /* Type of pointer that is being stored */
+ /* The Data being tracked */
+ void *sent;
+ /* Type of pointer that is being stored */
+ char cmdtype;
};
/* Each scsi_host has a host_data area that contains this struct. */
@@ -71,7 +74,8 @@ struct visorhba_devdata {
struct scsipending pending[MAX_PENDING_REQUESTS];
/* Start search for next pending free slot here */
unsigned int nextinsert;
- spinlock_t privlock; /* lock to protect data in devdata */
+ /* lock to protect data in devdata */
+ spinlock_t privlock;
bool serverdown;
bool serverchangingstate;
unsigned long long acquire_failed_cnt;
@@ -108,18 +112,18 @@ struct visorhba_devices_open {
(iter->lun == match->lun))
/*
- * visor_thread_start - starts a thread for the device
- * @threadfn: Function the thread starts
- * @thrcontext: Context to pass to the thread, i.e. devdata
- * @name: string describing name of thread
+ * visor_thread_start - Starts a thread for the device
+ * @threadfn: Function the thread starts
+ * @thrcontext: Context to pass to the thread, i.e. devdata
+ * @name: String describing name of thread
*
- * Starts a thread for the device.
+ * Starts a thread for the device.
*
- * Return the task_struct * denoting the thread on success,
- * or NULL on failure
+ * Return: The task_struct * denoting the thread on success,
+ * or NULL on failure
*/
-static struct task_struct *visor_thread_start
-(int (*threadfn)(void *), void *thrcontext, char *name)
+static struct task_struct *visor_thread_start(int (*threadfn)(void *),
+ void *thrcontext, char *name)
{
struct task_struct *task;
@@ -132,27 +136,27 @@ static struct task_struct *visor_thread_start
}
/*
- * visor_thread_stop - stops the thread if it is running
+ * visor_thread_stop - Stops the thread if it is running
+ * @task: Description of process to stop
*/
static void visor_thread_stop(struct task_struct *task)
{
- if (!task)
- return; /* no thread running */
kthread_stop(task);
}
/*
- * add_scsipending_entry - save off io command that is pending in
- * Service Partition
- * @devdata: Pointer to devdata
- * @cmdtype: Specifies the type of command pending
- * @new: The command to be saved
+ * add_scsipending_entry - Save off io command that is pending in
+ * Service Partition
+ * @devdata: Pointer to devdata
+ * @cmdtype: Specifies the type of command pending
+ * @new: The command to be saved
+ *
+ * Saves off the io command that is being handled by the Service
+ * Partition so that it can be handled when it completes. If new is
+ * NULL it is assumed the entry refers only to the cmdrsp.
*
- * Saves off the io command that is being handled by the Service
- * Partition so that it can be handled when it completes. If new is
- * NULL it is assumed the entry refers only to the cmdrsp.
- * Returns insert_location where entry was added,
- * -EBUSY if it can't
+ * Return: Insert_location where entry was added on success,
+ * -EBUSY if it can't
*/
static int add_scsipending_entry(struct visorhba_devdata *devdata,
char cmdtype, void *new)
@@ -176,7 +180,8 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
entry->cmdtype = cmdtype;
if (new)
entry->sent = new;
- else /* wants to send cmdrsp */
+ /* wants to send cmdrsp */
+ else
entry->sent = &entry->cmdrsp;
devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
spin_unlock_irqrestore(&devdata->privlock, flags);
@@ -185,15 +190,15 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
}
/*
- * del_scsipending_ent - removes an entry from the pending array
- * @devdata: Device holding the pending array
- * @del: Entry to remove
+ * del_scsipending_ent - Removes an entry from the pending array
+ * @devdata: Device holding the pending array
+ * @del: Entry to remove
*
- * Removes the entry pointed at by del and returns it.
- * Returns the scsipending entry pointed at
+ * Removes the entry pointed at by del and returns it.
+ *
+ * Return: The scsipending entry pointed to on success, NULL on failure
*/
-static void *del_scsipending_ent(struct visorhba_devdata *devdata,
- int del)
+static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
{
unsigned long flags;
void *sent;
@@ -203,7 +208,6 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
spin_lock_irqsave(&devdata->privlock, flags);
sent = devdata->pending[del].sent;
-
devdata->pending[del].cmdtype = 0;
devdata->pending[del].sent = NULL;
spin_unlock_irqrestore(&devdata->privlock, flags);
@@ -212,13 +216,14 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
}
/*
- * get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
- * @ddata: Device holding the pending array
- * @ent: Entry that stores the cmdrsp
+ * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
+ * @ddata: Device holding the pending array
+ * @ent: Entry that stores the cmdrsp
+ *
+ * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
+ * if the "sent" field is not NULL.
*
- * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
- * if the "sent" field is not NULL
- * Returns a pointer to the cmdrsp.
+ * Return: A pointer to the cmdrsp, NULL on failure
*/
static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
int ent)
@@ -230,13 +235,15 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
}
/*
- * simple_idr_get - associate a provided pointer with an int value
- * 1 <= value <= INT_MAX, and return this int value;
- * the pointer value can be obtained later by passing
- * this int value to idr_find()
- * @idrtable: the data object maintaining the pointer<-->int mappings
- * @p: the pointer value to be remembered
- * @lock: a spinlock used when exclusive access to idrtable is needed
+ * simple_idr_get - Associate a provided pointer with an int value
+ * 1 <= value <= INT_MAX, and return this int value;
+ * the pointer value can be obtained later by passing
+ * this int value to idr_find()
+ * @idrtable: The data object maintaining the pointer<-->int mappings
+ * @p: The pointer value to be remembered
+ * @lock: A spinlock used when exclusive access to idrtable is needed
+ *
+ * Return: The id number mapped to pointer 'p', 0 on failure
*/
static unsigned int simple_idr_get(struct idr *idrtable, void *p,
spinlock_t *lock)
@@ -249,16 +256,23 @@ static unsigned int simple_idr_get(struct idr *idrtable, void *p,
id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
spin_unlock_irqrestore(lock, flags);
idr_preload_end();
+ /* failure */
if (id < 0)
- return 0; /* failure */
- return (unsigned int)(id); /* idr_alloc() guarantees > 0 */
+ return 0;
+ /* idr_alloc() guarantees > 0 */
+ return (unsigned int)(id);
}
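To make the simple_idr_get() contract above concrete (a non-zero id maps back to the original pointer via idr_find(); 0 means failure), here is a small, purely illustrative round-trip using only functions already named in this file:

/* Illustrative only: remember a pointer, then resolve it again by id. */
static void my_idr_roundtrip(struct idr *idrtable, spinlock_t *lock, void *p)
{
	unsigned int id = simple_idr_get(idrtable, p, lock);

	/* 0 means the allocation failed */
	if (!id)
		return;

	/* any non-zero id resolves back to the stored pointer */
	WARN_ON(idr_find(idrtable, id) != p);
}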
/*
- * setup_scsitaskmgmt_handles - stash the necessary handles so that the
- * completion processing logic for a taskmgmt
- * cmd will be able to find who to wake up
- * and where to stash the result
+ * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
+ * completion processing logic for a taskmgmt
+ * cmd will be able to find who to wake up
+ * and where to stash the result
+ * @idrtable: The data object maintaining the pointer<-->int mappings
+ * @lock: A spinlock used when exclusive access to idrtable is needed
+ * @cmdrsp: Response from the IOVM
+ * @event: The event handle to associate with an id
+ * @result: The location to place the result of the event handle into
*/
static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
struct uiscmdrsp *cmdrsp,
@@ -273,8 +287,10 @@ static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
}
/*
- * cleanup_scsitaskmgmt_handles - forget handles created by
- * setup_scsitaskmgmt_handles()
+ * cleanup_scsitaskmgmt_handles - Forget handles created by
+ * setup_scsitaskmgmt_handles()
+ * @idrtable: The data object maintaining the pointer<-->int mappings
+ * @cmdrsp: Response from the IOVM
*/
static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
struct uiscmdrsp *cmdrsp)
@@ -286,14 +302,15 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
}
/*
- * forward_taskmgmt_command - send taskmegmt command to the Service
- * Partition
- * @tasktype: Type of taskmgmt command
- * @scsidev: Scsidev that issued command
+ * forward_taskmgmt_command - Send taskmgmt command to the Service
+ * Partition
+ * @tasktype: Type of taskmgmt command
+ * @scsidev: Scsidev that issued command
+ *
+ * Create a cmdrsp packet and send it to the Service Partition
+ * that will service this request.
*
- * Create a cmdrsp packet and send it to the Serivce Partition
- * that will service this request.
- * Returns whether the command was queued successfully or not.
+ * Return: Int representing whether command was queued successfully or not
*/
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
struct scsi_cmnd *scsicmd)
@@ -365,11 +382,10 @@ err_del_scsipending_ent:
}
/*
- * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
- * @scsicmd: The scsicmd that needs aborted
- *
- * Returns SUCCESS if inserted, failure otherwise
+ * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
+ * @scsicmd: The scsicmd that needs to be aborted
*
+ * Return: SUCCESS if inserted, FAILED otherwise
*/
static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
{
@@ -390,10 +406,10 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
}
/*
- * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
- * @scsicmd: The scsicmd that needs aborted
+ * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
+ * @scsicmd: The scsicmd that needs to be aborted
*
- * Returns SUCCESS if inserted, failure otherwise
+ * Return: SUCCESS if inserted, FAILED otherwise
*/
static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
{
@@ -414,11 +430,11 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
}
/*
- * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
- * target on the bus
- * @scsicmd: The scsicmd that needs aborted
+ * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
+ * target on the bus
+ * @scsicmd: The scsicmd that needs to be aborted
*
- * Returns SUCCESS
+ * Return: SUCCESS if inserted, FAILED otherwise
*/
static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
{
@@ -438,24 +454,22 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
}
/*
- * visorhba_host_reset_handler - Not supported
- * @scsicmd: The scsicmd that needs aborted
+ * visorhba_host_reset_handler - Not supported
+ * @scsicmd: The scsicmd that needs to be aborted
*
- * Not supported, return SUCCESS
- * Returns SUCCESS
+ * Return: Not supported, return SUCCESS
*/
-static int
-visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
+static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
return SUCCESS;
}
/*
- * visorhba_get_info
- * @shp: Scsi host that is requesting information
+ * visorhba_get_info - Get information about SCSI device
+ * @shp: Scsi host that is requesting information
*
- * Returns string with info
+ * Return: String with visorhba information
*/
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
@@ -464,19 +478,19 @@ static const char *visorhba_get_info(struct Scsi_Host *shp)
}
/*
- * visorhba_queue_command_lck -- queues command to the Service Partition
- * @scsicmd: Command to be queued
- * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
+ * visorhba_queue_command_lck - Queues command to the Service Partition
+ * @scsicmd: Command to be queued
+ * @visorhba_cmnd_done: Done command to call when scsicmd is returned
*
- * Queues to scsicmd to the ServicePartition after converting it to a
- * uiscmdrsp structure.
+ * Queues the scsicmd to the Service Partition after converting it to a
+ * uiscmdrsp structure.
*
- * Returns success if queued to the Service Partition, otherwise
- * failure.
+ * Return: 0 if successfully queued to the Service Partition, otherwise
+ * error code
*/
-static int
-visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
- void (*visorhba_cmnd_done)(struct scsi_cmnd *))
+static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
+ void (*visorhba_cmnd_done)
+ (struct scsi_cmnd *))
{
struct uiscmdrsp *cmdrsp;
struct scsi_device *scsidev = scsicmd->device;
@@ -494,12 +508,10 @@ visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
(void *)scsicmd);
-
if (insert_location < 0)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
-
cmdrsp->cmdtype = CMD_SCSI_TYPE;
/* save the pending insertion location. Deletion from pending
* will return the scsicmd pointer for completion
@@ -515,7 +527,6 @@ visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
/* save datadir */
cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
-
cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
/* keep track of the max buffer length so far. */
@@ -555,13 +566,13 @@ static DEF_SCSI_QCMD(visorhba_queue_command)
#endif
/*
- * visorhba_slave_alloc - called when new disk is discovered
- * @scsidev: New disk
+ * visorhba_slave_alloc - Called when new disk is discovered
+ * @scsidev: New disk
*
- * Create a new visordisk_info structure and add it to our
- * list of vdisks.
+ * Create a new visordisk_info structure and add it to our
+ * list of vdisks.
*
- * Returns success when created, otherwise error.
+ * Return: 0 on success, -ENOMEM on failure.
*/
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
@@ -573,12 +584,14 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev)
struct visorhba_devdata *devdata;
struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
+ /* even though we errored, treat as success */
devdata = (struct visorhba_devdata *)scsihost->hostdata;
if (!devdata)
- return 0; /* even though we errored, treat as success */
+ return 0;
+ /* already allocated return success */
for_each_vdisk_match(vdisk, devdata, scsidev)
- return 0; /* already allocated return success */
+ return 0;
tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
if (!tmpvdisk)
@@ -592,11 +605,8 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev)
}
/*
- * visorhba_slave_destroy - disk is going away
- * @scsidev: scsi device going away
- *
- * Disk is going away, clean up resources.
- * Returns void.
+ * visorhba_slave_destroy - Disk is going away, clean up resources.
+ * @scsidev: Scsi device to destroy
*/
static void visorhba_slave_destroy(struct scsi_device *scsidev)
{
@@ -635,10 +645,13 @@ static struct scsi_host_template visorhba_driver_template = {
};
/*
- * info_debugfs_show - debugfs interface to dump visorhba states
+ * info_debugfs_show - Debugfs interface to dump visorhba states
+ * @seq: The sequence file to write information to
+ * @v: Unused, but needed for use with seq file single_open invocation
+ *
+ * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
*
- * This presents a file in the debugfs tree named:
- * /visorhba/vbus<x>:dev<y>/info
+ * Return: SUCCESS
*/
static int info_debugfs_show(struct seq_file *seq, void *v)
{
@@ -679,12 +692,13 @@ static const struct file_operations info_debugfs_fops = {
};
/*
- * complete_taskmgmt_command - complete task management
- * @cmdrsp: Response from the IOVM
+ * complete_taskmgmt_command - Complete task management
+ * @idrtable: The data object maintaining the pointer<-->int mappings
+ * @cmdrsp: Response from the IOVM
+ * @result: The result of the task management command
*
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- * Returns void
+ * Service Partition returned the result of the task management
+ * command. Wake up anyone waiting for it.
*/
static void complete_taskmgmt_command(struct idr *idrtable,
struct uiscmdrsp *cmdrsp, int result)
@@ -693,7 +707,6 @@ static void complete_taskmgmt_command(struct idr *idrtable,
idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
int *scsi_result_ptr =
idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
-
if (unlikely(!(wq && scsi_result_ptr))) {
pr_err("visorhba: no completion context; cmd will time out\n");
return;
@@ -708,13 +721,12 @@ static void complete_taskmgmt_command(struct idr *idrtable,
}
/*
- * visorhba_serverdown_complete - Called when we are done cleaning up
- * from serverdown
- * @work: work structure for this serverdown request
+ * visorhba_serverdown_complete - Called when we are done cleaning up
+ * from serverdown
+ * @devdata: Visorhba instance on which to complete serverdown
*
- * Called when we are done cleanning up from serverdown, stop processing
- * queue, fail pending IOs.
- * Returns void when finished cleaning up
+ * Called when we are done cleaning up from serverdown: stop processing the
+ * queue and fail pending IOs.
*/
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
@@ -758,12 +770,13 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
}
/*
- * visorhba_serverdown - Got notified that the IOVM is down
- * @devdata: visorhba that is being serviced by downed IOVM.
+ * visorhba_serverdown - Got notified that the IOVM is down
+ * @devdata: Visorhba that is being serviced by downed IOVM
*
- * Something happened to the IOVM, return immediately and
- * schedule work cleanup work.
- * Return SUCCESS or EINVAL
+ * Something happened to the IOVM, return immediately and
+ * schedule cleanup work.
+ *
+ * Return: 0 on success, -EINVAL on failure
*/
static int visorhba_serverdown(struct visorhba_devdata *devdata)
{
@@ -777,15 +790,14 @@ static int visorhba_serverdown(struct visorhba_devdata *devdata)
}
/*
- * do_scsi_linuxstat - scsi command returned linuxstat
- * @cmdrsp: response from IOVM
- * @scsicmd: Command issued.
+ * do_scsi_linuxstat - Scsi command returned linuxstat
+ * @cmdrsp: Response from IOVM
+ * @scsicmd: Command issued
*
- * Don't log errors for disk-not-present inquiries
- * Returns void
+ * Don't log errors for disk-not-present inquiries.
*/
-static void
-do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
+ struct scsi_cmnd *scsicmd)
{
struct visorhba_devdata *devdata;
struct visordisk_info *vdisk;
@@ -809,10 +821,10 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
}
-static int set_no_disk_inquiry_result(unsigned char *buf,
- size_t len, bool is_lun0)
+static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
+ bool is_lun0)
{
- if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
+ if (len < NO_DISK_INQUIRY_RESULT_LEN)
return -EINVAL;
memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
buf[2] = SCSI_SPC2_VER;
@@ -828,15 +840,14 @@ static int set_no_disk_inquiry_result(unsigned char *buf,
}
/*
- * do_scsi_nolinuxstat - scsi command didn't have linuxstat
- * @cmdrsp: response from IOVM
- * @scsicmd: Command issued.
+ * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
+ * @cmdrsp: Response from IOVM
+ * @scsicmd: Command issued
*
- * Handle response when no linuxstat was returned
- * Returns void
+ * Handle response when no linuxstat was returned.
*/
-static void
-do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
+ struct scsi_cmnd *scsicmd)
{
struct scsi_device *scsidev;
unsigned char *buf;
@@ -895,16 +906,15 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
/*
- * complete_scsi_command - complete a scsi command
- * @uiscmdrsp: Response from Service Partition
- * @scsicmd: The scsi command
+ * complete_scsi_command - Complete a scsi command
+ * @uiscmdrsp: Response from Service Partition
+ * @scsicmd: The scsi command
*
- * Response returned by the Service Partition, finish it and send
- * completion to the scsi midlayer.
- * Returns void.
+ * Response was returned by the Service Partition. Finish it and send
+ * completion to the scsi midlayer.
*/
-static void
-complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
+ struct scsi_cmnd *scsicmd)
{
/* take what we need out of cmdrsp and complete the scsicmd */
scsicmd->result = cmdrsp->scsi.linuxstat;
@@ -917,24 +927,23 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
/*
- * drain_queue - pull responses out of iochannel
- * @cmdrsp: Response from the IOSP
- * @devdata: device that owns this iochannel
+ * drain_queue - Pull responses out of iochannel
+ * @cmdrsp: Response from the IOSP
+ * @devdata: Device that owns this iochannel
*
- * Pulls responses out of the iochannel and process the responses.
- * Restuns void
+ * Pulls responses out of the iochannel and processes them.
*/
-static void
-drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
+static void drain_queue(struct uiscmdrsp *cmdrsp,
+ struct visorhba_devdata *devdata)
{
struct scsi_cmnd *scsicmd;
while (1) {
+ /* queue empty */
if (visorchannel_signalremove(devdata->dev->visorchannel,
IOCHAN_FROM_IOPART,
cmdrsp))
- break; /* queue empty */
-
+ break;
if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
/* scsicmd location is returned by the
* deletion
@@ -959,12 +968,14 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
}
/*
- * process_incoming_rsps - Process responses from IOSP
- * @v: void pointer to visorhba_devdata
+ * process_incoming_rsps - Process responses from IOSP
+ * @v: Void pointer to visorhba_devdata
*
- * Main function for the thread that processes the responses
- * from the IO Service Partition. When the queue is empty, wait
- * to check to see if it is full again.
+ * Main function for the thread that processes the responses
+ * from the IO Service Partition. When the queue is empty, wait
+ * and check whether it is full again.
+ *
+ * Return: 0 on success, -ENOMEM on failure
*/
static int process_incoming_rsps(void *v)
{
@@ -991,14 +1002,15 @@ static int process_incoming_rsps(void *v)
}
/*
- * visorhba_pause - function to handle visorbus pause messages
- * @dev: device that is pausing.
- * @complete_func: function to call when finished
+ * visorhba_pause - Function to handle visorbus pause messages
+ * @dev: Device that is pausing
+ * @complete_func: Function to call when finished
+ *
+ * Something has happened to the IO Service Partition that is
+ * handling this device. Quiet this device and reset commands
+ * so that the Service Partition can be corrected.
*
- * Something has happened to the IO Service Partition that is
- * handling this device. Quiet this device and reset commands
- * so that the Service Partition can be corrected.
- * Returns SUCCESS
+ * Return: SUCCESS
*/
static int visorhba_pause(struct visor_device *dev,
visorbus_state_complete_func complete_func)
@@ -1011,13 +1023,14 @@ static int visorhba_pause(struct visor_device *dev,
}
/*
- * visorhba_resume - function called when the IO Service Partition is back
- * @dev: device that is pausing.
- * @complete_func: function to call when finished
+ * visorhba_resume - Function called when the IO Service Partition is back
+ * @dev: Device that is pausing
+ * @complete_func: Function to call when finished
+ *
+ * Yay! The IO Service Partition is back; the channel has been wiped,
+ * so let's re-establish the connection and start processing responses.
*
- * Yay! The IO Service Partition is back, the channel has been wiped
- * so lets re-establish connection and start processing responses.
- * Returns 0 on success, error on failure.
+ * Return: 0 on success, -EINVAL on failure
*/
static int visorhba_resume(struct visor_device *dev,
visorbus_state_complete_func complete_func)
@@ -1033,7 +1046,6 @@ static int visorhba_resume(struct visor_device *dev,
devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
"vhba_incming");
-
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1041,11 +1053,12 @@ static int visorhba_resume(struct visor_device *dev,
}
/*
- * visorhba_probe - device has been discovered, do acquire
- * @dev: visor_device that was discovered
+ * visorhba_probe - Device has been discovered; do acquire
+ * @dev: visor_device that was discovered
+ *
+ * A new HBA was discovered; set up its initial connections.
*
- * A new HBA was discovered, do the initial connections of it.
- * Return 0 on success, otherwise error.
+ * Return: 0 on success, otherwise error code
*/
static int visorhba_probe(struct visor_device *dev)
{
@@ -1139,11 +1152,10 @@ err_scsi_host_put:
}
/*
- * visorhba_remove - remove a visorhba device
- * @dev: Device to remove
+ * visorhba_remove - Remove a visorhba device
+ * @dev: Device to remove
*
- * Removes the visorhba device.
- * Returns void.
+ * Removes the visorhba device.
*/
static void visorhba_remove(struct visor_device *dev)
{
@@ -1181,10 +1193,12 @@ static struct visor_driver visorhba_driver = {
};
/*
- * visorhba_init - driver init routine
+ * visorhba_init - Driver init routine
+ *
+ * Initialize the visorhba driver and register it with visorbus
+ * to handle the s-Par virtual host bus adapter.
*
- * Initialize the visorhba driver and register it with visorbus
- * to handle s-Par virtual host bus adapter.
+ * Return: 0 on success, error code otherwise
*/
static int visorhba_init(void)
{
@@ -1207,9 +1221,9 @@ cleanup_debugfs:
}
/*
- * visorhba_exit - driver exit routine
+ * visorhba_exit - Driver exit routine
*
- * Unregister driver from the bus and free up memory.
+ * Unregister driver from the bus and free up memory.
*/
static void visorhba_exit(void)
{
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index a4baea53c518..3ffad83100c4 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -20,44 +20,35 @@
/* These defines identify mouse and keyboard activity which is specified by the
* firmware to the host using the cmsimpleinput protocol. @ingroup coretypes
*/
-#define INPUTACTION_XY_MOTION 1 /* only motion; arg1=x, arg2=y */
-#define INPUTACTION_MOUSE_BUTTON_DOWN 2 /* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_UP 3 /* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_CLICK 4 /* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_DCLICK 5 /* arg1: 1=left,2=center,
- * 3=right
- */
-#define INPUTACTION_WHEEL_ROTATE_AWAY 6 /* arg1: wheel rotation away from
- * user
- */
-#define INPUTACTION_WHEEL_ROTATE_TOWARD 7 /* arg1: wheel rotation toward
- * user
- */
-#define INPUTACTION_KEY_DOWN 64 /* arg1: scancode, as follows:
- * If arg1 <= 0xff, it's a 1-byte
- * scancode and arg1 is that scancode.
- * If arg1 > 0xff, it's a 2-byte
- * scanecode, with the 1st byte in the
- * low 8 bits, and the 2nd byte in the
- * high 8 bits. E.g., the right ALT key
- * would appear as x'38e0'.
- */
-#define INPUTACTION_KEY_UP 65 /* arg1: scancode (in same format as
- * inputaction_keyDown)
- */
+/* only motion; arg1=x, arg2=y */
+#define INPUTACTION_XY_MOTION 1
+/* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_DOWN 2
+/* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_UP 3
+/* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_CLICK 4
+/* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_DCLICK 5
+/* arg1: wheel rotation away from user */
+#define INPUTACTION_WHEEL_ROTATE_AWAY 6
+/* arg1: wheel rotation toward user */
+#define INPUTACTION_WHEEL_ROTATE_TOWARD 7
+/* arg1: scancode, as follows: If arg1 <= 0xff, it's a 1-byte scancode and arg1
+ * is that scancode. If arg1 > 0xff, it's a 2-byte scancode, with the 1st
+ * byte in the low 8 bits, and the 2nd byte in the high 8 bits.
+ * E.g., the right ALT key would appear as x'38e0'.
+ */
+#define INPUTACTION_KEY_DOWN 64
+/* arg1: scancode (in same format as inputaction_keyDown) */
+#define INPUTACTION_KEY_UP 65
+/* arg1: scancode (in same format as inputaction_keyDown); MUST refer to one of
+ * the locking keys, like capslock, numlock, or scrolllock.
+ * arg2: 1 iff locking key should be in the LOCKED position (e.g., light is ON)
+ */
#define INPUTACTION_SET_LOCKING_KEY_STATE 66
- /* arg1: scancode (in same format
- * as inputaction_keyDown);
- * MUST refer to one of the
- * locking keys, like capslock,
- * numlock, or scrolllock
- * arg2: 1 iff locking key should be
- * in the LOCKED position
- * (e.g., light is ON)
- */
-#define INPUTACTION_KEY_DOWN_UP 67 /* arg1: scancode (in same format
- * as inputaction_keyDown)
- */
+/* arg1: scancode (in same format as inputaction_keyDown) */
+#define INPUTACTION_KEY_DOWN_UP 67
struct visor_inputactivity {
u16 action;
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 45bc340d4e9d..65060e9b6132 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -60,11 +60,13 @@ enum visorinput_device_type {
*/
struct visorinput_devdata {
struct visor_device *dev;
- struct mutex lock_visor_dev; /* lock for dev */
+ /* lock for dev */
+ struct mutex lock_visor_dev;
struct input_dev *visorinput_dev;
bool paused;
bool interrupts_enabled;
- unsigned int keycode_table_bytes; /* size of following array */
+ /* size of following array */
+ unsigned int keycode_table_bytes;
/* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
unsigned char keycode_table[0];
};
@@ -162,9 +164,8 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[81] = KEY_KP3,
[82] = KEY_KP0,
[83] = KEY_KPDOT,
- [86] = KEY_102ND, /* enables UK backslash+pipe key,
- * and FR lessthan+greaterthan key
- */
+ /* enables UK backslash+pipe key and FR lessthan+greaterthan key */
+ [86] = KEY_102ND,
[87] = KEY_F11,
[88] = KEY_F12,
[90] = KEY_KPLEFTPAREN,
@@ -260,7 +261,6 @@ static void visorinput_close(struct input_dev *visorinput_dev)
* interrupts should be disabled so when we resume we will
* not re-enable them.
*/
-
mutex_lock(&devdata->lock_visor_dev);
devdata->interrupts_enabled = false;
if (devdata->paused)
@@ -276,15 +276,13 @@ out_unlock:
* we can use to deliver keyboard inputs to Linux. We of course do this when
* we see keyboard inputs coming in on a keyboard channel.
*/
-static struct input_dev *
-setup_client_keyboard(void *devdata, /* opaque on purpose */
- unsigned char *keycode_table)
+static struct input_dev *setup_client_keyboard(void *devdata,
+ unsigned char *keycode_table)
{
int i;
- struct input_dev *visorinput_dev;
+ struct input_dev *visorinput_dev = input_allocate_device();
- visorinput_dev = input_allocate_device();
if (!visorinput_dev)
return NULL;
@@ -302,7 +300,8 @@ setup_client_keyboard(void *devdata, /* opaque on purpose */
BIT_MASK(LED_SCROLLL) |
BIT_MASK(LED_NUML);
visorinput_dev->keycode = keycode_table;
- visorinput_dev->keycodesize = 1; /* sizeof(unsigned char) */
+ /* sizeof(unsigned char) */
+ visorinput_dev->keycodesize = 1;
visorinput_dev->keycodemax = KEYCODE_TABLE_BYTES;
for (i = 1; i < visorinput_dev->keycodemax; i++)
@@ -313,19 +312,18 @@ setup_client_keyboard(void *devdata, /* opaque on purpose */
visorinput_dev->open = visorinput_open;
visorinput_dev->close = visorinput_close;
- input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */
+ /* pre input_register! */
+ input_set_drvdata(visorinput_dev, devdata);
return visorinput_dev;
}
-static struct input_dev *
-setup_client_mouse(void *devdata /* opaque on purpose */)
+static struct input_dev *setup_client_mouse(void *devdata)
{
- struct input_dev *visorinput_dev = NULL;
int xres, yres;
struct fb_info *fb0;
+ struct input_dev *visorinput_dev = input_allocate_device();
- visorinput_dev = input_allocate_device();
if (!visorinput_dev)
return NULL;
@@ -354,14 +352,16 @@ setup_client_mouse(void *devdata /* opaque on purpose */)
visorinput_dev->open = visorinput_open;
visorinput_dev->close = visorinput_close;
- input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */
+ /* pre input_register! */
+ input_set_drvdata(visorinput_dev, devdata);
input_set_capability(visorinput_dev, EV_REL, REL_WHEEL);
return visorinput_dev;
}
-static struct visorinput_devdata *
-devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
+static struct visorinput_devdata *devdata_create(
+ struct visor_device *dev,
+ enum visorinput_device_type devtype)
{
struct visorinput_devdata *devdata = NULL;
unsigned int extra_bytes = 0;
@@ -446,8 +446,7 @@ err_kfree_devdata:
return NULL;
}
-static int
-visorinput_probe(struct visor_device *dev)
+static int visorinput_probe(struct visor_device *dev)
{
uuid_le guid;
enum visorinput_device_type devtype;
@@ -465,15 +464,13 @@ visorinput_probe(struct visor_device *dev)
return 0;
}
-static void
-unregister_client_input(struct input_dev *visorinput_dev)
+static void unregister_client_input(struct input_dev *visorinput_dev)
{
if (visorinput_dev)
input_unregister_device(visorinput_dev);
}
-static void
-visorinput_remove(struct visor_device *dev)
+static void visorinput_remove(struct visor_device *dev)
{
struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
@@ -499,9 +496,8 @@ visorinput_remove(struct visor_device *dev)
* Make it so the current locking state of the locking key indicated by
* <keycode> is as indicated by <desired_state> (1=locked, 0=unlocked).
*/
-static void
-handle_locking_key(struct input_dev *visorinput_dev,
- int keycode, int desired_state)
+static void handle_locking_key(struct input_dev *visorinput_dev, int keycode,
+ int desired_state)
{
int led;
@@ -533,17 +529,15 @@ handle_locking_key(struct input_dev *visorinput_dev,
* with 0xE0 in the low byte and the extended scancode value in the next
* higher byte.
*/
-static int
-scancode_to_keycode(int scancode)
+static int scancode_to_keycode(int scancode)
{
if (scancode > 0xff)
return visorkbd_ext_keycode[(scancode >> 8) & 0xff];
- return visorkbd_keycode[scancode];
+ return visorkbd_keycode[scancode];
}
-static int
-calc_button(int x)
+static int calc_button(int x)
{
switch (x) {
case 1:
@@ -562,15 +556,13 @@ calc_button(int x)
* client guest partition. It is called periodically so we can obtain inputs
* from the channel, and deliver them to the guest OS.
*/
-static void
-visorinput_channel_interrupt(struct visor_device *dev)
+static void visorinput_channel_interrupt(struct visor_device *dev)
{
struct visor_inputreport r;
int scancode, keycode;
struct input_dev *visorinput_dev;
int xmotion, ymotion, button;
int i;
-
struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
if (!devdata)
@@ -626,7 +618,6 @@ visorinput_channel_interrupt(struct visor_device *dev)
if (button < 0)
break;
input_report_key(visorinput_dev, button, 1);
-
input_sync(visorinput_dev);
input_report_key(visorinput_dev, button, 0);
input_sync(visorinput_dev);
@@ -657,9 +648,8 @@ visorinput_channel_interrupt(struct visor_device *dev)
}
}
-static int
-visorinput_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
+static int visorinput_pause(struct visor_device *dev,
+ visorbus_state_complete_func complete_func)
{
int rc;
struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
@@ -681,7 +671,6 @@ visorinput_pause(struct visor_device *dev,
* due to above, at this time no thread of execution will be
* in visorinput_channel_interrupt()
*/
-
devdata->paused = true;
complete_func(dev, 0);
rc = 0;
@@ -691,9 +680,8 @@ out:
return rc;
}
-static int
-visorinput_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func)
+static int visorinput_resume(struct visor_device *dev,
+ visorbus_state_complete_func complete_func)
{
int rc;
struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
@@ -743,14 +731,12 @@ static struct visor_driver visorinput_driver = {
.resume = visorinput_resume,
};
-static int
-visorinput_init(void)
+static int visorinput_init(void)
{
return visorbus_register_visor_driver(&visorinput_driver);
}
-static void
-visorinput_cleanup(void)
+static void visorinput_cleanup(void)
{
visorbus_unregister_visor_driver(&visorinput_driver);
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 2891622eef18..0b39676e0daf 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -84,7 +84,8 @@ struct visornic_devdata {
u64 incarnation_id;
/* flags as they were prior to set_multicast_list */
unsigned short old_flags;
- atomic_t usage; /* count of users */
+ /* count of users */
+ atomic_t usage;
/* number of rcv buffers the vnic will post */
int num_rcv_bufs;
@@ -108,31 +109,43 @@ struct visornic_devdata {
struct uiscmdrsp *cmdrsp_rcv;
/* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
struct uiscmdrsp *xmit_cmdrsp;
-
- bool server_down; /* IOPART is down */
- bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
- bool going_away; /* device is being torn down */
+ /* IOPART is down */
+ bool server_down;
+ /* Processing SERVER_CHANGESTATE msg */
+ bool server_change_state;
+ /* device is being torn down */
+ bool going_away;
struct dentry *eth_debugfs_dir;
u64 interrupts_rcvd;
u64 interrupts_notme;
u64 interrupts_disabled;
u64 busy_cnt;
- spinlock_t priv_lock; /* spinlock to access devdata structures */
+ /* spinlock to access devdata structures */
+ spinlock_t priv_lock;
/* flow control counter */
u64 flow_control_upper_hits;
u64 flow_control_lower_hits;
/* debug counters */
- unsigned long n_rcv0; /* # rcvs of 0 buffers */
- unsigned long n_rcv1; /* # rcvs of 1 buffers */
- unsigned long n_rcv2; /* # rcvs of 2 buffers */
- unsigned long n_rcvx; /* # rcvs of >2 buffers */
- unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */
- unsigned long repost_found_skb_cnt; /* # of found the skb */
- unsigned long n_repost_deficit; /* # of lost rcv buffers */
- unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
- unsigned long n_rcv_packets_not_accepted;/* # bogs rcv packets */
+ /* # rcvs of 0 buffers */
+ unsigned long n_rcv0;
+ /* # rcvs of 1 buffers */
+ unsigned long n_rcv1;
+ /* # rcvs of 2 buffers */
+ unsigned long n_rcv2;
+ /* # rcvs of >2 buffers */
+ unsigned long n_rcvx;
+ /* # repost_rcvbuf_cnt */
+ unsigned long found_repost_rcvbuf_cnt;
+ /* # of times the skb was found */
+ unsigned long repost_found_skb_cnt;
+ /* # of lost rcv buffers */
+ unsigned long n_repost_deficit;
+ /* # of unknown rcv skb not freed */
+ unsigned long bad_rcv_buf;
+ /* # bogus rcv packets */
+ unsigned long n_rcv_packets_not_accepted;
int queuefullmsg_logged;
struct chanstat chstat;
@@ -142,9 +155,9 @@ struct visornic_devdata {
};
/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
-static u16
-add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
- u16 max_pi_arr_entries, struct phys_info pi_arr[])
+static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len,
+ u16 index, u16 max_pi_arr_entries,
+ struct phys_info pi_arr[])
{
u32 len;
u16 i, firstlen;
@@ -190,10 +203,10 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
* Return value indicates number of entries filled in frags
* Negative values indicate an error.
*/
-static int
-visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
- unsigned int frags_max,
- struct phys_info frags[])
+static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
+ unsigned int firstfraglen,
+ unsigned int frags_max,
+ struct phys_info frags[])
{
unsigned int count = 0, frag, size, offset = 0, numfrags;
unsigned int total_count;
@@ -296,12 +309,9 @@ static const struct file_operations debugfs_enable_ints_fops = {
* being down.
* Returns void.
*/
-static void
-visornic_serverdown_complete(struct visornic_devdata *devdata)
+static void visornic_serverdown_complete(struct visornic_devdata *devdata)
{
- struct net_device *netdev;
-
- netdev = devdata->netdev;
+ struct net_device *netdev = devdata->netdev;
/* Stop polling for interrupts */
del_timer_sync(&devdata->irq_poll_timer);
@@ -330,9 +340,8 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
* sure we haven't already handled the server change state event.
* Returns 0 if we scheduled the work, -EINVAL on error.
*/
-static int
-visornic_serverdown(struct visornic_devdata *devdata,
- visorbus_state_complete_func complete_func)
+static int visornic_serverdown(struct visornic_devdata *devdata,
+ visorbus_state_complete_func complete_func)
{
unsigned long flags;
int err;
@@ -377,8 +386,7 @@ err_unlock:
* so that it can write rcv data into our memory space.
* Return pointer to sk_buff
*/
-static struct sk_buff *
-alloc_rcv_buf(struct net_device *netdev)
+static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
{
struct sk_buff *skb;
@@ -409,9 +417,8 @@ alloc_rcv_buf(struct net_device *netdev)
* Send the skb to the IO Partition.
* Returns 0 or error
*/
-static int
-post_skb(struct uiscmdrsp *cmdrsp,
- struct visornic_devdata *devdata, struct sk_buff *skb)
+static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
+ struct sk_buff *skb)
{
int err;
@@ -437,7 +444,6 @@ post_skb(struct uiscmdrsp *cmdrsp,
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
-
return 0;
}
@@ -451,9 +457,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
* Send the enable/disable message to the IO Partition.
* Returns 0 or error
*/
-static int
-send_enbdis(struct net_device *netdev, int state,
- struct visornic_devdata *devdata)
+static int send_enbdis(struct net_device *netdev, int state,
+ struct visornic_devdata *devdata)
{
int err;
@@ -479,10 +484,9 @@ send_enbdis(struct net_device *netdev, int state,
* are disabled, reclaim memory from rcv bufs.
* Returns 0 on success, negative for failure of IO Partition
* responding.
- *
*/
-static int
-visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
+static int visornic_disable_with_timeout(struct net_device *netdev,
+ const int timeout)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
int i;
@@ -493,7 +497,8 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
/* send a msg telling the other end we are stopping incoming pkts */
spin_lock_irqsave(&devdata->priv_lock, flags);
devdata->enabled = 0;
- devdata->enab_dis_acked = 0; /* must wait for ack */
+ /* must wait for ack */
+ devdata->enab_dis_acked = 0;
spin_unlock_irqrestore(&devdata->priv_lock, flags);
/* send disable and wait for ack -- don't hold lock when sending
@@ -568,8 +573,8 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
* Allocate rcv buffers and post them to the IO Partition.
* Return 0 for success, and negative for failure.
*/
-static int
-init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
+static int init_rcv_bufs(struct net_device *netdev,
+ struct visornic_devdata *devdata)
{
int i, j, count, err;
@@ -578,10 +583,12 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
*/
for (i = 0; i < devdata->num_rcv_bufs; i++) {
devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
+ /* if we failed to allocate one, let us stop */
if (!devdata->rcvbuf[i])
- break; /* if we failed to allocate one let us stop */
+ break;
}
- if (i == 0) /* couldn't even allocate one -- bail out */
+ /* couldn't even allocate one -- bail out */
+ if (i == 0)
return -ENOMEM;
count = i;
@@ -633,8 +640,8 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
* timeout is defined in msecs (timeout of 0 specifies infinite wait)
* Return 0 for success, negative for failure.
*/
-static int
-visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
+static int visornic_enable_with_timeout(struct net_device *netdev,
+ const int timeout)
{
int err = 0;
struct visornic_devdata *devdata = netdev_priv(netdev);
@@ -695,7 +702,6 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
}
netif_start_queue(netdev);
-
return 0;
}
@@ -707,8 +713,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
* device for our virtual NIC we will send a Disable and Enable
* to the IOVM. If it doesn't respond we will trigger a serverdown.
*/
-static void
-visornic_timeout_reset(struct work_struct *work)
+static void visornic_timeout_reset(struct work_struct *work)
{
struct visornic_devdata *devdata;
struct net_device *netdev;
@@ -749,11 +754,9 @@ call_serverdown:
* Enable the device and start the transmit queue.
* Return 0 for success
*/
-static int
-visornic_open(struct net_device *netdev)
+static int visornic_open(struct net_device *netdev)
{
visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
-
return 0;
}
@@ -764,11 +767,9 @@ visornic_open(struct net_device *netdev)
* Disable the device and stop the transmit queue.
* Return 0 for success
*/
-static int
-visornic_close(struct net_device *netdev)
+static int visornic_close(struct net_device *netdev)
{
visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
-
return 0;
}
@@ -830,8 +831,7 @@ static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
* can be called again.
* Returns NETDEV_TX_OK.
*/
-static int
-visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
+static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct visornic_devdata *devdata;
int len, firstfraglen, padlen;
@@ -938,6 +938,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
* - everything else will be pass in frags & DMA'ed
*/
memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
+
/* copy frags info - from skb->data we need to only provide access
* beyond eth header
*/
@@ -997,8 +998,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
*
* Returns the net_device_stats for the device
*/
-static struct net_device_stats *
-visornic_get_stats(struct net_device *netdev)
+static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
@@ -1016,8 +1016,7 @@ visornic_get_stats(struct net_device *netdev)
* Currently not supported.
* Returns EINVAL
*/
-static int
-visornic_change_mtu(struct net_device *netdev, int new_mtu)
+static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
{
return -EINVAL;
}
@@ -1029,8 +1028,7 @@ visornic_change_mtu(struct net_device *netdev, int new_mtu)
* Only flag we support currently is IFF_PROMISC
* Returns void
*/
-static void
-visornic_set_multi(struct net_device *netdev)
+static void visornic_set_multi(struct net_device *netdev)
{
struct uiscmdrsp *cmdrsp;
struct visornic_devdata *devdata = netdev_priv(netdev);
@@ -1070,8 +1068,7 @@ out_save_flags:
* been informed the IO Partition is gone, if it is gone
* we will already timeout the xmits.
*/
-static void
-visornic_xmit_timeout(struct net_device *netdev)
+static void visornic_xmit_timeout(struct net_device *netdev)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags;
@@ -1108,9 +1105,9 @@ visornic_xmit_timeout(struct net_device *netdev)
* we are finished with them.
* Returns 0 for success, -1 for error.
*/
-static int
-repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
- struct sk_buff *skb, struct net_device *netdev)
+static int repost_return(struct uiscmdrsp *cmdrsp,
+ struct visornic_devdata *devdata,
+ struct sk_buff *skb, struct net_device *netdev)
{
struct net_pkt_rcv copy;
int i = 0, cc, numreposted;
@@ -1182,8 +1179,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
* it up the stack.
* Returns 1 iff an skb was received, otherwise 0
*/
-static int
-visornic_rx(struct uiscmdrsp *cmdrsp)
+static int visornic_rx(struct uiscmdrsp *cmdrsp)
{
struct visornic_devdata *devdata;
struct sk_buff *skb, *prev, *curr;
@@ -1236,7 +1232,8 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* firstfrag & set data_len to show rest see if we have to chain
* frag_list.
*/
- if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */
+ /* do PRECAUTIONARY check */
+ if (skb->len > RCVPOST_BUF_SIZE) {
if (cmdrsp->net.rcv.numrcvbufs < 2) {
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
@@ -1244,23 +1241,24 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
return 0;
}
/* length rcvd is greater than firstfrag in this skb rcv buf */
- skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
- skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
- * will be in
- * frag_list
- */
+ /* amount in skb->data */
+ skb->tail += RCVPOST_BUF_SIZE;
+ /* amount that will be in frag_list */
+ skb->data_len = skb->len - RCVPOST_BUF_SIZE;
} else {
/* data fits in this skb - no chaining - do
* PRECAUTIONARY check
*/
- if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */
+ /* should be 1 */
+ if (cmdrsp->net.rcv.numrcvbufs != 1) {
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
return 0;
}
skb->tail += skb->len;
- skb->data_len = 0; /* nothing rcvd in frag_list */
+ /* nothing rcvd in frag_list */
+ skb->data_len = 0;
}
off = skb_tail_pointer(skb) - skb->data;
@@ -1286,7 +1284,8 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
curr->next = NULL;
- if (!prev) /* start of list- set head */
+ /* start of list - set head */
+ if (!prev)
skb_shinfo(skb)->frag_list = curr;
else
prev->next = curr;
@@ -1314,18 +1313,18 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* sets up skb->pkt_type & it also PULLS out the eth header
*/
skb->protocol = eth_type_trans(skb, netdev);
-
eth = eth_hdr(skb);
-
skb->csum = 0;
skb->ip_summed = CHECKSUM_NONE;
do {
+ /* accept all packets */
if (netdev->flags & IFF_PROMISC)
- break; /* accept all packets */
+ break;
if (skb->pkt_type == PACKET_BROADCAST) {
+ /* accept all broadcast packets */
if (netdev->flags & IFF_BROADCAST)
- break; /* accept all broadcast packets */
+ break;
} else if (skb->pkt_type == PACKET_MULTICAST) {
if ((netdev->flags & IFF_MULTICAST) &&
(netdev_mc_count(netdev))) {
@@ -1385,8 +1384,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* values.
* Returns a pointer to the devdata structure
*/
-static struct visornic_devdata *
-devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
+static struct visornic_devdata *devdata_initialize(
+ struct visornic_devdata *devdata,
+ struct visor_device *dev)
{
devdata->dev = dev;
devdata->incarnation_id = get_jiffies_64();
@@ -1577,8 +1577,7 @@ static const struct file_operations debugfs_info_fops = {
* Send receive buffers to the IO Partition.
* Returns void
*/
-static int
-send_rcv_posts_if_needed(struct visornic_devdata *devdata)
+static int send_rcv_posts_if_needed(struct visornic_devdata *devdata)
{
int i;
struct net_device *netdev;
@@ -1625,8 +1624,8 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
* @cmdrsp: io channel command response message
* @devdata: visornic device to drain
*/
-static void
-drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
+static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
+ struct visornic_devdata *devdata)
{
while (!visorchannel_signalremove(devdata->dev->visorchannel,
IOCHAN_FROM_IOPART,
@@ -1643,21 +1642,22 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
* Process the responses as we get them.
* Returns when response queue is empty or when the thread stops.
*/
-static void
-service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
- int *rx_work_done, int budget)
+static void service_resp_queue(struct uiscmdrsp *cmdrsp,
+ struct visornic_devdata *devdata,
+ int *rx_work_done, int budget)
{
unsigned long flags;
struct net_device *netdev;
while (*rx_work_done < budget) {
- /* TODO: CLIENT ACQUIRE -- Don't really need this at the
- * moment
- */
+ /* TODO: CLIENT ACQUIRE -- Don't really need this at the
+ * moment
+ */
+ /* queue empty */
if (visorchannel_signalremove(devdata->dev->visorchannel,
IOCHAN_FROM_IOPART,
cmdrsp))
- break; /* queue empty */
+ break;
switch (cmdrsp->net.type) {
case NET_RCV:
@@ -1763,8 +1763,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
* response queue and drain it if needed.
* Returns when thread has stopped.
*/
-static void
-poll_for_irq(unsigned long v)
+static void poll_for_irq(unsigned long v)
{
struct visornic_devdata *devdata = (struct visornic_devdata *)v;
@@ -1831,7 +1830,8 @@ static int visornic_probe(struct visor_device *dev)
dev_set_drvdata(&dev->device, devdata);
init_waitqueue_head(&devdata->rsp_queue);
spin_lock_init(&devdata->priv_lock);
- devdata->enabled = 0; /* not yet */
+ /* not yet */
+ devdata->enabled = 0;
atomic_set(&devdata->usage, 1);
/* Setup rcv bufs */
@@ -1984,7 +1984,8 @@ static void host_side_disappeared(struct visornic_devdata *devdata)
unsigned long flags;
spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->dev = NULL; /* indicate device destroyed */
+ /* indicate device destroyed */
+ devdata->dev = NULL;
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
@@ -2023,8 +2024,8 @@ static void visornic_remove(struct visor_device *dev)
cancel_work_sync(&devdata->timeout_reset);
debugfs_remove_recursive(devdata->eth_debugfs_dir);
-
- unregister_netdev(netdev); /* this will call visornic_close() */
+ /* this will call visornic_close() */
+ unregister_netdev(netdev);
del_timer_sync(&devdata->irq_poll_timer);
netif_napi_del(&devdata->napi);
@@ -2160,7 +2161,6 @@ static int visornic_init(void)
cleanup_debugfs:
debugfs_remove_recursive(visornic_debugfs_dir);
-
return err;
}
@@ -2172,7 +2172,6 @@ cleanup_debugfs:
static void visornic_cleanup(void)
{
visorbus_unregister_visor_driver(&visornic_driver);
-
debugfs_remove_recursive(visornic_debugfs_dir);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 7fa0310e7b9e..2e52f07bbaa9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -51,7 +51,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
sema_init(&queue->pop, 0);
sema_init(&queue->push, 0);
- queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
+ queue->storage = kcalloc(size, sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
if (!queue->storage) {
vchiu_queue_delete(queue);
return 0;
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index f5db2b3d9045..14034e342aa6 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -649,19 +649,19 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *priv,
pr_debug("BASIC RATE: %X\n", priv->basic_rates);
if (!CARDbIsOFDMinBasicRate((void *)priv)) {
- pr_debug("CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
+ pr_debug("%s:(NO OFDM) %d\n", __func__, wRateIdx);
if (wRateIdx > RATE_24M)
wRateIdx = RATE_24M;
return wRateIdx;
}
while (ui > RATE_11M) {
if (priv->basic_rates & ((u32)0x1 << ui)) {
- pr_debug("CARDwGetOFDMControlRate : %d\n", ui);
+ pr_debug("%s : %d\n", __func__, ui);
return (unsigned short)ui;
}
ui--;
}
- pr_debug("CARDwGetOFDMControlRate: 6M\n");
+ pr_debug("%s: 6M\n", __func__);
return (unsigned short)RATE_24M;
}
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 4aaa99bafcda..f7550b215f72 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -809,7 +809,7 @@ void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
if (byLocalID <= 1)
return;
- pr_debug("MACvSetKeyEntry\n");
+ pr_debug("%s\n", __func__);
offset = MISCFIFO_KEYETRY0;
offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 4832666cc580..74715c854856 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -83,7 +83,7 @@
#define CONFIG_PATH "/etc/vntconfiguration.dat"
#define MAX_UINTS 8
-#define OPTION_DEFAULT { [0 ... MAX_UINTS-1] = -1}
+#define OPTION_DEFAULT { [0 ... MAX_UINTS - 1] = -1}
#define DUPLICATE_RX_CACHE_LENGTH 5
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 282f665aacfa..093a6048bd22 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -65,7 +65,7 @@ int vnt_download_firmware(struct vnt_private *priv)
status = vnt_control_out(priv,
0,
- 0x1200+ii,
+ 0x1200 + ii,
0x0000,
length,
buffer);
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index 906d3454591d..cfc6c2131536 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -46,6 +46,6 @@
int vnt_key_init_table(struct vnt_private *priv);
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif, struct ieee80211_key_conf *key);
+ struct ieee80211_vif *vif, struct ieee80211_key_conf *key);
#endif /* __KEY_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 095b85567306..cc6d8778fe5b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -419,8 +419,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
int ii;
for (ii = 0; ii < priv->num_tx_context; ii++) {
- tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
- GFP_KERNEL);
+ tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
if (!tx_context)
goto free_tx;
@@ -437,7 +436,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
}
for (ii = 0; ii < priv->num_rcb; ii++) {
- priv->rcb[ii] = kzalloc(sizeof(struct vnt_rcb), GFP_KERNEL);
+ priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
if (!priv->rcb[ii]) {
dev_err(&priv->usb->dev,
"failed to allocate rcb no %d\n", ii);
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index e322b7d8c617..c466e0614bc4 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -74,16 +74,15 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval)
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_GO2DOZE);
if (listen_interval >= 2) {
-
/* clear always listen beacon */
vnt_mac_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
/* first time set listen next beacon */
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN);
- } else
-
+ } else {
/* always listen beacon */
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
+ }
dev_dbg(&priv->usb->dev, "PS:Power Saving Mode Enable...\n");
}
@@ -100,7 +99,6 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval)
void vnt_disable_power_saving(struct vnt_private *priv)
{
-
/* disable power saving hw function */
vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
0, 0, NULL);
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 23581afb4211..3a9d19a0b842 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -611,7 +611,7 @@ int vnt_rf_write_embedded(struct vnt_private *priv, u32 data)
reg_data[3] = (u8)(data >> 24);
vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF,
- 0, 0, ARRAY_SIZE(reg_data), reg_data);
+ 0, 0, ARRAY_SIZE(reg_data), reg_data);
return true;
}
@@ -643,9 +643,9 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
case RATE_48M:
case RATE_54M:
if (channel > CB_MAX_CHANNEL_24G)
- power = priv->ofdm_a_pwr_tbl[channel-15];
+ power = priv->ofdm_a_pwr_tbl[channel - 15];
else
- power = priv->ofdm_pwr_tbl[channel-1];
+ power = priv->ofdm_pwr_tbl[channel - 1];
break;
}
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index dc11a05be8c4..23eaef458556 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -44,7 +44,7 @@
#define USB_CTL_WAIT 500 /* ms */
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer)
+ u16 index, u16 length, u8 *buffer)
{
int status = 0;
u8 *usb_buffer;
@@ -82,7 +82,7 @@ void vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
}
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer)
+ u16 index, u16 length, u8 *buffer)
{
int status;
u8 *usb_buffer;
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 2568dfc15181..7b620658ec38 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1963,7 +1963,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
if (result) {
- netdev_err(vif->ndev, "Failed to SET incative time\n");
+ netdev_err(vif->ndev, "Failed to SET inactive time\n");
return -EFAULT;
}
@@ -1976,7 +1976,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
if (result) {
- netdev_err(vif->ndev, "Failed to get incative time\n");
+ netdev_err(vif->ndev, "Failed to get inactive time\n");
return -EFAULT;
}
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index dbb3e24615be..1266dcccad30 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -283,7 +283,8 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
static int linux_wlan_txq_task(void *vp)
{
- int ret, txq_count;
+ int ret;
+ u32 txq_count;
struct wilc_vif *vif;
struct wilc *wl;
struct net_device *dev = vp;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 68fd5b3b8b2d..ac5aaafa461c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -214,48 +214,39 @@ static u32 get_rssi_avg(struct network_info *network_info)
return rssi_v;
}
-static void refresh_scan(void *user_void, u8 all, bool direct_scan)
+static void refresh_scan(struct wilc_priv *priv, bool direct_scan)
{
- struct wilc_priv *priv;
- struct wiphy *wiphy;
- struct cfg80211_bss *bss = NULL;
+ struct wiphy *wiphy = priv->dev->ieee80211_ptr->wiphy;
int i;
- int rssi = 0;
-
- priv = user_void;
- wiphy = priv->dev->ieee80211_ptr->wiphy;
for (i = 0; i < last_scanned_cnt; i++) {
struct network_info *network_info;
+ s32 freq;
+ struct ieee80211_channel *channel;
+ int rssi;
+ struct cfg80211_bss *bss;
network_info = &last_scanned_shadow[i];
- if (!network_info->found || all) {
- s32 freq;
- struct ieee80211_channel *channel;
-
- if (network_info) {
- freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
- channel = ieee80211_get_channel(wiphy, freq);
-
- rssi = get_rssi_avg(network_info);
- if (memcmp("DIRECT-", network_info->ssid, 7) ||
- direct_scan) {
- bss = cfg80211_inform_bss(wiphy,
- channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- network_info->bssid,
- network_info->tsf_hi,
- network_info->cap_info,
- network_info->beacon_period,
- (const u8 *)network_info->ies,
- (size_t)network_info->ies_len,
- (s32)rssi * 100,
- GFP_KERNEL);
- cfg80211_put_bss(wiphy, bss);
- }
- }
- }
+ if (!memcmp("DIRECT-", network_info->ssid, 7) && !direct_scan)
+ continue;
+
+ freq = ieee80211_channel_to_frequency((s32)network_info->ch,
+ NL80211_BAND_2GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ rssi = get_rssi_avg(network_info);
+ bss = cfg80211_inform_bss(wiphy,
+ channel,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ network_info->bssid,
+ network_info->tsf_hi,
+ network_info->cap_info,
+ network_info->beacon_period,
+ (const u8 *)network_info->ies,
+ (size_t)network_info->ies_len,
+ (s32)rssi * 100,
+ GFP_KERNEL);
+ cfg80211_put_bss(wiphy, bss);
}
}
@@ -442,7 +433,7 @@ static void CfgScanResult(enum scan_event scan_event,
}
}
} else if (scan_event == SCAN_EVENT_DONE) {
- refresh_scan(priv, 1, false);
+ refresh_scan(priv, false);
mutex_lock(&priv->scan_req_lock);
@@ -466,7 +457,7 @@ static void CfgScanResult(enum scan_event scan_event,
};
update_scan_time();
- refresh_scan(priv, 1, false);
+ refresh_scan(priv, false);
cfg80211_scan_done(priv->pstrScanReq, &info);
priv->bCfgScanning = false;
@@ -540,7 +531,7 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
}
if (bNeedScanRefresh)
- refresh_scan(priv, 1, true);
+ refresh_scan(priv, true);
}
cfg80211_connect_result(dev, pstrConnectInfo->bssid,
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index c89bf4301096..7a36561a599e 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -227,8 +227,8 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc, int flag);
void wilc_netdev_cleanup(struct wilc *wilc);
-int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
- const struct wilc_hif_func *ops);
+int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
+ int gpio, const struct wilc_hif_func *ops);
void wilc1000_wlan_deinit(struct net_device *dev);
void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
int wilc_wlan_get_firmware(struct net_device *dev);
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 018db2299d0c..9837a591e7e3 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -413,8 +413,8 @@ struct hfa384x_join_request_data {
/*-- Configuration Record: authenticateStation (data portion only) --*/
struct hfa384x_authenticate_station_data {
u8 address[ETH_ALEN];
- u16 status;
- u16 algorithm;
+ __le16 status;
+ __le16 algorithm;
} __packed;
/*-- Configuration Record: WPAData (data portion only) --*/
@@ -445,9 +445,9 @@ struct hfa384x_downloadbuffer {
/*-- Information Record: commsquality --*/
struct hfa384x_commsquality {
- u16 cq_curr_bss;
- u16 asl_curr_bss;
- u16 anl_curr_fc;
+ __le16 cq_curr_bss;
+ __le16 asl_curr_bss;
+ __le16 anl_curr_fc;
} __packed;
/*-- Information Record: dmbcommsquality --*/
@@ -733,7 +733,7 @@ struct hfa384x_assoc_status {
struct hfa384x_auth_request {
u8 sta_addr[ETH_ALEN];
- u16 algorithm;
+ __le16 algorithm;
} __packed;
/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index e16da34389cd..250af0de9c3e 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1561,7 +1561,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
*/
ether_addr_copy(rec.address, inf->info.authreq.sta_addr);
- rec.status = P80211ENUM_status_unspec_failure;
+ rec.status = cpu_to_le16(P80211ENUM_status_unspec_failure);
/*
* Authenticate based on the access mode.
@@ -1578,7 +1578,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
for (i = 0; i < hw->authlist.cnt; i++)
if (ether_addr_equal(rec.address,
hw->authlist.addr[i])) {
- rec.status = P80211ENUM_status_successful;
+ rec.status = cpu_to_le16(P80211ENUM_status_successful);
break;
}
@@ -1590,7 +1590,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
* Allow all authentications.
*/
- rec.status = P80211ENUM_status_successful;
+ rec.status = cpu_to_le16(P80211ENUM_status_successful);
break;
case WLAN_ACCESS_ALLOW:
@@ -1615,7 +1615,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
for (i = 0; i < cnt; i++, addr += ETH_ALEN)
if (ether_addr_equal(rec.address, addr)) {
- rec.status = P80211ENUM_status_successful;
+ rec.status = cpu_to_le16(P80211ENUM_status_successful);
break;
}
@@ -1641,11 +1641,11 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
addr = hw->deny.addr1[0];
}
- rec.status = P80211ENUM_status_successful;
+ rec.status = cpu_to_le16(P80211ENUM_status_successful);
for (i = 0; i < cnt; i++, addr += ETH_ALEN)
if (ether_addr_equal(rec.address, addr)) {
- rec.status = P80211ENUM_status_unspec_failure;
+ rec.status = cpu_to_le16(P80211ENUM_status_unspec_failure);
break;
}
@@ -1663,7 +1663,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
added = 0;
- if (rec.status == P80211ENUM_status_successful) {
+ if (rec.status == cpu_to_le16(P80211ENUM_status_successful)) {
for (i = 0; i < hw->authlist.cnt; i++)
if (ether_addr_equal(rec.address,
hw->authlist.addr[i]))
@@ -1671,7 +1671,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
if (i >= hw->authlist.cnt) {
if (hw->authlist.cnt >= WLAN_AUTH_MAX) {
- rec.status = P80211ENUM_status_ap_full;
+ rec.status = cpu_to_le16(P80211ENUM_status_ap_full);
} else {
ether_addr_copy(
hw->authlist.addr[hw->authlist.cnt],
@@ -1688,7 +1688,6 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
* it was added.
*/
- rec.status = cpu_to_le16(rec.status);
rec.algorithm = inf->info.authreq.algorithm;
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_AUTHENTICATESTA,