author    Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>  2017-09-08 07:17:09 +0100
committer Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>  2017-09-08 07:17:09 +0100
commit    77658ec40daba6344d0d54976dc9e08a5717d1cd (patch)
tree      6c32a4d540b6b4b7d1e3a7dc468a0426d67c2d57
parent    f26b4a32d483efc65cb0a327e2e73d6f3ddfc218 (diff)

Imported upstream version 17.05.2 (17.05.x)
-rw-r--r--  app/test-crypto-perf/cperf_ops.c | 15
-rw-r--r--  app/test-crypto-perf/cperf_options_parsing.c | 2
-rw-r--r--  app/test-crypto-perf/cperf_test_latency.c | 3
-rw-r--r--  app/test-crypto-perf/cperf_test_throughput.c | 8
-rw-r--r--  app/test-crypto-perf/cperf_test_verify.c | 2
-rw-r--r--  app/test-pmd/cmdline_flow.c | 36
-rw-r--r--  app/test-pmd/config.c | 16
-rw-r--r--  debian/changelog | 6
-rw-r--r--  doc/guides/cryptodevs/aesni_mb.rst | 2
-rw-r--r--  doc/guides/cryptodevs/qat.rst | 4
-rw-r--r--  doc/guides/nics/i40e.rst | 10
-rw-r--r--  doc/guides/nics/mlx5.rst | 20
-rw-r--r--  doc/guides/rel_notes/release_17_05.rst | 119
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 3
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 11
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 24
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd.c | 58
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_ops.c | 12
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_private.h | 4
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 4
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 4
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 24
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 24
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 6
-rw-r--r--  drivers/crypto/qat/qat_crypto_capabilities.h | 26
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 14
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 1
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 2
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c | 1
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 4
-rw-r--r--  drivers/net/ark/ark_ethdev.c | 2
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 55
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 4
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 29
-rw-r--r--  drivers/net/e1000/e1000_ethdev.h | 2
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 15
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h | 2
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 8
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 3
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 3
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 112
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 8
-rw-r--r--  drivers/net/i40e/i40e_flow.c | 6
-rw-r--r--  drivers/net/i40e/i40e_pf.c | 29
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 22
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c | 11
-rw-r--r--  drivers/net/mlx4/mlx4.c | 40
-rw-r--r--  drivers/net/mlx4/mlx4_flow.c | 4
-rw-r--r--  drivers/net/mlx5/mlx5.c | 2
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 28
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 142
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 83
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 12
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 16
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 5
-rw-r--r--  drivers/net/sfc/sfc_port.c | 13
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 6
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 15
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 5
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 6
-rw-r--r--  examples/l2fwd-crypto/main.c | 7
-rw-r--r--  examples/l3fwd/l3fwd_em.c | 2
-rw-r--r--  examples/qos_sched/main.h | 5
-rw-r--r--  lib/librte_cmdline/cmdline_parse.c | 85
-rw-r--r--  lib/librte_cmdline/cmdline_parse.h | 50
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev.c | 8
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev.h | 7
-rw-r--r--  lib/librte_eal/bsdapp/contigmem/contigmem.c | 197
-rw-r--r--  lib/librte_eal/common/eal_common_proc.c | 8
-rw-r--r--  lib/librte_eal/common/include/rte_malloc.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_version.h | 2
-rw-r--r--  lib/librte_eal/common/rte_malloc.c | 4
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memory.c | 9
-rw-r--r--  lib/librte_ether/rte_ethdev.c | 1
-rw-r--r--  lib/librte_ether/rte_ethdev_pci.h | 6
-rw-r--r--  lib/librte_ether/rte_ether_version.map | 1
-rw-r--r--  lib/librte_eventdev/rte_eventdev.c | 2
-rw-r--r--  lib/librte_hash/rte_cuckoo_hash.c | 13
-rw-r--r--  lib/librte_mbuf/rte_mbuf.h | 5
-rw-r--r--  lib/librte_mbuf/rte_mbuf_ptype.h | 4
-rw-r--r--  lib/librte_mempool/rte_mempool.c | 2
-rw-r--r--  lib/librte_metrics/rte_metrics.c | 2
-rw-r--r--  lib/librte_metrics/rte_metrics.h | 3
-rw-r--r--  lib/librte_ring/rte_ring.c | 3
-rw-r--r--  lib/librte_ring/rte_ring.h | 4
-rw-r--r--  lib/librte_vhost/vhost.c | 2
-rw-r--r--  lib/librte_vhost/virtio_net.c | 26
-rw-r--r--  pkg/dpdk.spec | 2
-rw-r--r--  test/test/test_cryptodev.c | 3
-rw-r--r--  test/test/test_cryptodev_perf.c | 15
-rw-r--r--  test/test/test_link_bonding.c | 70
-rw-r--r--  test/test/test_link_bonding_mode4.c | 8
-rw-r--r--  test/test/test_link_bonding_rssconf.c | 2
95 files changed, 1087 insertions, 629 deletions
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index c2c3db5..17df2eb 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -151,14 +151,13 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
@@ -230,14 +229,13 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
@@ -308,14 +306,13 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
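
The three hunks above share one fix: instead of always placing the digest in the head mbuf, the loop walks the segment chain until the remaining offset lands inside a segment, so chained mbufs get the digest in the right place. A minimal standalone sketch of that traversal, with a simplified struct standing in for struct rte_mbuf::

    #include <stddef.h>

    struct seg {
        struct seg *next;       /* next segment, NULL at end of chain */
        unsigned int data_len;  /* bytes held by this segment */
    };

    /* Return the segment containing byte `offset` and rewrite `offset`
     * relative to that segment (mirrors the while loop in cperf_ops.c). */
    static struct seg *
    seg_at_offset(struct seg *s, unsigned int *offset)
    {
        while (s->next != NULL && *offset >= s->data_len) {
            *offset -= s->data_len;
            s = s->next;
        }
        return s;
    }
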
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index d172671..63ba37c 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -312,7 +312,7 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
&opts->min_buffer_size,
&opts->max_buffer_size);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
return -1;
}
opts->buffer_size_count = ret;
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index e61ac97..fd974c4 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -547,6 +547,7 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ rte_cryptodev_stop(ctx->dev_id);
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 61b27ea..73cee65 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -471,14 +471,14 @@ cperf_throughput_test_runner(void *test_ctx)
cycles_per_packet);
} else {
if (!only_once)
- printf("# lcore id, Buffer Size(B),"
+ printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
"Cycles/Buf\n\n");
only_once = 1;
- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%.f3;%.f3;%.f3\n",
+ printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.3f;%.3f;%.3f\n",
ctx->lcore_id,
ctx->options->test_buffer_size,
test_burst_size,
@@ -514,5 +514,7 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_throughput_test_free(ctx, ctx->options->pool_sz);
}
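
The CSV hunk fixes two things: column-padding widths ("%10u") that put stray spaces into machine-readable output, and the malformed conversions ("%.f3"), which print the value with zero digits of precision followed by a literal '3' rather than with three decimals. A tiny compilable illustration (not part of the patch)::

    #include <stdio.h>

    int main(void)
    {
        double gbps = 9.87654;

        printf("%.f3\n", gbps); /* wrong: zero-precision value then '3' -> "103" */
        printf("%.3f\n", gbps); /* right: three decimals -> "9.877" */
        return 0;
    }
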
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 454221e..a842f59 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -575,5 +575,7 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_verify_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 0fd69f9..4d47e79 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -220,7 +220,6 @@ struct context {
enum index prev; /**< Index of the last token seen. */
int next_num; /**< Number of entries in next[]. */
int args_num; /**< Number of entries in args[]. */
- uint32_t reparse:1; /**< Start over from the beginning. */
uint32_t eol:1; /**< EOL has been detected. */
uint32_t last:1; /**< No more arguments. */
uint16_t port; /**< Current port ID (for completions). */
@@ -1574,6 +1573,19 @@ arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
return len;
}
+/** Compare a string with a partial one of a given length. */
+static int
+strcmp_partial(const char *full, const char *partial, size_t partial_len)
+{
+ int r = strncmp(full, partial, partial_len);
+
+ if (r)
+ return r;
+ if (strlen(full) <= partial_len)
+ return 0;
+ return full[partial_len];
+}
+
/**
* Parse a prefix length and generate a bit-mask.
*
@@ -1656,7 +1668,7 @@ parse_default(struct context *ctx, const struct token *token,
(void)ctx;
(void)buf;
(void)size;
- if (strncmp(str, token->name, len))
+ if (strcmp_partial(token->name, str, len))
return -1;
return len;
}
@@ -1899,7 +1911,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (ctx->curr != ACTION_RSS_QUEUE)
return -1;
i = ctx->objdata >> 16;
- if (!strncmp(str, "end", len)) {
+ if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
return len;
}
@@ -2034,7 +2046,7 @@ parse_action(struct context *ctx, const struct token *token,
const struct parse_action_priv *priv;
token = &token_list[next_action[i]];
- if (strncmp(token->name, str, len))
+ if (strcmp_partial(token->name, str, len))
continue;
priv = token->priv;
if (!priv)
@@ -2374,7 +2386,7 @@ parse_boolean(struct context *ctx, const struct token *token,
if (!arg)
return -1;
for (i = 0; boolean_name[i]; ++i)
- if (!strncmp(str, boolean_name[i], len))
+ if (!strcmp_partial(boolean_name[i], str, len))
break;
/* Process token as integer. */
if (boolean_name[i])
@@ -2534,7 +2546,6 @@ cmd_flow_context_init(struct context *ctx)
ctx->prev = ZERO;
ctx->next_num = 0;
ctx->args_num = 0;
- ctx->reparse = 0;
ctx->eol = 0;
ctx->last = 0;
ctx->port = 0;
@@ -2555,9 +2566,6 @@ cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
int i;
(void)hdr;
- /* Restart as requested. */
- if (ctx->reparse)
- cmd_flow_context_init(ctx);
token = &token_list[ctx->curr];
/* Check argument length. */
ctx->eol = 0;
@@ -2633,8 +2641,6 @@ cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2668,8 +2674,6 @@ cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2704,8 +2708,6 @@ cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
const struct token *token = &token_list[ctx->prev];
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
if (!size)
return -1;
/* Set token type and update global help with details. */
@@ -2731,12 +2733,12 @@ static struct cmdline_token_hdr cmd_flow_token_hdr = {
/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
- cmdline_parse_token_hdr_t *(*hdrs)[])
+ cmdline_parse_token_hdr_t **hdr_inst)
{
struct context *ctx = &cmd_flow_context;
/* Always reinitialize context before requesting the first token. */
- if (!(hdr - *hdrs))
+ if (!(hdr_inst - cmd_flow.tokens))
cmd_flow_context_init(ctx);
/* Return NULL when no more tokens are expected. */
if (!ctx->next_num && ctx->curr) {
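
strcmp_partial() is the heart of the token-matching fix: plain strncmp() with the input length accepts any input that merely starts like a token, whereas the helper additionally rejects a match when the input covers only a prefix of the full token name. A self-contained check of that behavior (the helper is reproduced verbatim from the hunk above)::

    #include <assert.h>
    #include <string.h>

    static int
    strcmp_partial(const char *full, const char *partial, size_t partial_len)
    {
        int r = strncmp(full, partial, partial_len);

        if (r)
            return r;
        if (strlen(full) <= partial_len)
            return 0;
        return full[partial_len];
    }

    int main(void)
    {
        assert(strcmp_partial("end", "end", 3) == 0); /* complete token */
        assert(strcmp_partial("end", "en", 2) != 0);  /* bare prefix rejected */
        assert(strcmp_partial("end", "enq", 3) != 0); /* mismatch rejected */
        return 0;
    }
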
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 83a8f52..1c20661 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -979,8 +979,10 @@ static void
flow_item_spec_size(const struct rte_flow_item *item,
size_t *size, size_t *pad)
{
- if (!item->spec)
+ if (!item->spec) {
+ *size = 0;
goto empty;
+ }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
@@ -992,10 +994,10 @@ flow_item_spec_size(const struct rte_flow_item *item,
spec.raw->length * sizeof(*spec.raw->pattern);
break;
default:
-empty:
- *size = 0;
+ *size = flow_item[item->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
@@ -1030,8 +1032,10 @@ static void
flow_action_conf_size(const struct rte_flow_action *action,
size_t *size, size_t *pad)
{
- if (!action->conf)
+ if (!action->conf) {
+ *size = 0;
goto empty;
+ }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
@@ -1043,10 +1047,10 @@ flow_action_conf_size(const struct rte_flow_action *action,
conf.rss->num * sizeof(*conf.rss->queue);
break;
default:
-empty:
- *size = 0;
+ *size = flow_action[action->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
diff --git a/debian/changelog b/debian/changelog
index a0396a5..e32b30d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+dpdk (17.05.2-0linaro1) UNRELEASED; urgency=medium
+
+ * New upstream release 17.05.2.
+
+ -- Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> Fri, 08 Sep 2017 06:14:39 +0100
+
dpdk (17.05.1-1~git1.1) UNRELEASED; urgency=medium
* To ensure backward compatibility symlink aarch64 headers in /usr/include/dpdk.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index ecb52a1..42905c1 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -68,7 +68,7 @@ Limitations
* Chained mbufs are not supported.
* Only in-place is currently supported (destination address is the same as source address).
-* Only supports session-oriented API implementation (session-less APIs are not supported).
+
Installation
------------
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 21b56fc..7ab5959 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -84,8 +84,8 @@ Limitations
* Hash only is not supported except SNOW 3G UIA2 and KASUMI F9.
* Only supports the session-oriented API implementation (session-less APIs are not supported).
-* SNOW 3G (UEA2) and KASUMI (F8) supported only if cipher length, cipher offset fields are byte-aligned.
-* SNOW 3G (UIA2) and KASUMI (F9) supported only if hash length, hash offset fields are byte-aligned.
+* SNOW 3G (UEA2), KASUMI (F8) and ZUC (EEA3) supported only if cipher length and offset fields are byte-multiple.
+* SNOW 3G (UIA2), KASUMI (F9) and ZUC (EIA3) supported only if hash length and offset fields are byte-multiple.
* No BSD support as BSD QAT kernel driver not available.
* ZUC EEA3/EIA3 is not supported by dh895xcc devices
* Maximum additional authenticated data (AAD) for GCM is 240 bytes long.
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 4d3c7ca..387a588 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -404,16 +404,6 @@ is used as the VF driver, DPDK cannot choose 16 byte receive descriptor. That
is to say, user should keep ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n`` in
config file.
-Link down with i40e kernel driver after DPDK application exit
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After DPDK application quit, and the device is bound back to Linux i40e
-kernel driver, the link cannot be up after ``ifconfig <dev> up``.
-To work around this issue, ``ethtool -s <dev> autoneg on`` should be
-set first and then the link can be brought up through ``ifconfig <dev> up``.
-
-NOTE: requires Linux kernel i40e driver version >= 1.4.X
-
Receive packets with Ethertype 0x88A8
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index da6dc27..a68b7ad 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -330,6 +330,26 @@ Supported NICs
* Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
* Mellanox(R) ConnectX(R)-5 Ex EN 100G MCX516A-CDAT (2x100G)
+Known issues
+------------
+
+* **Flow pattern without any specific vlan will match for vlan packets as well.**
+
+ When VLAN spec is not specified in the pattern, the matching rule will be created with VLAN as a wild card.
+ Meaning, the flow rule::
+
+ flow create 0 ingress pattern eth / vlan vid is 3 / ipv4 / end ...
+
+ Will only match vlan packets with vid=3. and the flow rules::
+
+ flow create 0 ingress pattern eth / ipv4 / end ...
+
+ Or::
+
+ flow create 0 ingress pattern eth / vlan / ipv4 / end ...
+
+ Will match any ipv4 packet (VLAN included).
+
Notes for testpmd
-----------------
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index ef6211b..ef9251c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -873,3 +873,122 @@ Fixes in 17.05 Stable Release
* vhost: fix crash on NUMA
* vhost: fix guest pages memory leak
* vhost: fix malloc size too small
+
+17.05.2
+~~~~~~~
+
+* app/crypto-perf: fix CSV output
+* app/crypto-perf: fix digest data for chained mbufs
+* app/crypto-perf: fix error message
+* app/crypto-perf: stop crypto devices after test
+* app/testpmd: fix flow rule copy functions
+* app/testpmd: fix token matching in flow command
+* bus/fslmc: fix the failure loop condition
+* cmdline: fix dynamic tokens initialization
+* cmdline: fix dynamic tokens interface
+* contigmem: do not zero pages during each mmap
+* contigmem: free allocated memory on error
+* crypto/aesni_mb: fix HMAC supported key sizes
+* crypto/aesni_mb: fix possible crypto job leak
+* crypto/aesni_mb: fix zero burst dequeue
+* crypto/aesni_mb: remove assert checks
+* crypto/armv8: fix authentication session configuration
+* crypto/armv8: fix HMAC supported key sizes
+* cryptodev: fix device stop function
+* cryptodev: rename device retrieval argument
+* crypto/dpaa2_sec: fix build with gcc 7.1
+* crypto/dpaa2_sec: fix free usage for dpsec
+* crypto/dpaa2_sec: fix HMAC supported key sizes
+* crypto/dpaa2_sec: fix the return of supported API
+* crypto/openssl: fix HMAC supported key sizes
+* crypto/qat: fix HMAC supported key sizes
+* crypto/qat: fix NULL authentication hang
+* crypto/qat: fix SHA384-HMAC block size
+* crypto/scheduler: fix slave name parsing
+* crypto/scheduler: fix strings not null terminated
+* doc: add missing algorithm in limitations for QAT
+* doc: add VLAN flow limitation on mlx5 PMD
+* doc: remove incorrect limitation on AESNI-MB PMD
+* eal: fix config file path when checking process
+* ethdev: add missing symbol in map
+* ethdev: fix build with gcc 5.4.0
+* ethdev: fix secondary process crash on unused virtio
+* eventdev: fix memory realloc check in port config
+* event/octeontx: fix missing enqueue SMP barrier
+* examples/l2fwd-crypto: fix application help
+* examples/l2fwd-crypto: fix auth info display
+* examples/l2fwd-crypto: fix option parsing
+* examples/l3fwd: fix IPv6 packet type parse
+* examples/qos_sched: fix build for less lcores
+* hash: fix lock release on add
+* ip_frag: free mbufs on reassembly table destroy
+* mbuf: fix debug checks for headroom and tailroom
+* mbuf: fix doxygen comment of bulk alloc
+* mbuf: fix VXLAN port in comment
+* mem: do not advertise physical address when no hugepages
+* mempool/dpaa2: fix error code for allocation failure
+* mempool/dpaa2: fix freeing bp list
+* metrics: fix name string termination
+* net/ark: fix stats reset
+* net/bnxt: check invalid L2 filter id
+* net/bnxt: fix autoneg on 10GBase-T links
+* net/bnxt: fix get link config
+* net/bnxt: fix set link config
+* net/bnxt: fix set link config
+* net/bnxt: free filter before reusing it
+* net/bonding: change link status check to no-wait
+* net/bonding: fix number of bonding Tx/Rx queues
+* net/bonding: fix when NTT flag updated
+* net/e1000: fix LSC interrupt
+* net/ena/base: initialize memory in the allocation macros
+* net/ena: fix cleanup of the Tx bufs
+* net/enic: fix crash when freeing 0 packet to mempool
+* net/fm10k: initialize link status in device start
+* net/i40e: fix division by 0
+* net/i40e: fix ethertype filter for new FW
+* net/i40e: fix incorrect PF Rx bytes
+* net/i40e: fix link down and negotiation
+* net/i40e: fix LSC interrupt
+* net/i40e: fix parsing QinQ pattern
+* net/i40e: fix PF notify when VF is not up
+* net/i40e: fix Rx data segment buffer length
+* net/i40e: fix VF Tx bytes
+* net/i40e: revert fix of PF notify when VF not up
+* net/igb: fix flex filter length
+* net/ixgbe: fix LSC interrupt
+* net/ixgbe: fix mask flag on flow rule creation
+* net/ixgbe: fix mirror rule index overflow
+* net/ixgbe: fix Rx/Tx queue interrupt for x550 devices
+* net/mlx4: fix assertion failure on link update
+* net/mlx4: fix flow creation before start
+* net/mlx4: fix mbuf poisoning in debug code
+* net/mlx4: fix probe failure report
+* net/mlx5: fix inconsistent link status query
+* net/mlx5: fix misplaced Rx interrupts functions
+* net/mlx5: fix missing packet type calculation
+* net/mlx5: fix return value in Rx interrupts code
+* net/mlx5: fix Rx interrupts management
+* net/mlx5: fix Rx interrupts support checks
+* net/mlx5: fix TSO segment size
+* net/qede: fix chip details print
+* net/sfc: request MAC stats upload immediately on port start
+* net/virtio: fix MAC address read
+* net/virtio: fix Rx interrupt setup
+* net/virtio-user: fix crash when detaching device
+* net/vmxnet3: fix filtering on promiscuous disabling
+* net/vmxnet3: fix receive queue memory leak
+* Revert "ip_frag: free mbufs on reassembly table destroy"
+* Revert "net/i40e: revert fix of PF notify when VF not up"
+* ring: fix return value for dequeue
+* ring: use aligned memzone allocation
+* test/bonding: fix device name
+* test/bonding: fix memory corruptions
+* test/bonding: fix mode 4 names
+* test/bonding: fix namespace of the RSS tests
+* test/bonding: fix parameters of a balance Tx
+* test/crypto: fix overflow
+* test/crypto: fix wrong AAD setting
+* vhost: fix checking of device features
+* vhost: fix IP checksum
+* vhost: fix MTU device feature check
+* vhost: fix TCP checksum
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index a1a58b9..31c8c4c 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -437,8 +437,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
}
return 0;
fail:
- i -= 1;
- while (i >= 0)
+ while (--i >= 0)
rte_free(q_storage->dq_storage[i]);
return -1;
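
Before this fix the unwind loop in dpaa2_alloc_dq_storage() never decremented `i` again, spinning forever on the same element; `while (--i >= 0)` both skips the slot that failed and frees each earlier allocation exactly once. The same idiom in a self-contained sketch (malloc()/free() stand in for rte_malloc()/rte_free())::

    #include <stdlib.h>

    /* Allocate n buffers, unwinding on failure. */
    static int
    alloc_all(void **bufs, int n, size_t sz)
    {
        int i;

        for (i = 0; i < n; i++) {
            bufs[i] = malloc(sz);
            if (bufs[i] == NULL)
                goto fail;
        }
        return 0;
    fail:
        while (--i >= 0) /* frees bufs[i-1]..bufs[0], not the failed slot */
            free(bufs[i]);
        return -1;
    }
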
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 45b25c9..06a6435 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -494,8 +494,6 @@ static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
- RTE_ASSERT(m_dst == NULL);
-
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
@@ -522,8 +520,6 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
struct aesni_mb_session *sess;
- RTE_ASSERT(op == NULL);
-
if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
switch (job->status) {
case STS_COMPLETED:
@@ -569,7 +565,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job != NULL && processed_jobs < nb_ops) {
+ while (job != NULL) {
op = post_process_mb_job(qp, job);
if (op) {
@@ -579,6 +575,8 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
qp->stats.dequeue_err_count++;
break;
}
+ if (processed_jobs == nb_ops)
+ break;
job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
@@ -624,6 +622,9 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
int retval, processed_jobs = 0;
+ if (unlikely(nb_ops == 0))
+ return 0;
+
do {
/* Get next operation to process from ingress queue */
retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d1bc28e..cb2893a 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
@@ -69,9 +69,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 14,
@@ -111,9 +111,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 24,
@@ -153,9 +153,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 3d603a5..8371245 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -288,27 +288,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 160 bits
- * the algorithm will use SHA1(key) instead.
- */
- error = sha1_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA1_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
@@ -334,27 +321,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 256 bits
- * the algorithm will use SHA256(key) instead.
- */
- error = sha256_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA256_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
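
The armv8 change stops special-casing long keys and simply zero-pads the key to the HMAC block size, which is what RFC 2104 prescribes for keys no longer than one block (keys longer than a block would still have to be hashed first, which is why the capability tables in this release now cap the supported key size at the block size). A sketch of the pad preparation under that assumption, not the PMD's actual code::

    #include <stdint.h>
    #include <string.h>

    #define BLOCK_SIZE 64 /* SHA-1/SHA-256 HMAC block size */

    /* Build the RFC 2104 inner/outer key pads from a key of at most
     * BLOCK_SIZE bytes. */
    static void
    hmac_pads(const uint8_t *key, size_t key_len,
              uint8_t ipad[BLOCK_SIZE], uint8_t opad[BLOCK_SIZE])
    {
        uint8_t k[BLOCK_SIZE] = {0}; /* key zero-padded to block size */
        size_t i;

        memcpy(k, key, key_len); /* assumes key_len <= BLOCK_SIZE */
        for (i = 0; i < BLOCK_SIZE; i++) {
            ipad[i] = k[i] ^ 0x36;
            opad[i] = k[i] ^ 0x5c;
        }
    }
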
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 4d9ccbf..d171069 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -50,9 +50,9 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 20,
@@ -71,9 +71,9 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 32,
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index b75107f..34ab9de 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -192,8 +192,8 @@ struct armv8_crypto_session {
uint8_t o_key_pad[SHA_BLOCK_MAX]
__rte_cache_aligned;
/**< outer pad (max supported block length) */
- uint8_t key[SHA_AUTH_KEY_MAX];
- /**< HMAC key (max supported length)*/
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length)*/
} hmac;
};
} auth;
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index 11c7c78..067f022 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -46,6 +46,10 @@ CFLAGS += $(WERROR_FLAGS)
endif
CFLAGS += -D _GNU_SOURCE
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+CFLAGS += -Wno-implicit-fallthrough
+endif
+
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 4e01fe8..18a141a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1263,7 +1263,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
{
PMD_INIT_FUNC_TRACE();
- return -ENOTSUP;
+ return 0;
}
static int
@@ -1366,7 +1366,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Free the allocated memory for ethernet private data and dpseci*/
priv->hw = NULL;
- free(dpseci);
+ rte_free(dpseci);
return 0;
}
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f5c6169..e02bf72 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -204,9 +204,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
@@ -225,9 +225,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
@@ -246,9 +246,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
@@ -267,9 +267,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
@@ -288,9 +288,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
@@ -309,9 +309,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 22a6873..d6f1ce3 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
@@ -174,9 +174,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
@@ -216,9 +216,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
@@ -258,9 +258,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 154e1dd..10b25ff 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -121,6 +121,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -868,6 +871,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 1294f24..42ede7b 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -43,9 +43,9 @@
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 20, \
@@ -64,9 +64,9 @@
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 28, \
@@ -85,9 +85,9 @@
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 32, \
@@ -104,11 +104,11 @@
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
{.auth = { \
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
- .block_size = 64, \
+ .block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 48, \
@@ -127,9 +127,9 @@
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
.block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 64, \
@@ -148,9 +148,9 @@
.algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 8, \
+ .min = 1, \
.max = 64, \
- .increment = 8 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 16, \
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 319dcf0..88f8fc0 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -467,8 +467,22 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx = dev->data->dev_private;
+ if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", scheduler->name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+ return -EINVAL;
+ }
strncpy(sched_ctx->name, scheduler->name,
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ if (strlen(scheduler->description) >
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid description %s, should be less than "
+ "%u bytes.\n", scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+ return -EINVAL;
+ }
strncpy(sched_ctx->description, scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
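
The scheduler fix validates lengths up front because strncpy() silently drops the terminating NUL when the source fills the destination, leaving a non-terminated string behind ("fix strings not null terminated"). A sketch of an equivalent check-then-copy helper using snprintf(), which always terminates::

    #include <stdio.h>
    #include <string.h>

    /* Copy src into dst[dst_sz], rejecting anything that would truncate
     * (the caller can then report -EINVAL as the hunk above does). */
    static int
    copy_name(char *dst, size_t dst_sz, const char *src)
    {
        if (strlen(src) > dst_sz - 1)
            return -1;
        snprintf(dst, dst_sz, "%s", src);
        return 0;
    }
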
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 2ba6e47..b250a58 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -116,6 +116,7 @@ struct rte_cryptodev_scheduler;
* - 0 if the scheduler is successfully loaded
* - -ENOTSUP if the operation is not supported.
* - -EBUSY if device is started.
+ * - -EINVAL if input values are invalid.
*/
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 0b63c20..8188484 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -277,7 +277,7 @@ parse_slave_arg(const char *key __rte_unused,
{
struct scheduler_init_params *param = extra_args;
- if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
CS_LOG_ERR("Too many slaves.\n");
return -ENOMEM;
}
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ad3fe68..74e8901 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -179,6 +179,7 @@ ssows_enq(void *port, const struct rte_event *ev)
switch (ev->op) {
case RTE_EVENT_OP_NEW:
+ rte_smp_wmb();
ssows_new_event(ws, ev);
break;
case RTE_EVENT_OP_FORWARD:
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 5a5d6aa..62a2d25 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -161,7 +161,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
while (temp) {
if (temp == bp) {
prev->next = temp->next;
- free(bp);
+ rte_free(bp);
break;
}
prev = temp;
@@ -294,7 +294,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
bp_info->meta_data_size, n);
- return ret;
+ return -ENOBUFS;
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
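
The alloc-bulk fix normalizes the error path: mempool dequeue callbacks are expected to return 0 on success or a negative errno, so the raw return value of the hardware acquire must not leak out. A sketch of the convention (hw_acquire() is a hypothetical stub standing in for the QBMAN buffer-acquire call)::

    #include <errno.h>

    static int
    hw_acquire(void **bufs, int n)
    {
        (void)bufs;
        return n; /* stub: pretend every buffer was acquired */
    }

    /* Dequeue n buffers; on shortfall the real driver also releases the
     * partially acquired buffers before returning. */
    static int
    pool_dequeue(void **bufs, int n)
    {
        if (hw_acquire(bufs, n) != n)
            return -ENOBUFS; /* a negative errno, never a raw count */
        return 0;
    }
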
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 017817e..ac25c1d 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -824,7 +824,7 @@ eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
for (i = 0; i < dev->data->nb_tx_queues; i++)
- eth_tx_queue_stats_reset(dev->data->rx_queues[i]);
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
if (ark->user_ext.stats_reset)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d898723..f64cb80 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -196,6 +196,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (filter->fw_l2_filter_id == UINT64_MAX)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -218,6 +221,9 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ if (filter->fw_l2_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_filter(bp, filter);
+
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
req.flags = rte_cpu_to_le_32(filter->flags);
@@ -478,6 +484,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ uint32_t link_speed_mask =
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
@@ -489,14 +497,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
* any auto mode, even "none".
*/
if (!conf->link_speed) {
- req.auto_mode |= conf->auto_mode;
- enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- req.auto_link_speed_mask = conf->auto_link_speed_mask;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- req.auto_link_speed = bp->link_info.auto_link_speed;
- enables |=
+ req.auto_mode = conf->auto_mode;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ if (conf->auto_mode ==
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |= link_speed_mask;
+ }
+ if (bp->link_info.auto_link_speed) {
+ req.auto_link_speed =
+ bp->link_info.auto_link_speed;
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+ }
}
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
@@ -536,13 +550,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
- link_info->link_up = 1;
- link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- } else {
- link_info->link_up = 0;
- link_info->link_speed = 0;
- }
+ link_info->link_up =
+ (link_info->phy_link_status ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
link_info->duplex = resp->duplex;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
@@ -1337,12 +1348,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
return 0;
}
-static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (bp->link_info.support_speeds)
+ return bp->link_info.support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
+ }
if (link_speed & ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
@@ -1434,7 +1449,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
"Get link config failed with rc %d\n", rc);
goto exit;
}
- if (link_info->link_up)
+ if (link_info->link_speed)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
@@ -1443,7 +1458,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
return rc;
}
@@ -1476,7 +1491,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
link_req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
- bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+ bnxt_parse_eth_link_speed_mask(bp,
+ dev_conf->link_speeds);
} else {
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
@@ -1493,7 +1509,6 @@ port_phy_cfg:
"Set link config failed with rc %d\n", rc);
}
- rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
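
Two of the bnxt hunks implement a sentinel-handle discipline: UINT64_MAX means "no firmware filter allocated", clear is a no-op on the sentinel, and set frees any live handle before allocating a replacement, so firmware filter IDs are neither leaked nor double-freed. A sketch of that pattern (fw_free() is a hypothetical stub for the CFA_L2_FILTER_FREE request)::

    #include <stdint.h>

    #define NO_FILTER UINT64_MAX

    struct filter { uint64_t fw_id; };

    static void
    fw_free(uint64_t id)
    {
        (void)id; /* stub: a real driver issues the HWRM free here */
    }

    static void
    filter_clear(struct filter *f)
    {
        if (f->fw_id == NO_FILTER)
            return; /* nothing allocated in firmware */
        fw_free(f->fw_id);
        f->fw_id = NO_FILTER;
    }
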
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 7b863d6..6f7e985 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -435,7 +435,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
* In other case (was fast and now it is slow) just switch
* timeout to slow without forcing send of LACP (because standard
* say so)*/
- if (!is_partner_fast)
+ if (is_partner_fast)
SM_FLAG_SET(port, NTT);
} else
return; /* Nothing changed */
@@ -758,7 +758,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
uint16_t key;
slave_id = internals->active_slaves[i];
- rte_eth_link_get(slave_id, &link_info);
+ rte_eth_link_get_nowait(slave_id, &link_info);
rte_eth_macaddr_get(slave_id, &slave_addr);
if (link_info.link_status != 0) {
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 82959ab..4f3b5be 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -654,7 +654,7 @@ bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
{
struct rte_eth_link link_status;
- rte_eth_link_get(port_id, &link_status);
+ rte_eth_link_get_nowait(port_id, &link_status);
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
@@ -1690,6 +1690,8 @@ static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint16_t max_nb_rx_queues = UINT16_MAX;
+ uint16_t max_nb_tx_queues = UINT16_MAX;
dev_info->max_mac_addrs = 1;
@@ -1697,8 +1699,29 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
? internals->candidate_max_rx_pktlen
: ETHER_MAX_JUMBO_FRAME_LEN;
- dev_info->max_rx_queues = (uint16_t)128;
- dev_info->max_tx_queues = (uint16_t)512;
+ if (internals->slave_count > 0) {
+ /* Max number of tx/rx queues that the bonded device can
+ * support is the minimum values of the bonded slaves, as
+ * all slaves must be capable of supporting the same number
+ * of tx/rx queues.
+ */
+ struct rte_eth_dev_info slave_info;
+ uint8_t idx;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (slave_info.max_rx_queues < max_nb_rx_queues)
+ max_nb_rx_queues = slave_info.max_rx_queues;
+
+ if (slave_info.max_tx_queues < max_nb_tx_queues)
+ max_nb_tx_queues = slave_info.max_tx_queues;
+ }
+ }
+
+ dev_info->max_rx_queues = max_nb_rx_queues;
+ dev_info->max_tx_queues = max_nb_tx_queues;
dev_info->min_rx_bufsize = 0;
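
The bonding hunk replaces the hard-coded 128/512 queue limits with the minimum across all slaves, since the bonded port can only advertise what every slave supports; starting at UINT16_MAX keeps a slave-less bond unconstrained. The reduction is an ordinary running minimum::

    #include <stdint.h>

    /* Running minimum over per-slave queue limits (limits[] stands in for
     * the max_rx_queues values reported by rte_eth_dev_info_get()). */
    static uint16_t
    min_limit(const uint16_t *limits, unsigned int n)
    {
        uint16_t min = UINT16_MAX;
        unsigned int i;

        for (i = 0; i < n; i++)
            if (limits[i] < min)
                min = limits[i];
        return min;
    }
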
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 8352d0a..9f1c6e7 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -82,7 +82,7 @@
#define E1000_MAX_FLEX_FILTER_DWDS \
(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
- (E1000_MAX_FLEX_FILTER_DWDS / 4)
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index d18dd48..3474793 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -138,7 +138,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
@@ -1417,7 +1417,9 @@ eth_igb_start(struct rte_eth_dev *dev)
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- eth_igb_lsc_interrupt_setup(dev);
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
} else {
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler,
@@ -2716,18 +2718,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- intr->mask |= E1000_ICR_LSC;
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
return 0;
}
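
The igb fix turns LSC setup into a true enable/disable toggle on the interrupt mask rather than an unconditional enable; the underlying pattern is the usual set/clear of one bit::

    #include <stdint.h>

    /* Set or clear `bit` in `mask` depending on `on` (the pattern used
     * by eth_igb_lsc_interrupt_setup() above). */
    static uint32_t
    toggle_bit(uint32_t mask, uint32_t bit, int on)
    {
        return on ? (mask | bit) : (mask & ~bit);
    }
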
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 7eaebf4..71a8c1e 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -207,6 +207,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
phys = mz->phys_addr; \
} while (0)
@@ -219,6 +220,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
} while (0)
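
The ENA hunks add a memset() because rte_memzone_reserve() does not zero the memory it returns, and the base-driver allocation macros were handing out uninitialized buffers. The calloc-style shape of the fix, as a sketch (malloc() stands in for rte_memzone_reserve(); unlike the macro above, this also checks for failure)::

    #include <stdlib.h>
    #include <string.h>

    /* Reserve-and-zero allocation. */
    static void *
    zalloc(size_t size)
    {
        void *p = malloc(size);

        if (p != NULL)
            memset(p, 0, size);
        return p;
    }
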
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 806073c..ec1573f 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -689,11 +689,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
- unsigned int ring_mask = ring->ring_size - 1;
+ unsigned int i;
- while (ring->next_to_clean != ring->next_to_use) {
- struct ena_tx_buffer *tx_buf =
- &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
@@ -1772,6 +1771,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Free whole mbuf chain */
mbuf = tx_info->mbuf;
rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index ba0cfd0..2862205 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -491,7 +491,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
tail_idx = enic_ring_incr(desc_count, tail_idx);
}
- rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ if (nb_free > 0)
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
wq->tail_idx = tail_idx;
wq->ring.desc_avail += nb_to_free;
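
The enic fix guards the bulk put because the completion path can reach this point with nothing to return, and handing a zero count to the mempool triggered the crash this release notes entry calls "fix crash when freeing 0 packet to mempool". The guard in isolation::

    #include <rte_mempool.h>

    /* Return locally batched buffers to the pool only when the batch is
     * non-empty (sketch of the guard added above). */
    static void
    flush_free_list(struct rte_mempool *pool, void **bufs, unsigned int n)
    {
        if (n > 0)
            rte_mempool_put_bulk(pool, bufs, n);
    }
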
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7363def..0519a08 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -84,6 +84,7 @@ static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -1166,6 +1167,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+ fm10k_link_update(dev, 0);
+
return 0;
}
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fd7d347..223f2d8 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1806,11 +1806,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint32_t phy_type_mask = 0;
+
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1828,6 +1832,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (status)
return ret;
+ /* If link already up, no need to set up again */
+ if (is_up && phy_ab.phy_type != 0)
+ return I40E_SUCCESS;
+
memset(&phy_conf, 0, sizeof(phy_conf));
/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1838,13 +1846,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (abilities & I40E_AQ_PHY_AN_ENABLED)
phy_conf.link_speed = advt;
else
- phy_conf.link_speed = force_speed;
+ phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
phy_conf.abilities = abilities;
+
+
+ /* To enable link, phy_type mask needs to include each type */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ phy_type_mask |= 1 << cnt;
+
/* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
@@ -1876,13 +1892,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
- /* Skip changing speed on 40G interfaces, FW does not support */
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
- speed = I40E_LINK_SPEED_UNKNOWN;
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- }
-
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, true);
}
static int
@@ -2008,7 +2018,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO,
"lsc won't enable because of no intr multiplex");
- } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ } else {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL |
@@ -2016,7 +2026,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -2225,7 +2235,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2352,9 +2362,6 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
- /* exclude CRC bytes */
- nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
- nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2390,14 +2397,35 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
I40E_GLV_GORCL(hw->port),
pf->offset_loaded,
- &pf->internal_rx_bytes_offset,
- &pf->internal_rx_bytes);
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
I40E_GLV_GOTCL(hw->port),
pf->offset_loaded,
- &pf->internal_tx_bytes_offset,
- &pf->internal_tx_bytes);
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
@@ -2420,7 +2448,17 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* Workaround: it is possible I40E_GLV_GORC[H/L] is updated before
+ * I40E_GLPRT_GORC[H/L], so there is a small window that causes a
+ * negative value.
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ /* exclude internal rx bytes */
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2448,7 +2486,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -4284,6 +4329,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
if (enabled_tcmap & (1 << i))
total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
vsi->enabled_tc = enabled_tcmap;
/* Number of queues per enabled TC */
@@ -5216,10 +5263,8 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
- pf->internal_rx_bytes = 0;
- pf->internal_tx_bytes = 0;
- pf->internal_rx_bytes_offset = 0;
- pf->internal_tx_bytes_offset = 0;
+ memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
@@ -9150,8 +9195,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
*/
/* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -9203,8 +9249,12 @@ i40e_configure_registers(struct i40e_hw *hw)
reg_table[i].val =
I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
else /* For X710/XL710/XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
}
if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
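The rx_bytes/tx_bytes hunks above clamp the unsigned subtraction at zero because the per-VSI GLV counters can be read slightly ahead of the port-level GLPRT counters, which would otherwise make the difference wrap around. A minimal sketch of the idiom (helper name hypothetical):

    static inline uint64_t
    sub_counter_clamped(uint64_t total, uint64_t internal)
    {
    	/* An unsigned subtraction would wrap to a huge value whenever
    	 * the internal counter briefly exceeds the total; clamp to 0. */
    	return (total < internal) ? 0 : total - internal;
    }

    /* The hunk then amounts to:
     * ns->eth.rx_bytes = sub_counter_clamped(ns->eth.rx_bytes,
     *                                        pf->internal_stats.rx_bytes); */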
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0d963c..6332b01 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -639,11 +639,9 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
- /* internal packet byte count, it should be excluded from the total */
- uint64_t internal_rx_bytes;
- uint64_t internal_tx_bytes;
- uint64_t internal_rx_bytes_offset;
- uint64_t internal_tx_bytes_offset;
+ /* internal packet statistics, it should be excluded from the total */
+ struct i40e_eth_stats internal_stats_offset;
+ struct i40e_eth_stats internal_stats;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 24e1c65..c1dac38 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1821,8 +1821,10 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Get filter specification */
- if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
- (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask != NULL) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
& I40E_TCI_MASK;
filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 0758503..65d93a8 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -152,22 +152,22 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
I40E_WRITE_FLUSH(hw);
- }
#define VFRESET_MAX_WAIT_CNT 100
- /* Wait until VF reset is done */
- for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
- rte_delay_us(10);
- val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
- if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
- break;
- }
+ /* Wait until VF reset is done */
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(10);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
+ if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
- if (i >= VFRESET_MAX_WAIT_CNT) {
- PMD_DRV_LOG(ERR, "VF reset timeout");
- return -ETIMEDOUT;
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "VF reset timeout");
+ return -ETIMEDOUT;
+ }
+ vf->state = I40E_VF_ACTIVE;
}
-
/* This is not first time to do reset, do cleanup job first */
if (vf->vsi) {
/* Disable queues */
@@ -262,7 +262,10 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
- int ret;
+ int ret = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ if (vf->state == I40E_VF_INACTIVE)
+ return ret;
ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
msg, msglen, NULL);
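The re-indented VF reset wait above is the standard poll-with-timeout idiom: sleep, read the status register, succeed as soon as the done bit appears, and give up after a bounded number of tries. A generic sketch using the same register-access macros (helper name hypothetical):

    static int
    poll_reg_bit(struct i40e_hw *hw, uint32_t reg, uint32_t mask,
    		unsigned int tries, unsigned int delay_us)
    {
    	unsigned int i;

    	for (i = 0; i < tries; i++) {
    		rte_delay_us(delay_us);
    		if (I40E_READ_REG(hw, reg) & mask)
    			return 0;	/* done bit observed */
    	}
    	return -ETIMEDOUT;	/* bit never appeared in time */
    }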
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 351cb94..07b09b2 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2474,7 +2474,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
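This one-character change matters because RTE_ALIGN() rounds up while RTE_ALIGN_FLOOR() rounds down, and rounding the Rx buffer length up can exceed the mempool's data room. A worked example, assuming the 128-byte granularity implied by I40E_RXQ_CTX_DBUFF_SHIFT:

    /* buf_size = 2000 bytes, granularity = 1 << 7 = 128 bytes:
     *   RTE_ALIGN(2000, 128)       == 2048 -> can exceed the mbuf data room
     *   RTE_ALIGN_FLOOR(2000, 128) == 1920 -> always fits within buf_size
     */
    rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
    		(1 << I40E_RXQ_CTX_DBUFF_SHIFT));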
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index aeaa432..626c5ee 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -240,7 +240,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -2672,7 +2672,9 @@ skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
@@ -3916,19 +3918,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or disable.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
ixgbe_dev_link_status_print(dev);
- intr->mask |= IXGBE_EICR_LSC;
+ if (on)
+ intr->mask |= IXGBE_EICR_LSC;
+ else
+ intr->mask &= ~IXGBE_EICR_LSC;
return 0;
}
@@ -5316,6 +5323,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));
@@ -5473,7 +5483,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
tmp |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
if (direction == -1) {
/* other causes */
idx = ((queue & 1) * 8);
@@ -5581,6 +5592,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 9aeb71e..3a5f979 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -2414,6 +2414,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
if (!flow) {
@@ -2510,6 +2511,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
goto out;
fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
} else {
/**
* Only support one global mask,
@@ -2540,8 +2542,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
return flow;
}
- if (ret)
+ if (ret) {
+ /**
+ * Clear the mask_added flag if we fail to
+ * program the rule.
+ */
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
goto out;
+ }
}
goto out;
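The first_mask flag above is a rollback idiom: remember whether this call is the one that set a piece of shared state, so the error path undoes only its own side effect. A minimal sketch (the struct and program_rule() are hypothetical):

    static int
    add_fdir_rule(struct fdir_info *info)
    {
    	int first_mask = 0;
    	int ret;

    	if (!info->mask_added) {
    		info->mask_added = 1;	/* we are the first user */
    		first_mask = 1;
    	}
    	ret = program_rule(info);	/* hypothetical hardware call */
    	if (ret && first_mask)
    		info->mask_added = 0;	/* roll back our change only */
    	return ret;
    }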
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index ec4419a..02e5a1f 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -2987,6 +2987,13 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NB_SEGS(rep) = 0x2a;
PORT(rep) = 0x2a;
rep->ol_flags = -1;
+ /*
+ * Clear special flags in mbuf to avoid
+ * crashing while freeing.
+ */
+ rep->ol_flags &=
+ ~(uint64_t)(IND_ATTACHED_MBUF |
+ CTRL_MBUF_FLAG);
#endif
assert(rep->buf_len == seg->buf_len);
/* Reconfigure sge to use rep instead of seg. */
@@ -5308,6 +5315,7 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
{
struct ibv_async_event event;
int port_change = 0;
+ struct rte_eth_link *link = &dev->data->dev_link;
int ret = 0;
*events = 0;
@@ -5329,22 +5337,20 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
}
-
- if (port_change ^ priv->pending_alarm) {
- struct rte_eth_link *link = &dev->data->dev_link;
-
- priv->pending_alarm = 0;
- mlx4_link_update(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
+ if (!port_change)
+ return ret;
+ mlx4_link_update(dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ if (!priv->pending_alarm) {
/* Inconsistent status, check again later. */
priv->pending_alarm = 1;
rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
mlx4_dev_link_status_handler,
dev);
- } else {
- *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
+ } else {
+ *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
return ret;
}
@@ -5365,6 +5371,7 @@ mlx4_dev_link_status_handler(void *arg)
priv_lock(priv);
assert(priv->pending_alarm == 1);
+ priv->pending_alarm = 0;
ret = priv_dev_status_handler(priv, dev, &events);
priv_unlock(priv);
if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
@@ -5760,12 +5767,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr))
+ if (ibv_query_device(attr_ctx, &device_attr)) {
+ err = ENODEV;
goto error;
+ }
INFO("%u port(s) detected", device_attr.phys_port_cnt);
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
@@ -5799,19 +5809,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
- if (ctx == NULL)
+ if (ctx == NULL) {
+ err = ENODEV;
goto port_error;
+ }
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
+ err = ENODEV;
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
+ err = EINVAL;
goto port_error;
}
@@ -5848,6 +5862,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
+ err = ENODEV;
goto port_error;
}
#ifdef RSS_SUPPORT
@@ -5923,6 +5938,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
" (errno: %s)", strerror(errno));
+ err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index edfac03..b9a024b 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -829,7 +829,7 @@ priv_flow_create_action_queue(struct priv *priv,
return NULL;
}
if (action->drop) {
- qp = priv->flow_drop_queue->qp;
+ qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
} else {
struct rxq *rxq = (*priv->rxqs)[action->queue_id];
@@ -837,6 +837,8 @@ priv_flow_create_action_queue(struct priv *priv,
rte_flow->qp = qp;
}
rte_flow->ibv_attr = ibv_attr;
+ if (!priv->started)
+ return rte_flow;
rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bcb2c1b..49d4dba 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -246,8 +246,10 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+#ifdef HAVE_UPDATE_CQ_CI
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
+#endif
};
static struct {
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3fd22cb..27ec3ef 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -119,6 +119,7 @@ struct ethtool_link_settings {
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
+#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
@@ -806,9 +807,12 @@ static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- struct ethtool_link_settings edata = {
- .cmd = ETHTOOL_GLINKSETTINGS,
- };
+ __extension__ struct {
+ struct ethtool_link_settings edata;
+ uint32_t link_mode_data[3 *
+ ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
+ } ecmd;
+
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
@@ -821,15 +825,23 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
+ memset(&ecmd, 0, sizeof(ecmd));
+ ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
+ ifr.ifr_data = (void *)&ecmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = edata.speed;
- sc = edata.link_mode_masks[0] |
- ((uint64_t)edata.link_mode_masks[1] << 32);
+ ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ dev_link.link_speed = ecmd.edata.speed;
+ sc = ecmd.edata.link_mode_masks[0] |
+ ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -865,7 +877,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
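The doubled ioctl above is the documented ETHTOOL_GLINKSETTINGS handshake: a first call with link_mode_masks_nwords == 0 makes the kernel reply with the negated word count it needs, then user space negates it back and repeats the request with room for the three link-mode masks (supported, advertising, lp_advertising). A minimal sketch against a plain socket instead of the driver's priv_ifreq() wrapper:

    #include <string.h>
    #include <limits.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    struct link_settings_buf {
    	struct ethtool_link_settings req;
    	uint32_t link_mode_data[3 * SCHAR_MAX];	/* worst-case masks */
    };

    /* fd is an AF_INET socket; ifr->ifr_name is already filled in. */
    static int
    get_link_settings(int fd, struct ifreq *ifr, struct link_settings_buf *e)
    {
    	memset(e, 0, sizeof(*e));
    	e->req.cmd = ETHTOOL_GLINKSETTINGS;
    	ifr->ifr_data = (void *)e;
    	if (ioctl(fd, SIOCETHTOOL, ifr) == -1)
    		return -1;	/* old kernel: fall back to ETHTOOL_GSET */
    	/* Pass 1 returned the required mask word count, negated. */
    	e->req.link_mode_masks_nwords = -e->req.link_mode_masks_nwords;
    	return ioctl(fd, SIOCETHTOOL, ifr);
    }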
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2a26839..5aa8121 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -978,10 +978,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (dev->data->dev_conf.intr_conf.rxq) {
tmpl.channel = ibv_create_comp_channel(priv->ctx);
if (tmpl.channel == NULL) {
- dev->data->dev_conf.intr_conf.rxq = 0;
ret = ENOMEM;
- ERROR("%p: Comp Channel creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(ret));
goto error;
}
}
@@ -1310,111 +1310,159 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
}
/**
- * Fill epoll fd list for rxq interrupts.
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_intr_efd_enable(struct priv *priv)
+priv_rx_intr_vec_enable(struct priv *priv)
{
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
- if (n == 0)
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
- if (n < rxqs_n) {
- WARN("rxqs num is larger than EAL max interrupt vector "
- "%u > %u unable to supprt rxq interrupts",
- rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- return -EINVAL;
+ priv_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ if (intr_handle->intr_vec == NULL) {
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -ENOMEM;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
- int fd = rxq_ctrl->channel->fd;
+ int fd;
int flags;
int rc;
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq_ctrl->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ priv_rx_intr_vec_disable(priv);
+ return -1;
+ }
+ fd = rxq_ctrl->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
- WARN("failed to change rxq interrupt file "
- "descriptor %d for queue index %d", fd, i);
+ ERROR("failed to make Rx interrupt file descriptor"
+ " %d non-blocking for queue index %d", fd, i);
+ priv_rx_intr_vec_disable(priv);
return -1;
}
- intr_handle->efds[i] = fd;
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
}
- intr_handle->nb_efd = n;
+ if (!count)
+ priv_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
return 0;
}
/**
- * Clean epoll fd list for rxq interrupts.
+ * Clean up Rx interrupts handler.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*/
void
-priv_intr_efd_disable(struct priv *priv)
+priv_rx_intr_vec_disable(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
}
+#ifdef HAVE_UPDATE_CQ_CI
+
/**
- * Create and init interrupt vector array.
+ * DPDK callback for Rx queue interrupt enable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_create_intr_vec(struct priv *priv)
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- unsigned int rxqs_n = priv->rxqs_n;
- unsigned int i;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ int ret;
- if (rxqs_n == 0)
- return 0;
- intr_handle->intr_vec = (int *)
- rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
- WARN("Failed to allocate memory for intr_vec "
- "rxq interrupt will not be supported");
- return -ENOMEM;
- }
- for (i = 0; i != rxqs_n; ++i) {
- /* 1:1 mapping between rxq and interrupt. */
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);
+ ret = ibv_req_notify_cq(rxq_ctrl->cq, 0);
}
- return 0;
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
+ return -ret;
}
/**
- * Destroy init interrupt vector array.
+ * DPDK callback for Rx queue interrupt disable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
-void
-priv_destroy_intr_vec(struct priv *priv)
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
- rte_free(intr_handle->intr_vec);
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_ctrl->cq)
+ ret = EINVAL;
+ }
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ rx_queue_id);
+ else
+ ibv_ack_cq_events(rxq_ctrl->cq, 1);
+ return -ret;
}
+
+#endif /* HAVE_UPDATE_CQ_CI */
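The new priv_rx_intr_vec_enable() above builds the EAL's queue-to-vector map: each interrupt-capable Rx queue gets intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count and its completion-channel fd in efds[count], while queues without a channel get an out-of-range vector so the EAL skips them. The mapping loop in miniature (queue_has_channel() and queue_fd() are hypothetical helpers; the vector array is sized one slot per queue):

    int *vec = malloc(n * sizeof(*vec));	/* one slot per Rx queue */
    unsigned int i, count = 0;

    if (vec == NULL)
    	return -ENOMEM;
    for (i = 0; i != n; ++i) {
    	if (!queue_has_channel(priv, i)) {
    		/* Out-of-range vector: the EAL skips this queue. */
    		vec[i] = RTE_INTR_VEC_RXTX_OFFSET +
    			RTE_MAX_RXTX_INTR_VEC_ID;
    		continue;
    	}
    	vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
    	intr_handle->efds[count] = queue_fd(priv, i);	/* epoll source */
    	count++;
    }
    intr_handle->intr_vec = vec;
    intr_handle->nb_efd = count;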
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index de6e0fa..c416009 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -533,6 +533,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ehdr;
uint8_t cs_flags = 0;
uint64_t tso = 0;
+ uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
@@ -628,6 +629,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
tso_header_sz = buf->l2_len + vlan_sz +
buf->l3_len + buf->l4_len;
+ tso_segsz = buf->tso_segsz;
if (is_tunneled && txq->tunnel_en) {
tso_header_sz += buf->outer_l2_len +
@@ -827,7 +829,7 @@ next_pkt:
};
wqe->eseg = (rte_v128u32_t){
0,
- cs_flags | (htons(buf->tso_segsz) << 16),
+ cs_flags | (htons(tso_segsz) << 16),
0,
(ehdr << 16) | htons(tso_header_sz),
};
@@ -2018,7 +2020,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt = seg;
assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
- pkt->packet_type = 0;
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
pkt->ol_flags = 0;
if (rss_hash_res && rxq->rss_hash) {
pkt->hash.rss = rss_hash_res;
@@ -2036,10 +2038,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mlx5_flow_mark_get(mark);
}
}
- if (rxq->csum | rxq->csum_l2tun) {
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ if (rxq->csum | rxq->csum_l2tun)
pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
- }
if (rxq->vlan_strip &&
(cqe->hdr_type_etc &
htons(MLX5_CQE_VLAN_STRIPPED))) {
@@ -2150,76 +2150,3 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
(void)pkts_n;
return 0;
}
-
-/**
- * DPDK callback for rx queue interrupt enable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- uint16_t ci = rxq->cq_ci;
- int ret = 0;
-
- ibv_mlx5_exp_update_cq_ci(cq, ci);
- ret = ibv_req_notify_cq(cq, 0);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return ret;
-}
-
-/**
- * DPDK callback for rx queue interrupt disable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- struct ibv_cq *ev_cq;
- void *ev_ctx;
- int ret = 0;
-
- ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != cq)
- ret = -1;
- else
- ibv_ack_cq_events(cq, 1);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return ret;
-}
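The tso_segsz change above illustrates a general rule for burst Tx paths: latch every mbuf field that is still needed into a local before the descriptor-building code may consume or advance past the mbuf, then build the WQE from the cached values. In miniature (process_segments() is a hypothetical stand-in for the segment loop):

    /* Latch metadata before the segment loop may consume `buf`. */
    uint16_t tso_segsz = buf->tso_segsz;

    process_segments(txq, buf);	/* hypothetical: may recycle `buf` */

    /* Safe: the WQE field is built from the cached value. */
    eseg_word1 = cs_flags | (htons(tso_segsz) << 16);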
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 8db8eb1..450a569 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -298,10 +298,6 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-int priv_intr_efd_enable(struct priv *priv);
-void priv_intr_efd_disable(struct priv *priv);
-int priv_create_intr_vec(struct priv *priv);
-void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
@@ -311,6 +307,12 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
+int priv_rx_intr_vec_enable(struct priv *priv);
+void priv_rx_intr_vec_disable(struct priv *priv);
+#ifdef HAVE_UPDATE_CQ_CI
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+#endif /* HAVE_UPDATE_CQ_CI */
/* mlx5_txq.c */
@@ -333,8 +335,6 @@ uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
-int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* mlx5_mr.c */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 8c5aa69..40f23da 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -94,12 +94,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
(void *)priv, strerror(err));
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
- if (dev->data->dev_conf.intr_conf.rxq) {
- err = priv_intr_efd_enable(priv);
- if (!err)
- err = priv_create_intr_vec(priv);
+ err = priv_rx_intr_vec_enable(priv);
+ if (err) {
+ ERROR("%p: RX interrupt vector creation failed",
+ (void *)priv);
+ goto error;
}
+ priv_dev_interrupt_handler_install(priv, dev);
priv_xstats_init(priv);
priv_unlock(priv);
return 0;
@@ -140,11 +141,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
priv_flow_stop(priv);
+ priv_rx_intr_vec_disable(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
- if (priv->dev->data->dev_conf.intr_conf.rxq) {
- priv_destroy_intr_vec(priv);
- priv_intr_efd_disable(priv);
- }
priv->started = 0;
priv_unlock(priv);
}
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9fae40b..bf3c2c8 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -308,9 +308,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index ee96bcd..87affc9 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -242,6 +242,18 @@ sfc_port_start(struct sfc_adapter *sa)
}
}
+ if ((port->mac_stats_update_period_ms != 0) &&
+ port->mac_stats_periodic_dma_supported) {
+ /*
+ * Request an explicit MAC stats upload immediately to
+ * preclude reading back bogus figures if the user decides
+ * to read stats before periodic DMA has really started.
+ */
+ rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_upload;
+ }
+
sfc_log_init(sa, "disable MAC drain");
rc = efx_mac_drain(sa->nic, B_FALSE);
if (rc != 0)
@@ -262,6 +274,7 @@ fail_mac_drain:
(void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
0, B_FALSE);
+fail_mac_stats_upload:
fail_mac_stats_periodic:
fail_mcast_address_list_set:
fail_mac_filter_set:
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 88118f1..6c18a53 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1678,6 +1678,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
+ if (ret < 0)
+ return ret;
+ }
+
if (rxmode->hw_ip_checksum &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(NOTICE,
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index b7b3d61..f91ecb4 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -579,6 +579,8 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
return base + offset;
}
+#define PCI_MSIX_ENABLE 0x8000
+
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
@@ -605,8 +607,17 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
break;
}
- if (cap.cap_vndr == PCI_CAP_ID_MSIX)
- hw->use_msix = 1;
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * which is why we also check whether MSI-X is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = 1;
+ }
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
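The check above leans on the PCI MSI-X capability layout: byte 0 is the capability ID (PCI_CAP_ID_MSIX, 0x11), byte 1 the next-capability pointer, and bytes 2-3 the Message Control word whose bit 15 is MSI-X Enable, hence PCI_MSIX_ENABLE == 0x8000. An annotated restatement:

    /* MSI-X capability header:
     *   byte 0: capability ID (PCI_CAP_ID_MSIX == 0x11)
     *   byte 1: next-capability pointer
     *   bytes 2-3: Message Control; bit 15 == MSI-X Enable
     */
    uint16_t msg_ctrl = ((uint16_t *)&cap)[1];	/* bytes 2-3 */

    if (msg_ctrl & PCI_MSIX_ENABLE)	/* 0x8000, i.e. bit 15 */
    	hw->use_msix = 1;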
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 280406c..e9af946 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -556,7 +556,6 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
virtio_user_dev_uninit(dev);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2b8092d..723a336 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -995,7 +995,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
- memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index e865c67..98445ee 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -800,6 +800,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(int)(rcd - (struct Vmxnet3_RxCompDesc *)
rxq->comp_ring.base), rcd->rxdIdx);
rte_pktmbuf_free_seg(rxm);
+ if (rxq->start_seg) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ rxq->start_seg = NULL;
+ rte_pktmbuf_free(start);
+ }
goto rcd_done;
}
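The added block fixes a leak by distinguishing the two mbuf free calls: rte_pktmbuf_free_seg() releases exactly one segment, while rte_pktmbuf_free() walks the next pointers and releases the whole chain assembled so far. Side by side:

    /* One segment only: the rest of the chain stays allocated. */
    rte_pktmbuf_free_seg(rxm);

    /* Whole packet: follows mbuf->next and frees every segment. */
    if (rxq->start_seg != NULL) {
    	rte_pktmbuf_free(rxq->start_seg);
    	rxq->start_seg = NULL;
    }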
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 9492193..779b4fb 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -842,7 +842,8 @@ l2fwd_crypto_usage(const char *prgname)
" (0 to disable, 10 default, 86400 maximum)\n"
" --cdev_type HW / SW / ANY\n"
- " --chain HASH_CIPHER / CIPHER_HASH\n"
+ " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
+ " HASH_ONLY\n"
" --cipher_algo ALGO\n"
" --cipher_op ENCRYPT / DECRYPT\n"
@@ -1262,7 +1263,7 @@ display_auth_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Authentication information ---\n");
printf("Algorithm: %s\n",
- rte_crypto_auth_algorithm_strings[options->auth_xform.cipher.algo]);
+ rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
rte_hexdump(stdout, "Auth key:",
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
@@ -1372,7 +1373,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
l2fwd_crypto_default_options(options);
- while ((opt = getopt_long(argc, argvopt, "p:q:st:", lgopts,
+ while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
&option_index)) != EOF) {
switch (opt) {
/* long options */
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 9cc4460..46b327e 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -614,7 +614,7 @@ em_parse_ptype(struct rte_mbuf *m)
packet_type |= RTE_PTYPE_L4_UDP;
} else
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
- } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
ipv6_hdr = (struct ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
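Before this fix the else-if repeated the ETHER_TYPE_IPv4 comparison, so the IPv6 branch was unreachable and IPv6 packets were never classified. The corrected dispatch shape (parser helpers hypothetical):

    if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
    	packet_type |= parse_ipv4_ptype(l3);	/* hypothetical helper */
    else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
    	packet_type |= parse_ipv6_ptype(l3);	/* hypothetical helper */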
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index c7490c6..8d02e1a 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -69,8 +69,13 @@ extern "C" {
#define BURST_TX_DRAIN_US 100
#ifndef APP_MAX_LCORE
+#if (RTE_MAX_LCORE > 64)
#define APP_MAX_LCORE 64
+#else
+#define APP_MAX_LCORE RTE_MAX_LCORE
+#endif
#endif
+
#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index b814880..56491ea 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -139,6 +139,21 @@ nb_common_chars(const char * s1, const char * s2)
return i;
}
+/** Retrieve either static or dynamic token at a given index. */
+static cmdline_parse_token_hdr_t *
+get_token(cmdline_parse_inst_t *inst, unsigned int index)
+{
+ cmdline_parse_token_hdr_t *token_p;
+
+ /* check presence of static tokens first */
+ if (inst->tokens[0] || !inst->f)
+ return inst->tokens[index];
+ /* generate dynamic token */
+ token_p = NULL;
+ inst->f(&token_p, NULL, &inst->tokens[index]);
+ return token_p;
+}
+
/**
* try to match the buffer with an instruction (only the first
* nb_match_token tokens if != 0). Return 0 if we match all the
@@ -146,27 +161,20 @@ nb_common_chars(const char * s1, const char * s2)
*/
static int
match_inst(cmdline_parse_inst_t *inst, const char *buf,
- unsigned int nb_match_token, void *resbuf, unsigned resbuf_size,
- cmdline_parse_token_hdr_t
- *(*dyn_tokens)[CMDLINE_PARSE_DYNAMIC_TOKENS])
+ unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
{
- unsigned int token_num=0;
cmdline_parse_token_hdr_t * token_p;
unsigned int i=0;
int n = 0;
struct cmdline_token_hdr token_hdr;
- token_p = inst->tokens[token_num];
- if (!token_p && dyn_tokens && inst->f) {
- if (!(*dyn_tokens)[0])
- inst->f(&(*dyn_tokens)[0], NULL, dyn_tokens);
- token_p = (*dyn_tokens)[0];
- }
- if (token_p)
+ /* check if we match all tokens of inst */
+ while (!nb_match_token || i < nb_match_token) {
+ token_p = get_token(inst, i);
+ if (!token_p)
+ break;
memcpy(&token_hdr, token_p, sizeof(token_hdr));
- /* check if we match all tokens of inst */
- while (token_p && (!nb_match_token || i<nb_match_token)) {
debug_printf("TK\n");
/* skip spaces */
while (isblank2(*buf)) {
@@ -201,21 +209,6 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf,
debug_printf("TK parsed (len=%d)\n", n);
i++;
buf += n;
-
- token_num ++;
- if (!inst->tokens[0]) {
- if (token_num < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!(*dyn_tokens)[token_num])
- inst->f(&(*dyn_tokens)[token_num],
- NULL,
- dyn_tokens);
- token_p = (*dyn_tokens)[token_num];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[token_num];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
}
/* does not match */
@@ -259,7 +252,6 @@ cmdline_parse(struct cmdline *cl, const char * buf)
char buf[CMDLINE_PARSE_RESULT_BUFSIZE];
long double align; /* strong alignment constraint for buf */
} result, tmp_result;
- cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
void (*f)(void *, struct cmdline *, void *) = NULL;
void *data = NULL;
int comment = 0;
@@ -276,7 +268,6 @@ cmdline_parse(struct cmdline *cl, const char * buf)
return CMDLINE_PARSE_BAD_ARGS;
ctx = cl->ctx;
- memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/*
* - look if the buffer contains at least one line
@@ -322,7 +313,7 @@ cmdline_parse(struct cmdline *cl, const char * buf)
/* fully parsed */
tok = match_inst(inst, buf, 0, tmp_result.buf,
- sizeof(tmp_result.buf), &dyn_tokens);
+ sizeof(tmp_result.buf));
if (tok > 0) /* we matched at least one token */
err = CMDLINE_PARSE_BAD_ARGS;
@@ -380,7 +371,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
cmdline_parse_token_hdr_t *token_p;
struct cmdline_token_hdr token_hdr;
char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
- cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
unsigned int partial_tok_len;
int comp_len = -1;
int tmp_len = -1;
@@ -400,7 +390,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
debug_printf("%s called\n", __func__);
memset(&token_hdr, 0, sizeof(token_hdr));
- memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/* count the number of complete token to parse */
for (i=0 ; buf[i] ; i++) {
@@ -424,23 +413,11 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
while (inst) {
/* parse the first tokens of the inst */
if (nb_token &&
- match_inst(inst, buf, nb_token, NULL, 0,
- &dyn_tokens))
+ match_inst(inst, buf, nb_token, NULL, 0))
goto next;
debug_printf("instruction match\n");
- if (!inst->tokens[0]) {
- if (nb_token <
- (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!dyn_tokens[nb_token])
- inst->f(&dyn_tokens[nb_token],
- NULL,
- &dyn_tokens);
- token_p = dyn_tokens[nb_token];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[nb_token];
+ token_p = get_token(inst, nb_token);
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
@@ -531,20 +508,10 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
inst = ctx[inst_num];
if (nb_token &&
- match_inst(inst, buf, nb_token, NULL, 0, &dyn_tokens))
+ match_inst(inst, buf, nb_token, NULL, 0))
goto next2;
- if (!inst->tokens[0]) {
- if (nb_token < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!dyn_tokens[nb_token])
- inst->f(&dyn_tokens[nb_token],
- NULL,
- &dyn_tokens);
- token_p = dyn_tokens[nb_token];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[nb_token];
+ token_p = get_token(inst, nb_token);
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
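get_token() above collapses the old twin code paths (a static tokens[] array vs. tokens generated on demand by inst->f) behind a single accessor, so callers iterate uniformly until NULL. Typical usage, as in the rewritten match loop:

    unsigned int i;
    cmdline_parse_token_hdr_t *tok;
    struct cmdline_token_hdr hdr;

    for (i = 0; (tok = get_token(inst, i)) != NULL; i++) {
    	/* Copy out: a dynamically generated token may be transient. */
    	memcpy(&hdr, tok, sizeof(hdr));
    	/* parse one token of the input buffer against hdr here */
    }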
diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h
index 65b18d4..13e086f 100644
--- a/lib/librte_cmdline/cmdline_parse.h
+++ b/lib/librte_cmdline/cmdline_parse.h
@@ -83,9 +83,6 @@ extern "C" {
/* maximum buffer size for parsed result */
#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
-/* maximum number of dynamic tokens */
-#define CMDLINE_PARSE_DYNAMIC_TOKENS 128
-
/**
* Stores a pointer to the ops struct, and the offset: the place to
* write the parsed result in the destination structure.
@@ -137,20 +134,53 @@ struct cmdline;
* When no tokens are defined (tokens[0] == NULL), they are retrieved
* dynamically by calling f() as follows:
*
- * f((struct cmdline_token_hdr **)&token_hdr,
- * NULL,
- * (struct cmdline_token_hdr *[])tokens));
+ * @code
+ *
+ * f((struct cmdline_token_hdr **)&token_p,
+ * NULL,
+ * (struct cmdline_token_hdr **)&inst->tokens[num]);
+ *
+ * @endcode
*
* The address of the resulting token is expected at the location pointed by
* the first argument. Can be set to NULL to end the list.
*
* The cmdline argument (struct cmdline *) is always NULL.
*
- * The last argument points to the NULL-terminated list of dynamic tokens
- * defined so far. Since token_hdr points to an index of that list, the
- * current index can be derived as follows:
+ * The last argument points to the inst->tokens[] entry to retrieve, which
+ * is not necessarily inside allocated memory and should neither be read nor
+ * written. Its sole purpose is to deduce the token entry index of interest
+ * as described in the example below.
+ *
+ * Note about constraints:
+ *
+ * - Only the address of these tokens is dynamic, their storage should be
+ * static like normal tokens.
+ * - Dynamic token lists that need to maintain an internal context (e.g. in
+ * order to determine the next token) must store it statically also. This
+ * context must be reinitialized when the first token is requested, that
+ * is, when &inst->tokens[0] is provided as the third argument.
+ * - Dynamic token lists must be NULL-terminated to generate usable
+ * commands.
+ *
+ * @code
+ *
+ * // Assuming first and third arguments are respectively named "token_p"
+ * // and "token":
+ *
+ * int index = token - inst->tokens;
+ *
+ * if (!index) {
+ * [...] // Clean up internal context if any.
+ * }
+ * [...] // Then set up dyn_token according to index.
+ *
+ * if (no_more_tokens)
+ * *token_p = NULL;
+ * else
+ * *token_p = &dyn_token;
*
- * int index = token_hdr - &(*tokens)[0];
+ * @endcode
*/
struct cmdline_inst {
/* f(parsed_struct, data) */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index b65cd9c..bac6bdc 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -523,7 +523,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
}
uint8_t
-rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices)
{
uint8_t i, count = 0;
@@ -538,10 +538,10 @@ rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
if (drv)
cmp = strncmp(drv->pci_drv.driver.name,
- dev_name, strlen(dev_name));
+ driver_name, strlen(driver_name));
else
cmp = strncmp(devs[i].data->name,
- dev_name, strlen(dev_name));
+ driver_name, strlen(driver_name));
if (cmp == 0)
devices[count++] = devs[i].data->dev_id;
@@ -1032,8 +1032,8 @@ rte_cryptodev_stop(uint8_t dev_id)
return;
}
- dev->data->dev_started = 0;
(*dev->dev_ops->dev_stop)(dev);
+ dev->data->dev_started = 0;
}
int
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 88aeb87..af935ab 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -463,9 +463,10 @@ extern uint8_t
rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
/**
- * Get number and identifiers of attached crypto device.
+ * Get number and identifiers of attached crypto devices that
+ * use the same crypto driver.
*
- * @param dev_name device name.
+ * @param driver_name driver name.
* @param devices output devices identifiers.
* @param nb_devices maximal number of devices.
*
@@ -473,7 +474,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
* Returns number of attached crypto device.
*/
uint8_t
-rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices);
/*
* Return the NUMA socket to which a device is connected
diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/lib/librte_eal/bsdapp/contigmem/contigmem.c
index da971de..e8fb908 100644
--- a/lib/librte_eal/bsdapp/contigmem/contigmem.c
+++ b/lib/librte_eal/bsdapp/contigmem/contigmem.c
@@ -50,24 +50,37 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+
+struct contigmem_buffer {
+ void *addr;
+ int refcnt;
+ struct mtx mtx;
+};
+
+struct contigmem_vm_handle {
+ int buffer_index;
+};
static int contigmem_load(void);
static int contigmem_unload(void);
static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
-static d_mmap_t contigmem_mmap;
static d_mmap_single_t contigmem_mmap_single;
static d_open_t contigmem_open;
+static d_close_t contigmem_close;
static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
static eventhandler_tag contigmem_eh_tag;
-static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
+static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev *contigmem_cdev = NULL;
+static int contigmem_refcnt;
TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
@@ -78,6 +91,8 @@ SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
&contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
&contigmem_buffer_size, 0, "Size of each contiguous buffer");
+SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
+ &contigmem_refcnt, 0, "Number of references to contigmem");
static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
"physaddr");
@@ -114,42 +129,49 @@ MODULE_VERSION(contigmem, 1);
static struct cdevsw contigmem_ops = {
.d_name = "contigmem",
.d_version = D_VERSION,
- .d_mmap = contigmem_mmap,
+ .d_flags = D_TRACKCLOSE,
.d_mmap_single = contigmem_mmap_single,
.d_open = contigmem_open,
+ .d_close = contigmem_close,
};
static int
contigmem_load()
{
char index_string[8], description[32];
- int i;
+ int i, error = 0;
+ void *addr;
if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
printf("%d buffers requested is greater than %d allowed\n",
contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
if (contigmem_buffer_size < PAGE_SIZE ||
(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
printf("buffer size 0x%lx is not greater than PAGE_SIZE and "
"power of two\n", contigmem_buffer_size);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
for (i = 0; i < contigmem_num_buffers; i++) {
- contigmem_buffers[i] =
- contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
- BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
-
- if (contigmem_buffers[i] == NULL) {
+ addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
+ 0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
+ if (addr == NULL) {
printf("contigmalloc failed for buffer %d\n", i);
- return ENOMEM;
+ error = ENOMEM;
+ goto error;
}
- printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
- (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i]));
+ printf("%2u: virt=%p phys=%p\n", i, addr,
+ (void *)pmap_kextract((vm_offset_t)addr));
+
+ mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
+ contigmem_buffers[i].addr = addr;
+ contigmem_buffers[i].refcnt = 0;
snprintf(index_string, sizeof(index_string), "%d", i);
snprintf(description, sizeof(description),
@@ -165,6 +187,17 @@ contigmem_load()
GID_WHEEL, 0600, "contigmem");
return 0;
+
+error:
+ for (i = 0; i < contigmem_num_buffers; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
+
+ return error;
}
static int
@@ -172,16 +205,22 @@ contigmem_unload()
{
int i;
+ if (contigmem_refcnt > 0)
+ return EBUSY;
+
if (contigmem_cdev != NULL)
destroy_dev(contigmem_cdev);
if (contigmem_eh_tag != NULL)
EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
- for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++)
- if (contigmem_buffers[i] != NULL)
- contigfree(contigmem_buffers[i], contigmem_buffer_size,
- M_CONTIGMEM);
+ for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
return 0;
}
@@ -192,7 +231,7 @@ contigmem_physaddr(SYSCTL_HANDLER_ARGS)
uint64_t physaddr;
int index = (int)(uintptr_t)arg1;
- physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
+ physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
return sysctl_handle_64(oidp, &physaddr, 0, req);
}
@@ -200,22 +239,121 @@ static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
struct thread *td)
{
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
-contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
+contigmem_close(struct cdev *cdev, int fflags, int devtype,
+ struct thread *td)
{
- *paddr = offset;
+ atomic_subtract_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
+contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
+ mtx_lock(&buf->mtx);
+ if (buf->refcnt == 0)
+ memset(buf->addr, 0, contigmem_buffer_size);
+ buf->refcnt++;
+ mtx_unlock(&buf->mtx);
+
+ return 0;
+}
+
+static void
+contigmem_cdev_pager_dtor(void *handle)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ mtx_lock(&buf->mtx);
+ buf->refcnt--;
+ mtx_unlock(&buf->mtx);
+
+ free(vmh, M_CONTIGMEM);
+
+ atomic_subtract_int(&contigmem_refcnt, 1);
+}
+
+static int
+contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
+ vm_page_t *mres)
+{
+ vm_paddr_t paddr;
+ vm_page_t m_paddr, page;
+ vm_memattr_t memattr, memattr1;
+
+ memattr = object->memattr;
+
+ VM_OBJECT_WUNLOCK(object);
+
+ paddr = offset;
+
+ m_paddr = vm_phys_paddr_to_vm_page(paddr);
+ if (m_paddr != NULL) {
+ memattr1 = pmap_page_get_memattr(m_paddr);
+ if (memattr1 != memattr)
+ memattr = memattr1;
+ }
+
+ if (((*mres)->flags & PG_FICTITIOUS) != 0) {
+ /*
+ * If the passed in result page is a fake page, update it with
+ * the new physical address.
+ */
+ page = *mres;
+ VM_OBJECT_WLOCK(object);
+ vm_page_updatefake(page, paddr, memattr);
+ } else {
+ vm_page_t mret;
+ /*
+ * Replace the passed in reqpage page with our own fake page and
+ * free up the original page.
+ */
+ page = vm_page_getfake(paddr, memattr);
+ VM_OBJECT_WLOCK(object);
+ mret = vm_page_replace(page, object, (*mres)->pindex);
+ KASSERT(mret == *mres,
+ ("invalid page replacement, old=%p, ret=%p", *mres, mret));
+ vm_page_lock(mret);
+ vm_page_free(mret);
+ vm_page_unlock(mret);
+ *mres = page;
+ }
+
+ page->valid = VM_PAGE_BITS_ALL;
+
+ return VM_PAGER_OK;
+}
+
+static struct cdev_pager_ops contigmem_cdev_pager_ops = {
+ .cdev_pg_ctor = contigmem_cdev_pager_ctor,
+ .cdev_pg_dtor = contigmem_cdev_pager_dtor,
+ .cdev_pg_fault = contigmem_cdev_pager_fault,
+};
+
+static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj, int nprot)
{
+ struct contigmem_vm_handle *vmh;
uint64_t buffer_index;
/*
@@ -227,10 +365,17 @@ contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
if (buffer_index >= contigmem_num_buffers)
return EINVAL;
- memset(contigmem_buffers[buffer_index], 0, contigmem_buffer_size);
- *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index]);
- *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
- curthread->td_ucred);
+ if (size > contigmem_buffer_size)
+ return EINVAL;
+
+ vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
+ if (vmh == NULL)
+ return ENOMEM;
+ vmh->buffer_index = buffer_index;
+
+ *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
+ *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
+ size, nprot, *offset, curthread->td_ucred);
return 0;
}
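
Note on the contigmem rework above: buffers are now zeroed only on first reference, inside the pager constructor, instead of on every mmap() -- the old code wiped a buffer another process still had mapped. A minimal userspace sketch of the consumer side, assuming the conventional /dev/contigmem node and that the mmap offset selects the buffer as contigmem_mmap_single() expects (both details are illustrative, not part of the patch):

/* Sketch only: map the first contigmem buffer and release it. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int
map_first_buffer(size_t buf_size)
{
    void *va;
    int fd = open("/dev/contigmem", O_RDWR);

    if (fd < 0)
        return -1;
    va = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
        fd, 0 /* selects buffer 0 */);
    close(fd);                /* the mapping holds its own reference */
    if (va == MAP_FAILED)
        return -1;
    munmap(va, buf_size);
    return 0;
}
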
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index 12e0fca..60526ca 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -46,10 +46,10 @@ rte_eal_primary_proc_alive(const char *config_file_path)
if (config_file_path)
config_fd = open(config_file_path, O_RDONLY);
else {
- char default_path[PATH_MAX+1];
- snprintf(default_path, PATH_MAX, RUNTIME_CONFIG_FMT,
- default_config_dir, "rte");
- config_fd = open(default_path, O_RDONLY);
+ const char *path;
+
+ path = eal_runtime_config_path();
+ config_fd = open(path, O_RDONLY);
}
if (config_fd < 0)
return 0;
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 008ce13..946db6f 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -329,7 +329,7 @@ rte_malloc_set_limit(const char *type, size_t max);
* @param addr
* Address obtained from a previous rte_malloc call
* @return
- * NULL on error
+ * RTE_BAD_PHYS_ADDR on error
* otherwise return physical address of the buffer
*/
phys_addr_t
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index c36d852..93cc099 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -66,7 +66,7 @@ extern "C" {
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 1
+#define RTE_VER_MINOR 2
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index f4a8835..5c0627b 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -253,6 +253,8 @@ rte_malloc_virt2phy(const void *addr)
{
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
- return 0;
+ return RTE_BAD_PHYS_ADDR;
+ if (elem->ms->phys_addr == RTE_BAD_PHYS_ADDR)
+ return RTE_BAD_PHYS_ADDR;
return elem->ms->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->ms->addr);
}
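
Taken together, the rte_malloc.h and rte_malloc.c hunks make RTE_BAD_PHYS_ADDR the single failure value of rte_malloc_virt2phy(), covering both a foreign input pointer and a memseg with no physical backing at all (the --no-huge case handled in eal_memory.c below). A caller sketch under the new contract:

#include <rte_malloc.h>
#include <rte_memory.h>

/* Sketch: resolve the physical address of an rte_malloc'd buffer,
 * treating RTE_BAD_PHYS_ADDR as the only failure indicator. */
static int
buf_to_phys(const void *buf, phys_addr_t *pa)
{
    phys_addr_t p = rte_malloc_virt2phy(buf);

    if (p == RTE_BAD_PHYS_ADDR)
        return -1;            /* foreign pointer, or no phys backing */
    *pa = p;
    return 0;
}
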
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index ebe0683..cdce845 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -137,6 +137,13 @@ test_phys_addrs_available(void)
if (rte_xen_dom0_supported())
return;
+ if (!rte_eal_has_hugepages()) {
+ RTE_LOG(ERR, EAL,
+ "Started without hugepages support, physical addresses not available\n");
+ phys_addrs_available = false;
+ return;
+ }
+
physaddr = rte_mem_virt2phy(&tmp);
if (physaddr == RTE_BAD_PHYS_ADDR) {
RTE_LOG(ERR, EAL,
@@ -995,7 +1002,7 @@ rte_eal_hugepage_init(void)
strerror(errno));
return -1;
}
- mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].phys_addr = RTE_BAD_PHYS_ADDR;
mcfg->memseg[0].addr = addr;
mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 83898a8..2d442a9 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -2351,6 +2351,7 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
index d3bc03c..6073007 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -134,6 +134,12 @@ rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_private = NULL;
+ /*
+ * A secondary process will check the name when attaching.
+ * Clear this field to avoid attaching to a released port.
+ */
+ eth_dev->data->name[0] = '\0';
+
eth_dev->device = NULL;
eth_dev->intr_handle = NULL;
}
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index d6726bb..894b534 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -151,6 +151,7 @@ DPDK_17.05 {
rte_eth_dev_attach_secondary;
rte_eth_find_next;
+ rte_eth_tx_done_cleanup;
rte_eth_xstats_get_by_id;
rte_eth_xstats_get_id_by_name;
rte_eth_xstats_get_names_by_id;
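
rte_eth_tx_done_cleanup() shipped in 17.05 but was missing from the version map, so applications linking against the shared library could not resolve it; the hunk above exports it. A usage sketch, assuming the 17.05 prototype (uint8_t port IDs):

#include <rte_ethdev.h>

/* Sketch: reclaim up to 64 already-transmitted mbufs from TX queue 0
 * so their mempool is not exhausted by slow hardware completion. */
static void
reclaim_tx_mbufs(uint8_t port_id)
{
    int n = rte_eth_tx_done_cleanup(port_id, 0, 64);

    if (n < 0)
        return;               /* e.g. -ENODEV or -ENOTSUP */
    /* n mbufs were returned to their pool */
}
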
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 20afc3f..755f9f7 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -301,7 +301,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
sizeof(dev->data->links_map[0]) * nb_ports *
RTE_EVENT_MAX_QUEUES_PER_DEV,
RTE_CACHE_LINE_SIZE);
- if (dev->data->links_map == NULL) {
+ if (links_map == NULL) {
dev->data->nb_ports = 0;
RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
"nb_ports %u", nb_ports);
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 645c0cf..37a8110 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -538,8 +538,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
cached_free_slots->objs,
LCORE_CACHE_SIZE, NULL);
- if (n_slots == 0)
- return -ENOSPC;
+ if (n_slots == 0) {
+ ret = -ENOSPC;
+ goto failure;
+ }
cached_free_slots->len += n_slots;
}
@@ -548,8 +550,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
cached_free_slots->len--;
slot_id = cached_free_slots->objs[cached_free_slots->len];
} else {
- if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
- return -ENOSPC;
+ if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
+ ret = -ENOSPC;
+ goto failure;
+ }
}
new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
@@ -659,6 +663,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
/* Error in addition, store new slot back in the ring and return error */
enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));
+failure:
if (h->add_key == ADD_KEY_MULTIWRITER)
rte_spinlock_unlock(h->multiwriter_lock);
return ret;
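
The cuckoo-hash hunks close a lock leak: both early -ENOSPC returns skipped the rte_spinlock_unlock() of multiwriter_lock at the end of __rte_hash_add_key_with_hash(), leaving the table wedged for other writers. Routing every error through one failure label that releases the lock is the pattern; a condensed sketch:

#include <errno.h>
#include <rte_spinlock.h>

/* Sketch: single-exit error handling so the lock is always dropped. */
static int
add_entry(rte_spinlock_t *lock, int have_slot)
{
    int ret = 0;

    rte_spinlock_lock(lock);
    if (!have_slot) {
        ret = -ENOSPC;
        goto failure;
    }
    /* ... insert the entry, set ret accordingly ... */
failure:
    rte_spinlock_unlock(lock);
    return ret;
}
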
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 1cb0310..1276b3c 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1136,6 +1136,7 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
* Array size
* @return
* - 0: Success
+ * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
*/
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
struct rte_mbuf **mbufs, unsigned count)
@@ -1453,7 +1454,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
*/
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return m->data_off;
}
@@ -1467,7 +1468,7 @@ static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
*/
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
m->data_len);
}
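
The rte_mbuf.h change downgrades the sanity check because headroom and tailroom are per-segment properties: in a chained packet only the first segment is a packet header, yet tailroom is usually wanted on the last segment. With the fix, a debug (RTE_LIBRTE_MBUF_DEBUG) build no longer asserts on a sketch like:

#include <rte_mbuf.h>

/* Sketch: append room lives in the *last* segment of a chain, which
 * is not a packet header when nb_segs > 1. */
static uint16_t
chain_tailroom(struct rte_mbuf *m)
{
    return rte_pktmbuf_tailroom(rte_pktmbuf_lastseg(m));
}
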
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index a3269c4..acd70bb 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -341,11 +341,11 @@ extern "C" {
* Packet format:
* <'ether type'=0x0800
* | 'version'=4, 'protocol'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
* or,
* <'ether type'=0x86DD
* | 'version'=6, 'next header'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
*/
#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000
/**
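
The ptype hunk is a documentation fix: the IANA-assigned VXLAN UDP destination port is 4789, not 4798. A software classifier consistent with the corrected comment might look like:

#include <rte_byteorder.h>
#include <rte_udp.h>

#define VXLAN_UDP_PORT 4789   /* IANA assignment */

/* Sketch: identify VXLAN encapsulation by UDP destination port. */
static int
is_vxlan(const struct udp_hdr *uh)
{
    return rte_be_to_cpu_16(uh->dst_port) == VXLAN_UDP_PORT;
}
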
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f65310f..6fc3c9c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -476,7 +476,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
/* required for xen_dom0 to get the machine address */
paddr = rte_mem_phy2mch(-1, paddr);
- if (paddr == RTE_BAD_PHYS_ADDR) {
+ if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
ret = -EINVAL;
goto fail;
}
diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c
index e9a122c..dbbad32 100644
--- a/lib/librte_metrics/rte_metrics.c
+++ b/lib/librte_metrics/rte_metrics.c
@@ -144,6 +144,8 @@ rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
entry = &stats->metadata[idx_name + stats->cnt_stats];
strncpy(entry->name, names[idx_name],
RTE_METRICS_MAX_NAME_LEN);
+ /* Enforce NULL-termination */
+ entry->name[RTE_METRICS_MAX_NAME_LEN - 1] = '\0';
memset(entry->value, 0, sizeof(entry->value));
entry->idx_next_stat = idx_name + stats->cnt_stats + 1;
}
diff --git a/lib/librte_metrics/rte_metrics.h b/lib/librte_metrics/rte_metrics.h
index 0fa3104..297300a 100644
--- a/lib/librte_metrics/rte_metrics.h
+++ b/lib/librte_metrics/rte_metrics.h
@@ -118,7 +118,8 @@ void rte_metrics_init(int socket_id);
* is required for updating said metric's value.
*
* @param name
- * Metric name
+ * Metric name. If this exceeds RTE_METRICS_MAX_NAME_LEN (including
+ * the NULL terminator), it is truncated.
*
* @return
* - Zero or positive: Success (index key of new metric)
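
Both metrics hunks deal with the usual strncpy() pitfall: when the source fills the destination completely, strncpy() stores no terminating NUL and later reads run past the buffer. The fix forces the terminator and documents the resulting truncation; the general pattern as a sketch:

#include <string.h>

/* Sketch: bounded copy that is always terminated, truncating overlong
 * names exactly as the fixed rte_metrics_reg_names() now does. */
static void
copy_name(char *dst, size_t dst_size, const char *src)
{
    strncpy(dst, src, dst_size);
    dst[dst_size - 1] = '\0';
}
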
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 5f98c33..6f58faf 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -189,7 +189,8 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
/* reserve a memory zone for this ring. If we can't get rte_config or
* we are secondary process, the memzone_reserve function will set
* rte_errno for us appropriately - hence no check in this function */
- mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ mz = rte_memzone_reserve_aligned(mz_name, ring_size, socket_id,
+ mz_flags, __alignof__(*r));
if (mz != NULL) {
r = mz->addr;
/* no need to check return value here, we already checked the
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 97f025a..3400ed8 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -801,7 +801,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -819,7 +819,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
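
The rte_ring hunks align the single-object dequeue helpers with their documented errno: an empty ring means "no entry", i.e. -ENOENT, while -ENOBUFS belongs to the enqueue side ("no room"). A caller under the corrected contract:

#include <rte_ring.h>

/* Sketch: poll one object; -ENOENT now means the ring is empty. */
static void *
poll_one(struct rte_ring *r)
{
    void *obj;

    if (rte_ring_sc_dequeue(r, &obj) != 0)
        return NULL;          /* -ENOENT: nothing queued */
    return obj;
}
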
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 1f565fb..9ba4d3d 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -272,7 +272,7 @@ rte_vhost_get_mtu(int vid, uint16_t *mtu)
if (!(dev->flags & VIRTIO_DEV_READY))
return -EAGAIN;
- if (!(dev->features & VIRTIO_NET_F_MTU))
+ if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
return -ENOTSUP;
*mtu = dev->mtu;
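
The vhost fix is easy to miss: dev->features is a bitmask indexed by feature *number*, and VIRTIO_NET_F_MTU is that number (a small integer), so the old dev->features & VIRTIO_NET_F_MTU tested unrelated low bits. The same pattern recurs in the virtio_net.c hunk below. A helper sketch of the correct test:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: virtio feature words are keyed by bit position, so the
 * test must shift first -- the bug both vhost hunks fix. */
static bool
has_feature(uint64_t features, unsigned int fbit)
{
    return (features & (1ULL << fbit)) != 0;
}
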
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 48219e0..f8da78a 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -114,11 +114,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- if (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+
+ if (m_buf->ol_flags & PKT_TX_TCP_SEG)
+ csum_l4 |= PKT_TX_TCP_CKSUM;
+
+ if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
- switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ switch (csum_l4) {
case PKT_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct tcp_hdr,
cksum));
@@ -138,6 +143,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
}
+ /* IP cksum verification cannot be bypassed, so calculate it here */
+ if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ struct ipv4_hdr *ipv4_hdr;
+
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+ m_buf->l2_len);
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ }
+
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
if (m_buf->ol_flags & PKT_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -601,9 +615,11 @@ static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
if (dev->features &
- (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
- VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
- VIRTIO_NET_F_HOST_UFO))
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
return true;
return false;
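
The virtio_net.c changes make vhost honor the mbuf offload contract: an application may set PKT_TX_TCP_SEG without PKT_TX_TCP_CKSUM (TSO implies the checksum request), and PKT_TX_IP_CKSUM has no virtio-net header equivalent, so the IPv4 checksum is now computed in software. An application-side sketch of the flag combination vhost must now handle:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Sketch: a typical IPv4/TCP TSO request.  PKT_TX_TCP_CKSUM is
 * deliberately absent -- TSO alone must imply it on the vhost side. */
static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
    m->l2_len = sizeof(struct ether_hdr);
    m->l3_len = sizeof(struct ipv4_hdr);
    m->l4_len = sizeof(struct tcp_hdr);
    m->tso_segsz = mss;
}
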
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index f2e1836..7e8a6fe 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -30,7 +30,7 @@
# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 17.05.1
+Version: 17.05.2
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 029ce8a..2ca489a 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -3581,8 +3581,7 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
rte_hexdump(stdout, "ciphertext:", ciphertext, plaintext_len);
#endif
- expected_ciphertext_shifted = rte_malloc(NULL,
- ceil_byte_length(plaintext_len + extra_offset), 0);
+ expected_ciphertext_shifted = rte_malloc(NULL, plaintext_len, 8);
TEST_ASSERT_NOT_NULL(expected_ciphertext_shifted,
"failed to reserve memory for ciphertext shifted\n");
diff --git a/test/test/test_cryptodev_perf.c b/test/test/test_cryptodev_perf.c
index d60028d..894b2dd 100644
--- a/test/test/test_cryptodev_perf.c
+++ b/test/test/test_cryptodev_perf.c
@@ -2634,6 +2634,11 @@ static uint8_t aes_iv[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
+static uint8_t aes_gcm_aad[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
static uint8_t triple_des_key[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -2895,7 +2900,7 @@ test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
#define AES_BLOCK_SIZE 16
#define AES_CIPHER_IV_LENGTH 16
-
+#define AES_GCM_AAD_LENGTH 16
#define TRIPLE_DES_BLOCK_SIZE 8
#define TRIPLE_DES_CIPHER_IV_LENGTH 8
@@ -2939,8 +2944,6 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
AES_CIPHER_IV_LENGTH + data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
op->sym->auth.data.offset = AES_CIPHER_IV_LENGTH;
op->sym->auth.data.length = data_len;
}
@@ -2977,8 +2980,8 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+ op->sym->auth.aad.data = aes_gcm_aad;
+ op->sym->auth.aad.length = AES_GCM_AAD_LENGTH;
/* Cipher Parameters */
op->sym->cipher.iv.data = aes_iv;
@@ -3110,8 +3113,6 @@ test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = triple_des_iv;
- op->sym->auth.aad.length = TRIPLE_DES_CIPHER_IV_LENGTH;
/* Cipher Parameters */
op->sym->cipher.iv.data = triple_des_iv;
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 52d2d05..7c3d5a9 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -83,7 +83,7 @@
#define MAX_PKT_BURST (512)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_ut")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (-1)
@@ -221,6 +221,10 @@ static struct rte_eth_txconf tx_conf_default = {
};
+static void free_virtualpmd_tx_queue(void);
+
+
+
static int
configure_ethdev(uint8_t port_id, uint8_t start, uint8_t en_isr)
{
@@ -684,6 +688,7 @@ static int
remove_slaves_and_stop_bonded_device(void)
{
/* Clean up and remove slaves from bonded device */
+ free_virtualpmd_tx_queue();
while (test_params->bonded_slave_count > 0)
TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(),
"test_remove_slave_from_bonded_device failed");
@@ -939,7 +944,7 @@ test_set_bonded_port_initialization_mac_assignment(void)
/*
* 1. a - Create / configure bonded / slave ethdevs
*/
- bonded_port_id = rte_eth_bond_create("ethdev_bond_mac_ass_test",
+ bonded_port_id = rte_eth_bond_create("net_bonding_mac_ass_test",
BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
TEST_ASSERT(bonded_port_id > 0, "failed to create bonded device");
@@ -1617,9 +1622,6 @@ test_roundrobin_rx_burst_on_single_slave(void)
/* free mbufs */
for (i = 0; i < MAX_PKT_BURST; i++) {
- if (gen_pkt_burst[i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[i]);
-
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
}
@@ -1966,12 +1968,6 @@ test_roundrobin_verify_slave_link_status_change_behaviour(void)
for (i = 0; i < MAX_PKT_BURST; i++) {
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
-
- if (gen_pkt_burst[1][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
-
- if (gen_pkt_burst[3][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
}
/* Clean up and remove slaves from bonded device */
@@ -2410,7 +2406,7 @@ test_activebackup_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -2543,16 +2539,6 @@ test_activebackup_verify_slave_link_status_change_failover(void)
"(%d) port_stats.opackets not as expected",
test_params->slave_port_ids[3]);
- /* free mbufs */
- for (i = 0; i < TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -2785,7 +2771,7 @@ balance_l23_tx_burst(uint8_t vlan_enabled, uint8_t ipv4,
static int
test_balance_l23_tx_burst_ipv4_toggle_ip_addr(void)
{
- return balance_l23_tx_burst(0, 1, 1, 0);
+ return balance_l23_tx_burst(0, 1, 0, 1);
}
static int
@@ -3314,7 +3300,7 @@ test_balance_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
@@ -3452,16 +3438,6 @@ test_balance_verify_slave_link_status_change_behaviour(void)
test_params->bonded_port_id, (int)port_stats.ipackets,
burst_size * 3);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -3883,7 +3859,7 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
@@ -3980,16 +3956,6 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < BROADCAST_LINK_STATUS_NUM_OF_SLAVES; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -4405,7 +4371,7 @@ test_tlb_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -4523,18 +4489,6 @@ test_tlb_verify_slave_link_status_change_failover(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs */
-
- for (i = 0; i < TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 106ec62..e65653a 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -73,11 +73,11 @@
#define MAX_PKT_BURST (32)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_mode4_bond_dev")
+#define BONDED_DEV_NAME ("ut_mode4_bond_dev")
-#define SLAVE_DEV_NAME_FMT ("unit_test_mode4_slave_%d")
-#define SLAVE_RX_QUEUE_FMT ("unit_test_mode4_slave_%d_rx")
-#define SLAVE_TX_QUEUE_FMT ("unit_test_mode4_slave_%d_tx")
+#define SLAVE_DEV_NAME_FMT ("ut_mode4_slave_%d")
+#define SLAVE_RX_QUEUE_FMT ("ut_mode4_slave_%d_rx")
+#define SLAVE_TX_QUEUE_FMT ("ut_mode4_slave_%d_tx")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (0xFF)
diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index d28db7d..9073d4a 100644
--- a/test/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -60,7 +60,7 @@
#define RXTX_RING_SIZE 1024
#define RXTX_QUEUE_COUNT 4
-#define BONDED_DEV_NAME ("rssconf_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_rss")
#define SLAVE_DEV_NAME_FMT ("rssconf_slave%d")
#define SLAVE_RXTX_QUEUE_FMT ("rssconf_slave%d_q%d")