Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c         |  8
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  | 31
-rw-r--r--  drivers/net/ethernet/sfc/rx.c          |  8
3 files changed, 24 insertions, 23 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 01b99206139a..39e4cb39de29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
- EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+ NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = false;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
- PAGE_SIZE / 2);
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
efx->rx_scatter = true;
efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
efx->rx_buffer_order = 0;
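Note: the BUILD_BUG_ON pair above is a compile-time restatement of the packing argument: the scatter buffer size must be a whole number of cache lines, and two aligned buffers plus the per-page bookkeeping must still fit in one page. A minimal user-space sketch of that arithmetic, assuming a typical x86-64 configuration (PAGE_SIZE 4096, L1_CACHE_BYTES 64, NET_IP_ALIGN 0) and modelling struct efx_rx_page_state as 16 bytes; these are illustrative assumptions, not values read from a particular kernel build:

/* Packing check, user-space model.  All numeric values are assumptions
 * for illustration (typical x86-64), not a specific kernel config.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_X86        4096u
#define L1_CACHE_BYTES_X86   64u
#define NET_IP_ALIGN_X86     0u
#define PAGE_STATE_BYTES     16u                /* stand-in for sizeof(struct efx_rx_page_state) */
#define EFX_RX_USR_BUF_SIZE  (2048u - 256u)     /* 1792, as in the new define */

#define ALIGN_UP(x, a)       (((x) + (a) - 1u) & ~((a) - 1u))

int main(void)
{
	unsigned int step = ALIGN_UP(NET_IP_ALIGN_X86 + EFX_RX_USR_BUF_SIZE,
				     L1_CACHE_BYTES_X86);

	assert(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES_X86 == 0);   /* whole cache lines */
	assert(PAGE_STATE_BYTES + 2u * step <= PAGE_SIZE_X86);   /* two buffers fit in one page */

	printf("step=%u, page usage=%u/%u\n",
	       step, PAGE_STATE_BYTES + 2u * step, PAGE_SIZE_X86);
	return 0;
}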
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9bd433a095c5..39d6bd77f015 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -72,8 +72,20 @@
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
-/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
-#define EFX_RX_USR_BUF_SIZE 1824
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT 4
+#endif
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
@@ -468,24 +480,11 @@ enum nic_state {
};
/*
- * Alignment of page-allocated RX buffers
- *
- * Controls the number of bytes inserted at the start of an RX buffer.
- * This is the equivalent of NET_IP_ALIGN [which controls the alignment
- * of the skb->head for hardware DMA].
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define EFX_PAGE_IP_ALIGN 0
-#else
-#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
-#endif
-
-/*
* Alignment of the skb->head which wraps a page-allocated RX buffer
*
* The skb allocated to wrap an rx_buffer can have this alignment. Since
* the data is memcpy'd from the rx_buf, it does not need to be equal to
- * EFX_PAGE_IP_ALIGN.
+ * NET_IP_ALIGN.
*/
#define EFX_PAGE_SKB_ALIGN 2
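Note: the new EFX_RX_BUF_ALIGNMENT selection encodes a trade-off. On architectures where NET_IP_ALIGN is 0 (unaligned loads are cheap), the driver can afford to align every buffer to a full cache line; elsewhere it falls back to the 4-byte alignment the network header needs. A small stand-alone sketch of that preprocessor choice, with NET_IP_ALIGN and L1_CACHE_BYTES defined locally as assumptions rather than pulled from the kernel headers:

/* Model of the EFX_RX_BUF_ALIGNMENT selection.  In the kernel these
 * macros come from <linux/skbuff.h> and <linux/cache.h>; here they are
 * assumed values for illustration.
 */
#include <stdio.h>

#define NET_IP_ALIGN    0     /* 0 on x86-like arches, typically 2 elsewhere */
#define L1_CACHE_BYTES  64    /* assumed cache line size */

#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT  L1_CACHE_BYTES  /* cheap unaligned access: favour cache-line alignment */
#else
#define EFX_RX_BUF_ALIGNMENT  4               /* otherwise only keep the network header 4-byte aligned */
#endif

int main(void)
{
	printf("NET_IP_ALIGN=%d -> EFX_RX_BUF_ALIGNMENT=%d\n",
	       NET_IP_ALIGN, EFX_RX_BUF_ALIGNMENT);
	return 0;
}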
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index e73e30bac10e..a7dfe36cabf4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
void efx_rx_config_page_split(struct efx_nic *efx)
{
{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
- L1_CACHE_BYTES);
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+ EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
efx->rx_page_buf_step);
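Note: with the constants above, efx_rx_config_page_split() works out to a step of ALIGN(1792 + 0, 64) = 1792 bytes and (4096 - 16) / 1792 = 2 buffers per order-0 page, using the same illustrative x86-64 numbers as the earlier sketch (1792-byte buffers, 64-byte cache lines, a 16-byte page-state struct). A sketch of that computation under those assumptions:

/* efx_rx_config_page_split(), modelled with assumed numbers
 * (order-0 page, 64-byte cache lines, 16-byte page state).
 */
#include <stdio.h>

#define PAGE_SIZE_X86         4096u
#define EFX_RX_BUF_ALIGNMENT  64u
#define NET_IP_ALIGN_X86      0u
#define PAGE_STATE_BYTES      16u
#define ALIGN_UP(x, a)        (((x) + (a) - 1u) & ~((a) - 1u))

int main(void)
{
	unsigned int rx_dma_len = 2048u - 256u;   /* EFX_RX_USR_BUF_SIZE when scattering */
	unsigned int step = ALIGN_UP(rx_dma_len + NET_IP_ALIGN_X86,
				     EFX_RX_BUF_ALIGNMENT);
	unsigned int bufs_per_page = (PAGE_SIZE_X86 - PAGE_STATE_BYTES) / step;

	printf("rx_page_buf_step=%u, rx_bufs_per_page=%u\n", step, bufs_per_page);
	return 0;
}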
@@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
rx_buf->page = page;
- rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->page_offset = page_offset + NET_IP_ALIGN;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
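Note: the refill loop places each buffer's data at page_offset + NET_IP_ALIGN, where page_offset is assumed to start at sizeof(struct efx_rx_page_state) and advance by rx_page_buf_step between buffers (the advance itself is outside the hunk shown). Under the same illustrative numbers, buffer 0 would start 16 bytes into the page and buffer 1 at byte 1808. A sketch of those offsets:

/* Per-buffer placement within one page, using the assumed values from
 * the previous sketches (16-byte page state, NET_IP_ALIGN of 0,
 * 1792-byte rx_page_buf_step).
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_state = 16u;     /* assumed sizeof(struct efx_rx_page_state) */
	unsigned int net_ip_align = 0u;    /* NET_IP_ALIGN on x86-like arches */
	unsigned int step = 1792u;         /* rx_page_buf_step from the previous sketch */
	unsigned int page_offset = page_state;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		printf("buffer %u: page_offset=%u, data starts at byte %u\n",
		       i, page_offset, page_offset + net_ip_align);
		page_offset += step;
	}
	return 0;
}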