author     Ben Hutchings <bhutchings@solarflare.com>  2011-12-20 01:08:05 +0000
committer  Ben Hutchings <bhutchings@solarflare.com>  2012-01-09 17:08:18 +0000
commit     cdb08f8fd8642a6d661c920f565e85cf87a0c9be
tree       038c7e30139a5d28abe8935be8935fe5a607a9f9 /drivers/net/ethernet/sfc/efx.c
parent     fa142b9da3393fd92b398b6bdecf3f21914e309b
sfc: Set default parallelism to per-core by default
The previous default of per-package can be more CPU-efficient, but users
generally seem to prefer per-core.  It should also allow accelerated RFS to
direct packets more precisely, if IRQ affinity is properly spread out.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
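As an illustration of what the new default changes, the sketch below (not part of the patch) counts distinct CPU groups over the online CPUs, grouping either by core (SMT siblings collapse into one group, via topology_thread_cpumask(), as that helper was named in kernels of this era) or by package (topology_core_cpumask()). The helper name count_cpu_groups() is hypothetical.

#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Hypothetical sketch: count distinct core or package groups among the
 * online CPUs, in the same shape as efx_wanted_parallelism() below.
 */
static unsigned int count_cpu_groups(bool per_core)
{
	cpumask_var_t seen;
	unsigned int count = 0;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&seen, GFP_KERNEL)))
		return 1;	/* no spreading if the mask cannot be allocated */

	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, seen))
			continue;	/* already covered by an earlier group */
		++count;
		cpumask_or(seen, seen,
			   per_core ? topology_thread_cpumask(cpu)
				    : topology_core_cpumask(cpu));
	}

	free_cpumask_var(seen);
	return count;
}

On a hyper-threaded machine, count_cpu_groups(true) returns the number of physical cores and count_cpu_groups(false) the number of packages, so the new per-core default generally requests more RSS channels than the old per-package default.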
Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5fcc42f7d86..d7301d2e81a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -162,7 +162,7 @@ static unsigned int interrupt_mode;
  * interrupt handling.
  *
  * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each package (level II cache)
+ * The default (0) means to assign an interrupt to each core.
  */
 static unsigned int rss_cpus;
 module_param(rss_cpus, uint, 0444);
@@ -1148,14 +1148,14 @@ static void efx_fini_io(struct efx_nic *efx)
 
 static int efx_wanted_parallelism(void)
 {
-	cpumask_var_t core_mask;
+	cpumask_var_t thread_mask;
 	int count;
 	int cpu;
 
 	if (rss_cpus)
 		return rss_cpus;
 
-	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
@@ -1163,14 +1163,14 @@ static int efx_wanted_parallelism(void)
 	count = 0;
 	for_each_online_cpu(cpu) {
-		if (!cpumask_test_cpu(cpu, core_mask)) {
+		if (!cpumask_test_cpu(cpu, thread_mask)) {
 			++count;
-			cpumask_or(core_mask, core_mask,
-				   topology_core_cpumask(cpu));
+			cpumask_or(thread_mask, thread_mask,
+				   topology_thread_cpumask(cpu));
 		}
 	}
 
-	free_cpumask_var(core_mask);
+	free_cpumask_var(thread_mask);
 	return count;
 }
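For context on how such a count is typically consumed, a hypothetical caller might clamp the wanted parallelism against hardware and interrupt-vector limits along these lines; the names efx_probe_rss_channels(), max_channels and max_vectors are placeholders for illustration and are not taken from this patch.

#include <linux/kernel.h>	/* min_t() */

/* Placeholder consumer: one RSS channel per unit of wanted parallelism,
 * capped by whatever the hardware and MSI-X budget allow.
 */
static unsigned int efx_probe_rss_channels(unsigned int max_channels,
					   unsigned int max_vectors)
{
	unsigned int wanted = efx_wanted_parallelism();

	return min_t(unsigned int, wanted,
		     min_t(unsigned int, max_channels, max_vectors));
}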