Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/bus.c | 56
-rw-r--r--  drivers/acpi/device_pm.c | 1
-rw-r--r--  drivers/acpi/osl.c | 6
-rw-r--r--  drivers/acpi/resource.c | 160
-rw-r--r--  drivers/base/regmap/regmap.c | 5
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 15
-rw-r--r--  drivers/crypto/talitos.c | 4
-rw-r--r--  drivers/firmware/efi/efi.c | 6
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 1
-rw-r--r--  drivers/gpio/gpio-rcar.c | 13
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 1
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 83
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 2
-rw-r--r--  drivers/input/touchscreen/pixcir_i2c_ts.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 6
-rw-r--r--  drivers/iommu/arm-smmu.c | 2
-rw-r--r--  drivers/leds/led-class.c | 7
-rw-r--r--  drivers/misc/mei/client.c | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 59
-rw-r--r--  drivers/misc/mei/hw-txe.c | 33
-rw-r--r--  drivers/misc/mei/mei_dev.h | 11
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mtd/maps/dc21285.c | 4
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 5
-rw-r--r--  drivers/net/can/dev.c | 5
-rw-r--r--  drivers/net/can/slcan.c | 1
-rw-r--r--  drivers/net/can/vcan.c | 3
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 5
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 4
-rw-r--r--  drivers/of/address.c | 2
-rw-r--r--  drivers/pci/Kconfig | 4
-rw-r--r--  drivers/pci/bus.c | 10
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 52
-rw-r--r--  drivers/pci/pci.c | 11
-rw-r--r--  drivers/pci/probe.c | 12
-rw-r--r--  drivers/pcmcia/topic.h | 16
-rw-r--r--  drivers/pnp/system.c | 35
-rw-r--r--  drivers/power/power_supply_core.c | 61
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/regulator/max77686.c | 6
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c | 11
-rw-r--r--  drivers/scsi/ipr.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 58
-rw-r--r--  drivers/spi/spi-orion.c | 25
-rw-r--r--  drivers/spi/spi.c | 11
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 10
-rw-r--r--  drivers/video/fbdev/mxsfb.c | 68
56 files changed, 762 insertions, 229 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c412fdb28d34..513e7230e3d0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -470,6 +470,16 @@ static int __init acpi_bus_init_irq(void)
return 0;
}
+/**
+ * acpi_early_init - Initialize ACPICA and populate the ACPI namespace.
+ *
+ * The ACPI tables are accessible after this, but the handling of events has not
+ * been initialized and the global lock is not available yet, so AML should not
+ * be executed at this point.
+ *
+ * Doing this before switching the EFI runtime services to virtual mode allows
+ * the EfiBootServices memory to be freed slightly earlier on boot.
+ */
void __init acpi_early_init(void)
{
acpi_status status;
@@ -533,26 +543,42 @@ void __init acpi_early_init(void)
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
}
#endif
+ return;
+
+ error0:
+ disable_acpi();
+}
+
+/**
+ * acpi_subsystem_init - Finalize the early initialization of ACPI.
+ *
+ * Switch over the platform to the ACPI mode (if possible), initialize the
+ * handling of ACPI events, install the interrupt and global lock handlers.
+ *
+ * Doing this too early is generally unsafe, but at the same time it needs to be
+ * done before all things that really depend on ACPI. The right spot appears to
+ * be before finalizing the EFI initialization.
+ */
+void __init acpi_subsystem_init(void)
+{
+ acpi_status status;
+
+ if (acpi_disabled)
+ return;
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
- goto error0;
+ disable_acpi();
+ } else {
+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
}
-
- /*
- * If the system is using ACPI then we can be reasonably
- * confident that any regulators are managed by the firmware
- * so tell the regulator core it has everything it needs to
- * know.
- */
- regulator_has_full_constraints();
-
- return;
-
- error0:
- disable_acpi();
- return;
}
static int __init acpi_bus_init(void)
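The two kernel-doc comments above describe a two-phase bring-up: tables first, event handling later. A minimal ordering sketch (illustrative only; the real call sites live in init/main.c and arch code, outside this hunk):

	/* Illustrative boot ordering, not part of this patch. */
	acpi_early_init();          /* tables usable; no AML, no events yet */
	efi_enter_virtual_mode();   /* EfiBootServices memory may be freed  */
	acpi_subsystem_init();      /* ACPI mode, SCI and global lock ready */
	/* ... code that really depends on ACPI runs after this point ...   */
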
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 735db11a9b00..8217e0bda60f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -953,6 +953,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
*/
void acpi_subsys_complete(struct device *dev)
{
+ pm_generic_complete(dev);
/*
* If the device had been runtime-suspended before the system went into
* the sleep state it is going out of and it has never been resumed till
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7ccba395c9dd..5226a8b921ae 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,11 +175,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
if (!addr || !length)
return;
- /* Resources are never freed */
- if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr, length, desc);
- else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr, length, desc);
+ acpi_reserve_region(addr, length, gas->space_id, 0, desc);
}
static void __init acpi_reserve_resources(void)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 8244f013f210..fcb7807ea8b7 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,6 +26,7 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
+#include <linux/list.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
@@ -621,3 +622,162 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+
+struct reserved_region {
+ struct list_head node;
+ u64 start;
+ u64 end;
+};
+
+static LIST_HEAD(reserved_io_regions);
+static LIST_HEAD(reserved_mem_regions);
+
+static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
+ char *desc)
+{
+ unsigned int length = end - start + 1;
+ struct resource *res;
+
+ res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
+ request_region(start, length, desc) :
+ request_mem_region(start, length, desc);
+ if (!res)
+ return -EIO;
+
+ res->flags &= ~flags;
+ return 0;
+}
+
+static int add_region_before(u64 start, u64 end, u8 space_id,
+ unsigned long flags, char *desc,
+ struct list_head *head)
+{
+ struct reserved_region *reg;
+ int error;
+
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ error = request_range(start, end, space_id, flags, desc);
+ if (error)
+ return error;
+
+ reg->start = start;
+ reg->end = end;
+ list_add_tail(&reg->node, head);
+ return 0;
+}
+
+/**
+ * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
+ * @start: Starting address of the region.
+ * @length: Length of the region.
+ * @space_id: Identifier of address space to reserve the region from.
+ * @flags: Resource flags to clear for the region after requesting it.
+ * @desc: Region description (for messages).
+ *
+ * Reserve an I/O or memory region as a system resource to prevent others from
+ * using it. If the new region overlaps with one of the regions (in the given
+ * address space) already reserved by this routine, only the non-overlapping
+ * parts of it will be reserved.
+ *
+ * Returned is either 0 (success) or a negative error code indicating a resource
+ * reservation problem. It is the code of the first encountered error, but the
+ * routine doesn't abort until it has attempted to request all of the parts of
+ * the new region that don't overlap with other regions reserved previously.
+ *
+ * The resources requested by this routine are never released.
+ */
+int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+ unsigned long flags, char *desc)
+{
+ struct list_head *regions;
+ struct reserved_region *reg;
+ u64 end = start + length - 1;
+ int ret = 0, error = 0;
+
+ if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ regions = &reserved_io_regions;
+ else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ regions = &reserved_mem_regions;
+ else
+ return -EINVAL;
+
+ if (list_empty(regions))
+ return add_region_before(start, end, space_id, flags, desc, regions);
+
+ list_for_each_entry(reg, regions, node)
+ if (reg->start == end + 1) {
+ /* The new region can be prepended to this one. */
+ ret = request_range(start, end, space_id, flags, desc);
+ if (!ret)
+ reg->start = start;
+
+ return ret;
+ } else if (reg->start > end) {
+ /* No overlap. Add the new region here and get out. */
+ return add_region_before(start, end, space_id, flags,
+ desc, &reg->node);
+ } else if (reg->end == start - 1) {
+ goto combine;
+ } else if (reg->end >= start) {
+ goto overlap;
+ }
+
+ /* The new region goes after the last existing one. */
+ return add_region_before(start, end, space_id, flags, desc, regions);
+
+ overlap:
+ /*
+ * The new region overlaps an existing one.
+ *
+ * The head part of the new region immediately preceding the existing
+ * overlapping one can be combined with it right away.
+ */
+ if (reg->start > start) {
+ error = request_range(start, reg->start - 1, space_id, flags, desc);
+ if (error)
+ ret = error;
+ else
+ reg->start = start;
+ }
+
+ combine:
+ /*
+ * The new region is adjacent to an existing one. If it extends beyond
+ * that region all the way to the next one, it is possible to combine
+ * all three of them.
+ */
+ while (reg->end < end) {
+ struct reserved_region *next = NULL;
+ u64 a = reg->end + 1, b = end;
+
+ if (!list_is_last(&reg->node, regions)) {
+ next = list_next_entry(reg, node);
+ if (next->start <= end)
+ b = next->start - 1;
+ }
+ error = request_range(a, b, space_id, flags, desc);
+ if (!error) {
+ if (next && next->start == b + 1) {
+ reg->end = next->end;
+ list_del(&next->node);
+ kfree(next);
+ } else {
+ reg->end = end;
+ break;
+ }
+ } else if (next) {
+ if (!ret)
+ ret = error;
+
+ reg = next;
+ } else {
+ break;
+ }
+ }
+
+ return ret ? ret : error;
+}
+EXPORT_SYMBOL_GPL(acpi_reserve_region);
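A minimal usage sketch of the new helper, mirroring the osl.c conversion above (the address, length and description are hypothetical; a zero flags argument leaves the requested resource's flags, including IORESOURCE_BUSY, untouched):

	/* Hypothetical caller: reserve a 32-byte I/O block from a firmware table. */
	if (acpi_reserve_region(0x1800, 0x20, ACPI_ADR_SPACE_SYSTEM_IO, 0,
				"ACPI PM1a_EVT_BLK"))
		pr_warn("ACPI: could not reserve PM1a event block\n");

Overlaps with regions already reserved through this routine are merged rather than rejected: only the non-overlapping parts are newly requested, as the list walk above implements.
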
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6273ff072f3e..1c76dcb502cf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -945,11 +945,10 @@ EXPORT_SYMBOL_GPL(devm_regmap_init);
static void regmap_field_init(struct regmap_field *rm_field,
struct regmap *regmap, struct reg_field reg_field)
{
- int field_bits = reg_field.msb - reg_field.lsb + 1;
rm_field->regmap = regmap;
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
- rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+ rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
rm_field->id_size = reg_field.id_size;
rm_field->id_offset = reg_field.id_offset;
}
@@ -2318,7 +2317,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
&ival);
if (ret != 0)
return ret;
- memcpy(val + (i * val_bytes), &ival, val_bytes);
+ map->format.format_val(val + (i * val_bytes), ival, 0);
}
}
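A worked instance of the mask change above (field values hypothetical): for a register field with .lsb = 4 and .msb = 7, both forms produce the same mask, but GENMASK() also stays well defined for a field spanning a full 32-bit register:

	/* .lsb = 4, .msb = 7  ->  field_bits = 4 */
	mask_old = (BIT(4) - 1) << 4;   /* (0x10 - 1) << 4 = 0xf0 */
	mask_new = GENMASK(7, 4);       /* bits 7..4 set   = 0xf0 */
	/*
	 * For .lsb = 0, .msb = 31, GENMASK(31, 0) is 0xffffffff, whereas
	 * BIT(32) shifts by the full word width on 32-bit builds, which
	 * is undefined behaviour.
	 */
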
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6414661ac1c4..c45d274a75c8 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -535,7 +535,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
val |= vid;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
#define BYT_BCLK_FREQS 5
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 59372077ec7c..3442764a5293 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -60,6 +60,8 @@ static int nap_loop(struct cpuidle_device *dev,
return index;
}
+/* Register for fastsleep only in oneshot mode of broadcast */
+#ifdef CONFIG_TICK_ONESHOT
static int fastsleep_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -83,7 +85,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index;
}
-
+#endif
/*
* States for dedicated partition case.
*/
@@ -209,7 +211,14 @@ static int powernv_add_idle_states(void)
powernv_states[nr_idle_states].flags = 0;
powernv_states[nr_idle_states].target_residency = 100;
powernv_states[nr_idle_states].enter = &nap_loop;
- } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+ }
+
+ /*
+ * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
+ * within this config dependency check.
+ */
+#ifdef CONFIG_TICK_ONESHOT
+ if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
/* Add FASTSLEEP state */
strcpy(powernv_states[nr_idle_states].name, "FastSleep");
@@ -218,7 +227,7 @@ static int powernv_add_idle_states(void)
powernv_states[nr_idle_states].target_residency = 300000;
powernv_states[nr_idle_states].enter = &fastsleep_loop;
}
-
+#endif
powernv_states[nr_idle_states].exit_latency =
((unsigned int)latency_ns[i]) / 1000;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 857414afa29a..f062158d4dc9 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -925,7 +925,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
- be16_add_cpu(&link_tbl_ptr->len, cryptlen);
+ link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+ + cryptlen);
/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
@@ -2561,6 +2562,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+ kfree(t_alg);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3061bb8629dc..e14363d12690 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -65,7 +65,6 @@ static int __init parse_efi_cmdline(char *str)
early_param("efi", parse_efi_cmdline);
static struct kobject *efi_kobj;
-static struct kobject *efivars_kobj;
/*
* Let's not leave out systab information that snuck into
@@ -212,10 +211,9 @@ static int __init efisubsys_init(void)
goto err_remove_group;
/* and the standard mountpoint for efivarfs */
- efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
- if (!efivars_kobj) {
+ error = sysfs_create_mount_point(efi_kobj, "efivars");
+ if (error) {
pr_err("efivars: Subsystem registration failed.\n");
- error = -ENOMEM;
goto err_remove_group;
}
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 91a7ffe83135..ab457fc00e75 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -255,6 +255,7 @@ static struct irq_chip crystalcove_irqchip = {
.irq_set_type = crystalcove_irq_type,
.irq_bus_lock = crystalcove_bus_lock,
.irq_bus_sync_unlock = crystalcove_bus_sync_unlock,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fd3977465948..1e14a6c74ed1 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -177,8 +177,17 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
gpio_chip);
-
- irq_set_irq_wake(p->irq_parent, on);
+ int error;
+
+ if (p->irq_parent) {
+ error = irq_set_irq_wake(p->irq_parent, on);
+ if (error) {
+ dev_dbg(&p->pdev->dev,
+ "irq %u doesn't support irq_set_wake\n",
+ p->irq_parent);
+ p->irq_parent = 0;
+ }
+ }
if (!p->clk)
return 0;
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 51da3692d561..5b7a860df524 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1418,6 +1418,7 @@ static const struct dev_pm_ops kxcjk1013_pm_ops = {
static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1013", KXCJK1013},
{"KXCJ1008", KXCJ91008},
+ {"KXCJ9000", KXCJ91008},
{"KXTJ1009", KXTJ21009},
{"SMO8500", KXCJ91008},
{ },
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 918814cd0f80..75c01b27bd0b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -465,14 +465,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
*/
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
- struct srp_target_port *target = ch->target;
static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
struct ib_recv_wr *bad_wr;
int ret;
/* Destroying a QP and reusing ch->done is only safe if not connected */
- WARN_ON_ONCE(target->connected);
+ WARN_ON_ONCE(ch->connected);
ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
@@ -811,35 +810,19 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
return changed;
}
-static bool srp_change_conn_state(struct srp_target_port *target,
- bool connected)
-{
- bool changed = false;
-
- spin_lock_irq(&target->lock);
- if (target->connected != connected) {
- target->connected = connected;
- changed = true;
- }
- spin_unlock_irq(&target->lock);
-
- return changed;
-}
-
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
int i;
- if (srp_change_conn_state(target, false)) {
- /* XXX should send SRP_I_LOGOUT request */
+ /* XXX should send SRP_I_LOGOUT request */
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
- shost_printk(KERN_DEBUG, target->scsi_host,
- PFX "Sending CM DREQ failed\n");
- }
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ ch->connected = false;
+ if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM DREQ failed\n");
}
}
}
@@ -986,14 +969,26 @@ static void srp_rport_delete(struct srp_rport *rport)
srp_queue_remove_work(target);
}
+/**
+ * srp_connected_ch() - number of connected channels
+ * @target: SRP target port.
+ */
+static int srp_connected_ch(struct srp_target_port *target)
+{
+ int i, c = 0;
+
+ for (i = 0; i < target->ch_count; i++)
+ c += target->ch[i].connected;
+
+ return c;
+}
+
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
int ret;
- WARN_ON_ONCE(!multich && target->connected);
-
- target->qp_in_error = false;
+ WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
ret = srp_lookup_path(ch);
if (ret)
@@ -1016,7 +1011,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
*/
switch (ch->status) {
case 0:
- srp_change_conn_state(target, true);
+ ch->connected = true;
return 0;
case SRP_PORT_REDIRECT:
@@ -1243,13 +1238,13 @@ static int srp_rport_reconnect(struct srp_rport *rport)
for (j = 0; j < target->queue_size; ++j)
list_add(&ch->tx_ring[j]->list, &ch->free_tx);
}
+
+ target->qp_in_error = false;
+
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
- if (ret || !ch->target) {
- if (i > 1)
- ret = 0;
+ if (ret || !ch->target)
break;
- }
ret = srp_connect_ch(ch, multich);
multich = true;
}
@@ -1929,7 +1924,7 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
return;
}
- if (target->connected && !target->qp_in_error) {
+ if (ch->connected && !target->qp_in_error) {
if (wr_id & LOCAL_INV_WR_ID_MASK) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"LOCAL_INV failed with status %d\n",
@@ -2367,7 +2362,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
case IB_CM_DREQ_RECEIVED:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "DREQ received - connection closed\n");
- srp_change_conn_state(target, false);
+ ch->connected = false;
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
@@ -2423,7 +2418,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- if (!target->connected || target->qp_in_error)
+ if (!ch->connected || target->qp_in_error)
return -1;
init_completion(&ch->tsk_mgmt_done);
@@ -2797,7 +2792,8 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
- if (!target->connected || target->qp_in_error) {
+ if (srp_connected_ch(target) < target->ch_count ||
+ target->qp_in_error) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "SCSI scan failed - removing SCSI host\n");
srp_queue_remove_work(target);
@@ -3172,11 +3168,11 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_parse_options(buf, target);
if (ret)
- goto err;
+ goto out;
ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
if (ret)
- goto err;
+ goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
@@ -3187,7 +3183,7 @@ static ssize_t srp_create_target(struct device *dev,
be64_to_cpu(target->ioc_guid),
be64_to_cpu(target->initiator_ext));
ret = -EEXIST;
- goto err;
+ goto out;
}
if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
@@ -3208,7 +3204,7 @@ static ssize_t srp_create_target(struct device *dev,
spin_lock_init(&target->lock);
ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
if (ret)
- goto err;
+ goto out;
ret = -ENOMEM;
target->ch_count = max_t(unsigned, num_online_nodes(),
@@ -3219,7 +3215,7 @@ static ssize_t srp_create_target(struct device *dev,
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
- goto err;
+ goto out;
node_idx = 0;
for_each_online_node(node) {
@@ -3315,9 +3311,6 @@ err_disconnect:
}
kfree(target->ch);
-
-err:
- scsi_host_put(target_host);
goto out;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a611556406ac..e690847a46dd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -170,6 +170,7 @@ struct srp_rdma_ch {
struct completion tsk_mgmt_done;
u8 tsk_mgmt_status;
+ bool connected;
};
/**
@@ -214,7 +215,6 @@ struct srp_target_port {
__be16 pkey;
u32 rq_tmo_jiffies;
- bool connected;
int zero_req_lim;
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 2c2107147319..8f3e243a62bf 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -78,7 +78,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
}
ret = i2c_master_recv(tsdata->client, rdbuf, readsize);
- if (ret != sizeof(rdbuf)) {
+ if (ret != readsize) {
dev_err(&tsdata->client->dev,
"%s: i2c_master_recv failed(), ret=%d\n",
__func__, ret);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e1c7e9e51045..ca9f4edbb940 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1869,9 +1869,15 @@ static void free_pt_##LVL (unsigned long __pt) \
pt = (u64 *)__pt; \
\
for (i = 0; i < 512; ++i) { \
+ /* PTE present? */ \
if (!IOMMU_PTE_PRESENT(pt[i])) \
continue; \
\
+ /* Large PTE? */ \
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
+ PM_PTE_LEVEL(pt[i]) == 7) \
+ continue; \
+ \
p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
FN(p); \
} \
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 66a803b9dd3a..65075ef75e2a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1567,7 +1567,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
+ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
dev_notice(smmu->dev, "\taddress translation ops\n");
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 728681debdbe..7fb2a19ac649 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -187,6 +187,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
+#ifdef CONFIG_PM_SLEEP
static int led_suspend(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -206,11 +207,9 @@ static int led_resume(struct device *dev)
return 0;
}
+#endif
-static const struct dev_pm_ops leds_class_dev_pm_ops = {
- .suspend = led_suspend,
- .resume = led_resume,
-};
+static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
static int match_name(struct device *dev, const void *data)
{
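For context, SIMPLE_DEV_PM_OPS() routes the two callbacks through SET_SYSTEM_SLEEP_PM_OPS(), which compiles to an empty initializer when CONFIG_PM_SLEEP is off -- hence the new #ifdef around led_suspend()/led_resume(). A rough sketch of what the macro expands to here (simplified, not the literal expansion):

	static const struct dev_pm_ops leds_class_dev_pm_ops = {
	#ifdef CONFIG_PM_SLEEP
		.suspend  = led_suspend,
		.resume   = led_resume,
		.freeze   = led_suspend,
		.thaw     = led_resume,
		.poweroff = led_suspend,
		.restore  = led_resume,
	#endif
	};
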
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e99ef6a54a2..b2b9f4382d77 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -699,7 +699,7 @@ void mei_host_client_init(struct work_struct *work)
bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
- dev->pg_event == MEI_PG_EVENT_WAIT) {
+ mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n");
return false;
}
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6fb75e62a764..43d7101ff993 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -663,11 +663,27 @@ int mei_me_pg_exit_sync(struct mei_device *dev)
mutex_lock(&dev->device_lock);
reply:
- if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
- ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
+ ret = 0;
else
ret = -ETIME;
+out:
dev->pg_event = MEI_PG_EVENT_IDLE;
hw->pg_state = MEI_PG_OFF;
@@ -675,6 +691,19 @@ reply:
}
/**
+ * mei_me_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_me_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+/**
* mei_me_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -705,6 +734,24 @@ notsupported:
}
/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
+ return;
+
+ dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+ hw->pg_state = MEI_PG_OFF;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -761,6 +808,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
+ mei_me_pg_intr(dev);
+
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
@@ -797,9 +846,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/*
* During PG handshake only allowed write is the replay to the
* PG exit message, so block calling write function
- * if the pg state is not idle
+ * if the pg event is in PG handshake
*/
- if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+ if (dev->pg_event != MEI_PG_EVENT_WAIT &&
+ dev->pg_event != MEI_PG_EVENT_RECEIVED) {
rets = mei_irq_write_handler(dev, &complete_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
@@ -824,6 +874,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
+ .pg_in_transition = mei_me_pg_in_transition,
.pg_is_enabled = mei_me_pg_is_enabled,
.intr_clear = mei_me_intr_clear,
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 7abafe7d120d..bae680c648ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>
@@ -218,26 +219,25 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev)
*
* Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
- * Return: > 0 if the expected value was received, -ETIME otherwise
+ * Return: 0 if the expected value was received, -ETIME otherwise
*/
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
- int t = 0;
+ ktime_t stop, start;
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev,
- "aliveness settled after %d msecs\n", t);
- return t;
+ dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
}
- mutex_unlock(&dev->device_lock);
- msleep(MSEC_PER_SEC / 5);
- mutex_lock(&dev->device_lock);
- t += MSEC_PER_SEC / 5;
- } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+ usleep_range(20, 50);
+ } while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(dev->dev, "aliveness timed out\n");
@@ -302,6 +302,18 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
}
/**
+ * mei_txe_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_txe_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event == MEI_PG_EVENT_WAIT;
+}
+
+/**
* mei_txe_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -1138,6 +1150,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
+ .pg_in_transition = mei_txe_pg_in_transition,
.pg_is_enabled = mei_txe_pg_is_enabled,
.intr_clear = mei_txe_intr_clear,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f066ecd71939..f84c39ee28a8 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -271,6 +271,7 @@ struct mei_cl {
* @fw_status : get fw status registers
* @pg_state : power gating state of the device
+ * @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
* @intr_clear : clear pending interrupts
@@ -300,6 +301,7 @@ struct mei_hw_ops {
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
enum mei_pg_state (*pg_state)(struct mei_device *dev);
+ bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
void (*intr_clear)(struct mei_device *dev);
@@ -398,11 +400,15 @@ struct mei_cl_device {
* @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
* @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
* @MEI_PG_EVENT_RECEIVED: the driver received pg event
+ * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
+ * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt
*/
enum mei_pg_event {
MEI_PG_EVENT_IDLE,
MEI_PG_EVENT_WAIT,
MEI_PG_EVENT_RECEIVED,
+ MEI_PG_EVENT_INTR_WAIT,
+ MEI_PG_EVENT_INTR_RECEIVED,
};
/**
@@ -717,6 +723,11 @@ static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
return dev->ops->pg_state(dev);
}
+static inline bool mei_pg_in_transition(struct mei_device *dev)
+{
+ return dev->ops->pg_in_transition(dev);
+}
+
static inline bool mei_pg_is_enabled(struct mei_device *dev)
{
return dev->ops->pg_is_enabled(dev);
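The range check in mei_me_pg_in_transition() above relies on the declaration order of this enum (IDLE, WAIT, RECEIVED, INTR_WAIT, INTR_RECEIVED). An equivalent, order-independent way to write the same predicate (illustrative only, not part of the patch):

	static bool pg_event_in_transition(enum mei_pg_event ev)
	{
		switch (ev) {
		case MEI_PG_EVENT_WAIT:        /* waiting for a pg event to complete */
		case MEI_PG_EVENT_RECEIVED:    /* pg event received                  */
		case MEI_PG_EVENT_INTR_WAIT:   /* waiting for the pg event interrupt */
			return true;
		default:                       /* IDLE or INTR_RECEIVED              */
			return false;
		}
	}
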
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c80287a02735..9231cdfe2757 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -848,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
int sg_cnt;
sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
- if (sg_cnt == 0) {
+ if (sg_cnt <= 0) {
/*
* This only happens when someone fed
* us an invalid request.
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index f8a7dd14cee0..70a3db3ab856 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -38,9 +38,9 @@ static void nw_en_write(void)
* we want to write a bit pattern XXX1 to Xilinx to enable
* the write gate, which will be open for about the next 2ms.
*/
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
/*
* let the ISA bus to catch on...
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 2b0c52870999..df7c6c70757a 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -197,6 +197,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
mutex_lock(&dev->lock);
+ mutex_lock(&mtd_table_mutex);
if (dev->open)
goto unlock;
@@ -220,6 +221,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
unlock:
dev->open++;
+ mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
@@ -230,6 +232,7 @@ error_release:
error_put:
module_put(dev->tr->owner);
kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
@@ -243,6 +246,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
return;
mutex_lock(&dev->lock);
+ mutex_lock(&mtd_table_mutex);
if (--dev->open)
goto unlock;
@@ -256,6 +260,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
__put_mtd_device(dev->mtd);
}
unlock:
+ mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b0f69248cb71..e9b1810d319f 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,6 +440,9 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
+ if (!(skb->tstamp.tv64))
+ __net_timestamp(skb);
+
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
@@ -575,6 +578,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
if (unlikely(!skb))
return NULL;
+ __net_timestamp(skb);
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -603,6 +607,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
if (unlikely(!skb))
return NULL;
+ __net_timestamp(skb);
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index c837eb91d43e..f64f5290d6f8 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,6 +207,7 @@ static void slc_bump(struct slcan *sl)
if (!skb)
return;
+ __net_timestamp(skb);
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 674f367087c5..0ce868de855d 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,6 +78,9 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(skb->tstamp.tv64))
+ __net_timestamp(skb);
+
netif_rx_ni(skb);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d81fc6bd4759..5c92fb71b37e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
int ret;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP;
+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 33501bcddc48..8a97d28f3d65 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9323,7 +9323,8 @@ unload_error:
* function stop ramrod is sent, since as part of this ramrod FW access
* PTP registers.
*/
- bnx2x_stop_ptp(bp);
+ if (bp->flags & PTP_SUPPORTED)
+ bnx2x_stop_ptp(bp);
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ce5f7f9cff06..74d0389bf233 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -310,6 +310,7 @@ struct mvneta_port {
unsigned int link;
unsigned int duplex;
unsigned int speed;
+ unsigned int tx_csum_limit;
int use_inband_status:1;
};
@@ -1013,6 +1014,12 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+ } else {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
mvneta_set_ucast_table(pp, -1);
@@ -2502,8 +2509,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
dev->mtu = mtu;
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ netdev_update_features(dev);
return 0;
+ }
/* The interface is running, so we have to force a
* reallocation of the queues
@@ -2532,9 +2541,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_start_dev(pp);
mvneta_port_up(pp);
+ netdev_update_features(dev);
+
return 0;
}
+static netdev_features_t mvneta_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev_info(dev,
+ "Disable IP checksum for MTU greater than %dB\n",
+ pp->tx_csum_limit);
+ }
+
+ return features;
+}
+
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
@@ -2856,6 +2882,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_set_rx_mode = mvneta_set_rx_mode,
.ndo_set_mac_address = mvneta_set_mac_addr,
.ndo_change_mtu = mvneta_change_mtu,
+ .ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
};
@@ -3101,6 +3128,9 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
+ if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
+ pp->tx_csum_limit = 1600;
+
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -3179,6 +3209,7 @@ static int mvneta_remove(struct platform_device *pdev)
static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
+ { .compatible = "marvell,armada-xp-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cf467a9f6cc7..a5a0b8420d26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1973,10 +1973,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
- if (priv->base_tx_qpn) {
- mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
- priv->base_tx_qpn = 0;
- }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2a77a6b19121..eab4e080ebd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#endif
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
- int hwtstamp_rx_filter)
+ netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
@@ -731,14 +731,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
- hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
- /* next protocol non IPv4 or IPv6 */
- if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
- != htons(ETH_P_IP) &&
- ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
- != htons(ETH_P_IPV6))
- return -1;
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
}
@@ -901,7 +895,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (ip_summed == CHECKSUM_COMPLETE) {
void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
- if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+ if (check_csum(cqe, gro_skb, va,
+ dev->features)) {
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
ring->csum_complete--;
@@ -956,7 +951,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
if (ip_summed == CHECKSUM_COMPLETE) {
- if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+ if (check_csum(cqe, skb, skb->data, dev->features)) {
ip_summed = CHECKSUM_NONE;
ring->csum_complete--;
ring->csum_none++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7bed3a88579f..c10d98f6ad96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
+ ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
@@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
@@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
+static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
+{
+ return ring->prod - ring->cons > ring->full_size;
+}
+
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, int index,
u8 owner)
@@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
- /*
- * Wakeup Tx queue if this stopped, and at least 1 packet
- * was completed
+ /* Wakeup Tx queue if this stopped, and ring is not full.
*/
- if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+ if (netif_tx_queue_stopped(ring->tx_queue) &&
+ !mlx4_en_is_tx_ring_full(ring)) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
@@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
/* Check available TXBBs And 2K spare for prefetch */
- stop_queue = (int)(ring->prod - ring_cons) >
- ring->size - HEADROOM - MAX_DESC_TXBBS;
+ stop_queue = mlx4_en_is_tx_ring_full(ring);
if (unlikely(stop_queue)) {
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
@@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
smp_rmb();
ring_cons = ACCESS_ONCE(ring->cons);
- if (unlikely(((int)(ring->prod - ring_cons)) <=
- ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
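The helper above compares two free-running u32 counters, so the test stays correct across wrap-around. A worked instance with hypothetical values (full_size = size - HEADROOM - MAX_DESC_TXBBS, as set in mlx4_en_create_tx_ring()):

	/* Hypothetical ring: full_size = 1000 TXBBs; prod has wrapped, cons has not. */
	u32 prod = 0x00000005, cons = 0xfffffff0;
	u32 in_flight = prod - cons;    /* 0x15 = 21 TXBBs outstanding (mod 2^32) */
	bool full = in_flight > 1000;   /* false -- the queue keeps running       */
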
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 6fce58718837..0d80aed59043 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
mutex_lock(&intf_mutex);
list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
+ list_for_each_entry(priv, &dev_list, dev_list) {
+ if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
+ mlx4_dbg(&priv->dev,
+ "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
+ intf->flags &= ~MLX4_INTFF_BONDING;
+ }
mlx4_add_device(intf, priv);
+ }
mutex_unlock(&intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d021f079f181..909fcf803c54 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
u32 size; /* number of TXBBs */
u32 size_mask;
u16 stride;
+ u32 full_size;
u16 cqn; /* index of port CQ associated with this ring */
u32 buf_size;
__be32 doorbell_qpn;
@@ -579,7 +580,6 @@ struct mlx4_en_priv {
int vids[128];
bool wol;
struct device *ddev;
- int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
struct hwtstamp_config hwtstamp_config;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bdfe51fc3a65..d551df62e61a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -796,10 +796,11 @@ static int genphy_config_advert(struct phy_device *phydev)
if (phydev->supported & (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full)) {
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (adv != oldadv)
- changed = 1;
}
+ if (adv != oldadv)
+ changed = 1;
+
err = phy_write(phydev, MII_CTRL1000, adv);
if (err < 0)
return err;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 968787abf78d..ec383b0f5443 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -681,6 +681,9 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
char *node;
unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+ if (vif->credit_watch.node)
+ return -EADDRINUSE;
+
node = kmalloc(maxlen, GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -770,6 +773,7 @@ static void connect(struct backend_info *be)
}
xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+ xen_unregister_watchers(be->vif);
xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 78a7dcbec7d8..6906a3f61bd8 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
spin_lock(&io_range_lock);
list_for_each_entry(res, &io_range_list, list) {
if (address >= res->start && address < res->start + res->size) {
- addr = res->start - address + offset;
+ addr = address - res->start + offset;
break;
}
offset += res->size;
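A worked instance of the corrected translation (values hypothetical): for an I/O range registered with res->start = 0x3eff0000, res->size = 0x10000 and a running offset of 0, a CPU address of 0x3eff0100 must map to port 0x100:

	addr = address - res->start + offset;   /* 0x3eff0100 - 0x3eff0000 + 0 = 0x100 */
	/* the old expression, res->start - address + offset, underflowed instead */
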
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7a8f1c5e65af..73de4efcbe6e 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -1,6 +1,10 @@
#
# PCI configuration
#
+config PCI_BUS_ADDR_T_64BIT
+ def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ depends on PCI
+
config PCI_MSI
bool "Message Signaled Interrupts (MSI and MSI-X)"
depends on PCI
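The new symbol backs the pci_bus_addr_t type used in the pci/bus.c and pci/probe.c hunks below in place of dma_addr_t, so PCI bus addresses can be 64-bit even where DMA addresses are not. The companion typedef lives outside this diff; a sketch of the expected pattern:

	#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
	typedef u64 pci_bus_addr_t;
	#else
	typedef u32 pci_bus_addr_t;
	#endif
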
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 90fa3a78fb7c..6fbd3f2b5992 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -92,11 +92,11 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
- (dma_addr_t) 0xffffffffffffffffULL};
-static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
- (dma_addr_t) 0xffffffffffffffffULL};
+ (pci_bus_addr_t) 0xffffffffffffffffULL};
+static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
+ (pci_bus_addr_t) 0xffffffffffffffffULL};
#endif
/*
@@ -200,7 +200,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t),
void *alignf_data)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
int rc;
if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 0ebf754fc177..6d6868811e56 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -176,20 +176,17 @@ static void pcie_wait_cmd(struct controller *ctrl)
jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
-/**
- * pcie_write_cmd - Issue controller command
- * @ctrl: controller to which the command is issued
- * @cmd: command value written to slot control register
- * @mask: bitmask of slot control register to be modified
- */
-static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
+ u16 mask, bool wait)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
- /* Wait for any previous command that might still be in progress */
+ /*
+ * Always wait for any previous command that might still be in progress
+ */
pcie_wait_cmd(ctrl);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
@@ -201,9 +198,33 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
ctrl->cmd_started = jiffies;
ctrl->slot_ctrl = slot_ctrl;
+ /*
+ * Optionally wait for the hardware to be ready for a new command,
+ * indicating completion of the above issued command.
+ */
+ if (wait)
+ pcie_wait_cmd(ctrl);
+
mutex_unlock(&ctrl->ctrl_lock);
}
+/**
+ * pcie_write_cmd - Issue controller command
+ * @ctrl: controller to which the command is issued
+ * @cmd: command value written to slot control register
+ * @mask: bitmask of slot control register to be modified
+ */
+static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+{
+ pcie_do_write_cmd(ctrl, cmd, mask, true);
+}
+
+/* Same as above without waiting for the hardware to latch */
+static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
+{
+ pcie_do_write_cmd(ctrl, cmd, mask, false);
+}
+
bool pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
@@ -422,7 +443,7 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
default:
return;
}
- pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
+ pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
@@ -434,7 +455,8 @@ void pciehp_green_led_on(struct slot *slot)
if (!PWR_LED(ctrl))
return;
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
+ PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PWR_IND_ON);
@@ -447,7 +469,8 @@ void pciehp_green_led_off(struct slot *slot)
if (!PWR_LED(ctrl))
return;
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
+ PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PWR_IND_OFF);
@@ -460,7 +483,8 @@ void pciehp_green_led_blink(struct slot *slot)
if (!PWR_LED(ctrl))
return;
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
+ PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PWR_IND_BLINK);
@@ -613,7 +637,7 @@ void pcie_enable_notification(struct controller *ctrl)
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
- pcie_write_cmd(ctrl, cmd, mask);
+ pcie_write_cmd_nowait(ctrl, cmd, mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
@@ -664,7 +688,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pci_reset_bridge_secondary_bus(ctrl->pcie->port);
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
- pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+ pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
if (pciehp_poll_mode)
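Both helpers funnel into pcie_do_write_cmd(), which always waits out any previously issued command first, so ordering is preserved either way; the _nowait variant merely skips the trailing wait for the command it just wrote. Illustrative usage, mirroring the conversions in this hunk:

	/* Indicator/LED updates need not block on Command Completed: */
	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);

	/* A caller that must see the command take effect keeps the synchronous form: */
	pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_PIC);
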
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index acc4b6ef78c4..c44393f26fd3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4324,6 +4324,17 @@ bool pci_device_is_present(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
+void pci_ignore_hotplug(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+
+ dev->ignore_hotplug = 1;
+ /* Propagate the "ignore hotplug" setting to the parent bridge. */
+ if (bridge)
+ bridge->ignore_hotplug = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
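An illustrative caller (hypothetical driver code, not part of this patch): a driver that powers its device down at runtime marks it first, so the resulting link-down on the parent port is not treated as a hot-unplug:

	/* Hypothetical runtime-suspend path of a discrete-GPU driver. */
	static int example_gpu_runtime_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_ignore_hotplug(pdev);       /* now also flags the parent bridge */
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}
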
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6675a7a1b9fc..c91185721345 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -254,8 +254,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
if (res->flags & IORESOURCE_MEM_64) {
- if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
- sz64 > 0x100000000ULL) {
+ if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
+ && sz64 > 0x100000000ULL) {
res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
res->start = 0;
res->end = 0;
@@ -264,7 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto out;
}
- if ((sizeof(dma_addr_t) < 8) && l) {
+ if ((sizeof(pci_bus_addr_t) < 8) && l) {
/* Above 32-bit boundary; try to reallocate */
res->flags |= IORESOURCE_UNSET;
res->start = 0;
@@ -399,7 +399,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
u64 base64, limit64;
- dma_addr_t base, limit;
+ pci_bus_addr_t base, limit;
struct pci_bus_region region;
struct resource *res;
@@ -426,8 +426,8 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
}
}
- base = (dma_addr_t) base64;
- limit = (dma_addr_t) limit64;
+ base = (pci_bus_addr_t) base64;
+ limit = (pci_bus_addr_t) limit64;
if (base != base64) {
dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
index 615a45a8fe86..582688fe7505 100644
--- a/drivers/pcmcia/topic.h
+++ b/drivers/pcmcia/topic.h
@@ -104,6 +104,9 @@
#define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */
#define TOPIC_EXCA_IFC_33V_ENA 0x01
+#define TOPIC_PCI_CFG_PPBCN 0x3e /* 16-bit */
+#define TOPIC_PCI_CFG_PPBCN_WBEN 0x0400
+
static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket)
static int topic95_override(struct yenta_socket *socket)
{
u8 fctrl;
+ u16 ppbcn;
/* enable 3.3V support for 16bit cards */
fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket)
/* tell yenta to use exca registers to power 16bit cards */
socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
+ /* Disable write buffers to prevent lockups under load with numerous
+ Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
+ net. This is not a power-on default according to the datasheet
+ but some BIOSes seem to set it. */
+ if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
+ && socket->dev->revision <= 7
+ && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
+ ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
+ pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
+ dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
+ }
+
return 0;
}
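The write-buffer disable above follows the usual read-modify-write pattern on PCI config space. A hedged, generic sketch (the helper name is made up; pci_read_config_word()/pci_write_config_word() are the real accessors):

#include <linux/pci.h>

/* Clear `bit` in a 16-bit config register only if it is currently set. */
static void example_clear_cfg_bit(struct pci_dev *pdev, int reg, u16 bit)
{
	u16 val;

	if (pci_read_config_word(pdev, reg, &val))
		return;		/* config read failed; leave the device alone */
	if (val & bit) {
		val &= ~bit;
		pci_write_config_word(pdev, reg, val);
	}
}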
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 49c1720df59a..515f33882ab8 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,6 +7,7 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
+#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -22,25 +23,41 @@ static const struct pnp_device_id pnp_dev_table[] = {
{"", 0}
};
+#ifdef CONFIG_ACPI
+static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+{
+ u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
+}
+#else
+static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+{
+ struct resource *res;
+
+ res = io ? request_region(start, length, desc) :
+ request_mem_region(start, length, desc);
+ if (res) {
+ res->flags &= ~IORESOURCE_BUSY;
+ return true;
+ }
+ return false;
+}
+#endif
+
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
- struct resource *res;
+ bool reserved;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
- if (port)
- res = request_region(start, end - start + 1, regionid);
- else
- res = request_mem_region(start, end - start + 1, regionid);
- if (res)
- res->flags &= ~IORESOURCE_BUSY;
- else
+ reserved = __reserve_range(start, end - start + 1, !!port, regionid);
+ if (!reserved)
kfree(regionid);
/*
@@ -49,7 +66,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
* have double reservations.
*/
dev_info(&dev->dev, "%pR %s reserved\n", r,
- res ? "has been" : "could not be");
+ reserved ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
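One subtlety kept by the rework above: the region name buffer is only freed when the reservation fails, because the resource tree stores the name pointer for as long as the region exists. A hedged sketch of that ownership rule for the non-ACPI path (helper name hypothetical):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static bool example_reserve_io_named(resource_size_t start, resource_size_t n,
				     const char *pnpid)
{
	char *name = kasprintf(GFP_KERNEL, "pnp %s", pnpid);

	if (!name)
		return false;
	if (!request_region(start, n, name)) {
		kfree(name);	/* reservation failed, nothing references it */
		return false;
	}
	return true;		/* name is now owned by the resource tree */
}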
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 2ed4a4a6b3c5..4bc0c7f459a5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -30,6 +30,8 @@ EXPORT_SYMBOL_GPL(power_supply_notifier);
static struct device_type power_supply_dev_type;
+#define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10)
+
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
struct power_supply *supply)
{
@@ -121,6 +123,30 @@ void power_supply_changed(struct power_supply *psy)
}
EXPORT_SYMBOL_GPL(power_supply_changed);
+/*
+ * Notify that the power supply was registered after the parent finished
+ * probing.
+ *
+ * Often the power supply is registered from the driver's probe function.
+ * However, calling power_supply_changed() directly from
+ * power_supply_register() would execute the driver's get_property()
+ * callback too early - before the probe ends.
+ *
+ * Avoid that by waiting on the parent's mutex.
+ */
+static void power_supply_deferred_register_work(struct work_struct *work)
+{
+ struct power_supply *psy = container_of(work, struct power_supply,
+ deferred_register_work.work);
+
+ if (psy->dev.parent)
+ mutex_lock(&psy->dev.parent->mutex);
+
+ power_supply_changed(psy);
+
+ if (psy->dev.parent)
+ mutex_unlock(&psy->dev.parent->mutex);
+}
+
#ifdef CONFIG_OF
#include <linux/of.h>
@@ -645,6 +671,10 @@ __power_supply_register(struct device *parent,
struct power_supply *psy;
int rc;
+ if (!parent)
+ pr_warn("%s: Expected proper parent device for '%s'\n",
+ __func__, desc->name);
+
psy = kzalloc(sizeof(*psy), GFP_KERNEL);
if (!psy)
return ERR_PTR(-ENOMEM);
@@ -659,7 +689,6 @@ __power_supply_register(struct device *parent,
dev->release = power_supply_dev_release;
dev_set_drvdata(dev, psy);
psy->desc = desc;
- atomic_inc(&psy->use_cnt);
if (cfg) {
psy->drv_data = cfg->drv_data;
psy->of_node = cfg->of_node;
@@ -672,6 +701,8 @@ __power_supply_register(struct device *parent,
goto dev_set_name_failed;
INIT_WORK(&psy->changed_work, power_supply_changed_work);
+ INIT_DELAYED_WORK(&psy->deferred_register_work,
+ power_supply_deferred_register_work);
rc = power_supply_check_supplies(psy);
if (rc) {
@@ -700,7 +731,20 @@ __power_supply_register(struct device *parent,
if (rc)
goto create_triggers_failed;
- power_supply_changed(psy);
+ /*
+ * Update use_cnt after any uevents (most notably from device_add()).
+ * We are still inside the driver's probe here, but
+ * power_supply_uevent() calls back into the driver's get_property()
+ * method, so:
+ * 1. the driver has not yet stored the returned struct power_supply,
+ * 2. the driver has not yet finished its initialization (anything in
+ *    its probe after calling power_supply_register()).
+ */
+ atomic_inc(&psy->use_cnt);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &psy->deferred_register_work,
+ POWER_SUPPLY_DEFERRED_REGISTER_TIME);
return psy;
@@ -720,7 +764,8 @@ dev_set_name_failed:
/**
* power_supply_register() - Register new power supply
- * @parent: Device to be a parent of power supply's device
+ * @parent: Device to be a parent of power supply's device, usually
+ * the device whose probe function calls this
* @desc: Description of power supply, must be valid through whole
* lifetime of this power supply
* @cfg: Run-time specific configuration accessed during registering,
@@ -741,7 +786,8 @@ EXPORT_SYMBOL_GPL(power_supply_register);
/**
* power_supply_register_no_ws() - Register new non-waking-source power supply
- * @parent: Device to be a parent of power supply's device
+ * @parent: Device to be a parent of power supply's device, usually
+ * the device whose probe function calls this
* @desc: Description of power supply, must be valid through whole
* lifetime of this power supply
* @cfg: Run-time specific configuration accessed during registering,
@@ -770,7 +816,8 @@ static void devm_power_supply_release(struct device *dev, void *res)
/**
* devm_power_supply_register() - Register managed power supply
- * @parent: Device to be a parent of power supply's device
+ * @parent: Device to be a parent of power supply's device, usually
+ * the device whose probe function calls this
* @desc: Description of power supply, must be valid through whole
* lifetime of this power supply
* @cfg: Run-time specific configuration accessed during registering,
@@ -805,7 +852,8 @@ EXPORT_SYMBOL_GPL(devm_power_supply_register);
/**
* devm_power_supply_register_no_ws() - Register managed non-waking-source power supply
- * @parent: Device to be a parent of power supply's device
+ * @parent: Device to be a parent of power supply's device, usually
+ * the device whose probe function calls this
* @desc: Description of power supply, must be valid through whole
* lifetime of this power supply
* @cfg: Run-time specific configuration accessed during registering,
@@ -849,6 +897,7 @@ void power_supply_unregister(struct power_supply *psy)
{
WARN_ON(atomic_dec_return(&psy->use_cnt));
cancel_work_sync(&psy->changed_work);
+ cancel_delayed_work_sync(&psy->deferred_register_work);
sysfs_remove_link(&psy->dev.kobj, "powers");
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
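The deferred-register change above boils down to: queue delayed work and serialize it against the parent's probe by taking the parent's device lock. A hedged sketch of that pattern with a hypothetical struct foo (device_lock()/device_unlock() are the standard wrappers around dev->mutex):

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo {
	struct device *parent;
	struct delayed_work notify_work;
};

static void foo_notify_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, notify_work.work);

	if (f->parent)
		device_lock(f->parent);	/* blocks until probe() has returned */
	/* ... emit the notification here ... */
	if (f->parent)
		device_unlock(f->parent);
}

static void foo_register(struct foo *f)
{
	INIT_DELAYED_WORK(&f->notify_work, foo_notify_work);
	queue_delayed_work(system_power_efficient_wq, &f->notify_work,
			   msecs_to_jiffies(10));
}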
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 443eaab933fc..8a28116b5805 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -779,7 +779,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
static void print_constraints(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
- char buf[80] = "";
+ char buf[160] = "";
int count = 0;
int ret;
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 15fb1416bfbd..c064e32fb3b9 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -88,7 +88,7 @@ enum max77686_ramp_rate {
};
struct max77686_data {
- u64 gpio_enabled:MAX77686_REGULATORS;
+ DECLARE_BITMAP(gpio_enabled, MAX77686_REGULATORS);
/* Array indexed by regulator id */
unsigned int opmode[MAX77686_REGULATORS];
@@ -121,7 +121,7 @@ static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
case MAX77686_BUCK8:
case MAX77686_BUCK9:
case MAX77686_LDO20 ... MAX77686_LDO22:
- if (max77686->gpio_enabled & (1 << id))
+ if (test_bit(id, max77686->gpio_enabled))
return MAX77686_GPIO_CONTROL;
}
@@ -277,7 +277,7 @@ static int max77686_of_parse_cb(struct device_node *np,
}
if (gpio_is_valid(config->ena_gpio)) {
- max77686->gpio_enabled |= (1 << desc->id);
+ set_bit(desc->id, max77686->gpio_enabled);
return regmap_update_bits(config->regmap, desc->enable_reg,
desc->enable_mask,
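The max77686 change swaps a u64 bit-field for a bitmap, which keeps working even if the regulator count ever exceeds the width of an integer type. A hedged, self-contained sketch of the DECLARE_BITMAP/test_bit/set_bit idiom with a made-up count:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_N_REGULATORS 40		/* hypothetical, may exceed 32 */

struct example_state {
	DECLARE_BITMAP(gpio_enabled, EXAMPLE_N_REGULATORS);
};

static void example_mark_gpio(struct example_state *s, int id)
{
	set_bit(id, s->gpio_enabled);
}

static bool example_uses_gpio(struct example_state *s, int id)
{
	return test_bit(id, s->gpio_enabled);
}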
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 6f1fa1773e76..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -65,6 +65,7 @@ struct virtio_ccw_device {
bool is_thinint;
bool going_away;
bool device_lost;
+ unsigned int config_ready;
void *airq_info;
};
@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
if (ret)
goto out_free;
- memcpy(vcdev->config, config_area, sizeof(vcdev->config));
- memcpy(buf, &vcdev->config[offset], len);
+ memcpy(vcdev->config, config_area, offset + len);
+ if (buf)
+ memcpy(buf, &vcdev->config[offset], len);
+ if (vcdev->config_ready < offset + len)
+ vcdev->config_ready = offset + len;
out_free:
kfree(config_area);
@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
if (!config_area)
goto out_free;
+ /* Don't clobber config fields that were never read into the cache. */
+ if (vcdev->config_ready < offset)
+ virtio_ccw_get_config(vdev, 0, NULL, offset);
memcpy(&vcdev->config[offset], buf, len);
/* Write the config area to the host. */
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
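The config_ready bookkeeping above implements a simple rule: before a partial write into a cached config area, make sure the cache is valid at least up to that offset. A hedged userspace sketch of the same rule (sizes and the refresh stub are illustrative only):

#include <stdio.h>
#include <string.h>

#define CFG_SIZE 64			/* hypothetical config space size */

struct cfg_cache {
	unsigned char config[CFG_SIZE];
	unsigned int ready;		/* bytes of config[] known to be current */
};

/* Stand-in for the transport read; here it only marks bytes as fetched. */
static void refresh_from_device(struct cfg_cache *c, unsigned int len)
{
	c->ready = len;
}

static void cfg_write(struct cfg_cache *c, unsigned int offset,
		      const void *buf, unsigned int len)
{
	if (c->ready < offset)		/* don't clobber bytes never read */
		refresh_from_device(c, offset);
	memcpy(&c->config[offset], buf, len);
	if (c->ready < offset + len)
		c->ready = offset + len;
	/* a real driver would now write the whole cache back to the host */
}

int main(void)
{
	struct cfg_cache c = { .ready = 0 };
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	cfg_write(&c, 8, mac, sizeof(mac));	/* partial write at offset 8 */
	printf("config bytes valid: %u\n", c.ready);
	return 0;
}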
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 47412cf4eaac..73790a1d0969 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -272,7 +272,7 @@
#define IPR_RUNTIME_RESET 0x40000000
#define IPR_IPL_INIT_MIN_STAGE_TIME 5
-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30
#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
#define IPR_IPL_INIT_STAGE_MASK 0xff000000
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index ae45bd99baed..f115f67a6ba5 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -396,6 +396,36 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
+ *
+ * To do: add support for scsi-mq in this function.
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int request_fn_active = 0;
+
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active += q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return request_fn_active;
+}
+
+/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
+static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
+{
+ while (scsi_request_fn_active(shost))
+ msleep(20);
+}
+
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -409,8 +439,10 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
/* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt);
- if (i->f->terminate_rport_io)
+ if (i->f->terminate_rport_io) {
+ srp_wait_for_queuecommand(shost);
i->f->terminate_rport_io(rport);
+ }
}
/**
@@ -504,27 +536,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport)
EXPORT_SYMBOL(srp_start_tl_fail_timers);
/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev;
- struct request_queue *q;
- int request_fn_active = 0;
-
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
-
- spin_lock_irq(q->queue_lock);
- request_fn_active += q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
- }
-
- return request_fn_active;
-}
-
-/**
* srp_reconnect_rport() - reconnect to an SRP target port
* @rport: SRP target port.
*
@@ -559,8 +570,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res)
goto out;
scsi_target_block(&shost->shost_gendev);
- while (scsi_request_fn_active(shost))
- msleep(20);
+ srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
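srp_wait_for_queuecommand() is a plain poll-and-sleep drain. A hedged, generic form of the same pattern (the atomic counter here stands in for q->request_fn_active):

#include <linux/atomic.h>
#include <linux/delay.h>

/* Sleep in 20 ms steps until other contexts have dropped the counter to 0. */
static void example_wait_until_idle(atomic_t *active)
{
	while (atomic_read(active))
		msleep(20);
}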
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 861664776672..ff97cabdaa81 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -61,6 +61,12 @@ enum orion_spi_type {
struct orion_spi_dev {
enum orion_spi_type typ;
+ /*
+ * min_divisor and max_hz should be mutually exclusive; the only reason
+ * both are set is to handle the armada-370-spi compatible used with an
+ * old device tree.
+ */
+ unsigned long max_hz;
unsigned int min_divisor;
unsigned int max_divisor;
u32 prescale_mask;
@@ -387,8 +393,9 @@ static const struct orion_spi_dev orion_spi_dev_data = {
static const struct orion_spi_dev armada_spi_dev_data = {
.typ = ARMADA_SPI,
- .min_divisor = 1,
+ .min_divisor = 4,
.max_divisor = 1920,
+ .max_hz = 50000000,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
@@ -454,7 +461,21 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out;
tclk_hz = clk_get_rate(spi->clk);
- master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+
+ /*
+ * With an old device tree, armada-370-spi could also be used on
+ * Armada XP; however, on that SoC the maximum frequency is 50MHz
+ * rather than tclk/4. On Armada 370, tclk cannot exceed 200MHz.
+ * So, to handle both SoCs, take the minimum of 50MHz and tclk/4.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-spi"))
+ master->max_speed_hz = min(devdata->max_hz,
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
+ else
+ master->max_speed_hz =
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
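Worked numbers for the clamp above, assuming (purely for illustration) a 250 MHz tclk on an Armada XP board: tclk/4 is 62.5 MHz, which exceeds the 50 MHz limit, so max_speed_hz ends up at 50 MHz; on a 200 MHz Armada 370 both terms are 50 MHz and the clamp is a no-op. A small standalone check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long tclk_hz = 250000000UL;	/* assumed Armada XP tclk */
	unsigned long max_hz = 50000000UL;
	unsigned long min_divisor = 4;
	unsigned long by_divisor = DIV_ROUND_UP(tclk_hz, min_divisor);
	unsigned long max_speed = by_divisor < max_hz ? by_divisor : max_hz;

	printf("tclk/4 = %lu Hz, clamped max_speed_hz = %lu Hz\n",
	       by_divisor, max_speed);
	return 0;
}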
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 50910d85df5a..d35c1a13217c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -988,9 +988,6 @@ void spi_finalize_current_message(struct spi_master *master)
spin_lock_irqsave(&master->queue_lock, flags);
mesg = master->cur_msg;
- master->cur_msg = NULL;
-
- queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
spi_unmap_msg(master, mesg);
@@ -1003,9 +1000,13 @@ void spi_finalize_current_message(struct spi_master *master)
}
}
- trace_spi_message_done(mesg);
-
+ spin_lock_irqsave(&master->queue_lock, flags);
+ master->cur_msg = NULL;
master->cur_msg_prepared = false;
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ trace_spi_message_done(mesg);
mesg->state = NULL;
if (mesg->complete)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3507f880eb74..45b8c8b338df 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3435,6 +3435,7 @@ done:
static void ffs_closed(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
+ struct f_fs_opts *opts;
ENTER();
ffs_dev_lock();
@@ -3449,8 +3450,13 @@ static void ffs_closed(struct ffs_data *ffs)
ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
- if (!ffs_obj->opts || ffs_obj->opts->no_configfs
- || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
+ if (ffs_obj->opts)
+ opts = ffs_obj->opts;
+ else
+ goto done;
+
+ if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
+ || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
goto done;
unregister_gadget_item(ffs_obj->opts->
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index f8ac4a452f26..0f64165b0147 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -316,6 +316,18 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
return 0;
}
+static inline void mxsfb_enable_axi_clk(struct mxsfb_info *host)
+{
+ if (host->clk_axi)
+ clk_prepare_enable(host->clk_axi);
+}
+
+static inline void mxsfb_disable_axi_clk(struct mxsfb_info *host)
+{
+ if (host->clk_axi)
+ clk_disable_unprepare(host->clk_axi);
+}
+
static void mxsfb_enable_controller(struct fb_info *fb_info)
{
struct mxsfb_info *host = to_imxfb_host(fb_info);
@@ -333,14 +345,13 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
}
}
- if (host->clk_axi)
- clk_prepare_enable(host->clk_axi);
-
if (host->clk_disp_axi)
clk_prepare_enable(host->clk_disp_axi);
clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+ mxsfb_enable_axi_clk(host);
+
/* if it was disabled, re-enable the mode again */
writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
@@ -380,11 +391,11 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
reg = readl(host->base + LCDC_VDCTRL4);
writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
+ mxsfb_disable_axi_clk(host);
+
clk_disable_unprepare(host->clk);
if (host->clk_disp_axi)
clk_disable_unprepare(host->clk_disp_axi);
- if (host->clk_axi)
- clk_disable_unprepare(host->clk_axi);
host->enabled = 0;
@@ -421,6 +432,8 @@ static int mxsfb_set_par(struct fb_info *fb_info)
mxsfb_disable_controller(fb_info);
}
+ mxsfb_enable_axi_clk(host);
+
/* clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
@@ -438,6 +451,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
ctrl |= CTRL_SET_WORD_LENGTH(3);
switch (host->ld_intf_width) {
case STMLCDIF_8BIT:
+ mxsfb_disable_axi_clk(host);
dev_err(&host->pdev->dev,
"Unsupported LCD bus width mapping\n");
return -EINVAL;
@@ -451,6 +465,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
break;
default:
+ mxsfb_disable_axi_clk(host);
dev_err(&host->pdev->dev, "Unhandled color depth of %u\n",
fb_info->var.bits_per_pixel);
return -EINVAL;
@@ -504,6 +519,8 @@ static int mxsfb_set_par(struct fb_info *fb_info)
fb_info->fix.line_length * fb_info->var.yoffset,
host->base + host->devdata->next_buf);
+ mxsfb_disable_axi_clk(host);
+
if (reenable)
mxsfb_enable_controller(fb_info);
@@ -582,10 +599,14 @@ static int mxsfb_pan_display(struct fb_var_screeninfo *var,
offset = fb_info->fix.line_length * var->yoffset;
+ mxsfb_enable_axi_clk(host);
+
/* update on next VSYNC */
writel(fb_info->fix.smem_start + offset,
host->base + host->devdata->next_buf);
+ mxsfb_disable_axi_clk(host);
+
return 0;
}
@@ -608,13 +629,17 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
unsigned line_count;
unsigned period;
unsigned long pa, fbsize;
- int bits_per_pixel, ofs;
+ int bits_per_pixel, ofs, ret = 0;
u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+ mxsfb_enable_axi_clk(host);
+
/* Only restore the mode when the controller is running */
ctrl = readl(host->base + LCDC_CTRL);
- if (!(ctrl & CTRL_RUN))
- return -EINVAL;
+ if (!(ctrl & CTRL_RUN)) {
+ ret = -EINVAL;
+ goto err;
+ }
vdctrl0 = readl(host->base + LCDC_VDCTRL0);
vdctrl2 = readl(host->base + LCDC_VDCTRL2);
@@ -635,7 +660,8 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
break;
case 1:
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
fb_info->var.bits_per_pixel = bits_per_pixel;
@@ -673,10 +699,14 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
pa = readl(host->base + host->devdata->cur_buf);
fbsize = fb_info->fix.line_length * vmode->yres;
- if (pa < fb_info->fix.smem_start)
- return -EINVAL;
- if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
- return -EINVAL;
+ if (pa < fb_info->fix.smem_start) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len) {
+ ret = -EINVAL;
+ goto err;
+ }
ofs = pa - fb_info->fix.smem_start;
if (ofs) {
memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
@@ -689,7 +719,11 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
clk_prepare_enable(host->clk);
host->enabled = 1;
- return 0;
+err:
+ if (ret)
+ mxsfb_disable_axi_clk(host);
+
+ return ret;
}
static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
@@ -915,7 +949,9 @@ static int mxsfb_probe(struct platform_device *pdev)
}
if (!host->enabled) {
+ mxsfb_enable_axi_clk(host);
writel(0, host->base + LCDC_CTRL);
+ mxsfb_disable_axi_clk(host);
mxsfb_set_par(fb_info);
mxsfb_enable_controller(fb_info);
}
@@ -954,11 +990,15 @@ static void mxsfb_shutdown(struct platform_device *pdev)
struct fb_info *fb_info = platform_get_drvdata(pdev);
struct mxsfb_info *host = to_imxfb_host(fb_info);
+ mxsfb_enable_axi_clk(host);
+
/*
* Force stop the LCD controller as keeping it running during reboot
* might interfere with the BootROM's boot mode pads sampling.
*/
writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+
+ mxsfb_disable_axi_clk(host);
}
static struct platform_driver mxsfb_driver = {
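The mxsfb changes wrap every register access window with the new enable/disable helpers so the optional AXI clock only runs while the LCDC registers are actually touched. A hedged sketch of that access pattern with a hypothetical device struct:

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_hw {
	struct clk *clk_axi;		/* may be NULL if there is no gate */
	void __iomem *base;
};

/* Gate a single register write with the optional AXI clock. */
static void example_write_gated(struct example_hw *hw, u32 val,
				unsigned int off)
{
	if (hw->clk_axi)
		clk_prepare_enable(hw->clk_axi);
	writel(val, hw->base + off);
	if (hw->clk_axi)
		clk_disable_unprepare(hw->clk_axi);
}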