From 6904756d54d4f98a9f835ed9cbc3f149ef7ac678 Mon Sep 17 00:00:00 2001
From: Dominik Dingel
Date: Fri, 26 Jul 2013 15:04:00 +0200
Subject: KVM: s390: move kvm_guest_enter,exit closer to sie

commit 2b29a9fdcb92bfc6b6f4c412d71505869de61a56 upstream.

Any uaccess between guest_enter and guest_exit could trigger a page
fault; the page fault handler would then handle it as a guest fault and
translate a user address as a guest address.

Signed-off-by: Dominik Dingel
Signed-off-by: Christian Borntraeger
Signed-off-by: Paolo Bonzini
Signed-off-by: Greg Kroah-Hartman
---
 arch/s390/kvm/kvm-s390.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c1c7c683fa2..698fb826e14 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -622,14 +622,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	kvm_s390_deliver_pending_interrupts(vcpu);
 	vcpu->arch.sie_block->icptcode = 0;
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	trace_kvm_s390_sie_enter(vcpu,
 				 atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+	/*
+	 * As PF_VCPU will be used in fault handler, between guest_enter
+	 * and guest_exit should be no uaccess.
+	 */
+	preempt_disable();
+	kvm_guest_enter();
+	preempt_enable();
 	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+	kvm_guest_exit();
+
+	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+		   vcpu->arch.sie_block->icptcode);
+	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
 	if (rc) {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
@@ -639,10 +650,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		}
 	}
-	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
-		   vcpu->arch.sie_block->icptcode);
-	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
-	kvm_guest_exit();
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
 	return rc;
-- cgit v1.2.3


From e0f23666f0d04cf80c18fdc7ad762d1a55187d9c Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Mon, 29 Jul 2013 23:07:43 +0200
Subject: mac80211: don't wait for TX status forever

commit cb236d2d713cff83d024a82b836757d9e2b50715 upstream.

TX status notification can get lost, or the frames could get stuck on
the queue, so don't wait for the callback from the driver forever and
instead time out after half a second.
Signed-off-by: Johannes Berg Signed-off-by: Luciano Coelho Signed-off-by: Greg Kroah-Hartman --- net/mac80211/mlme.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 55a42f9c4f3..5b4328dcbe4 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -31,10 +31,12 @@ #include "led.h" #define IEEE80211_AUTH_TIMEOUT (HZ / 5) +#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) +#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) #define IEEE80211_ASSOC_MAX_TRIES 3 @@ -3470,10 +3472,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; - ifmgd->auth_data->timeout_started = true; + auth_data->timeout_started = true; run_again(ifmgd, auth_data->timeout); } else { - auth_data->timeout_started = false; + auth_data->timeout = + round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); + auth_data->timeout_started = true; + run_again(ifmgd, auth_data->timeout); } return 0; @@ -3510,7 +3515,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) assoc_data->timeout_started = true; run_again(&sdata->u.mgd, assoc_data->timeout); } else { - assoc_data->timeout_started = false; + assoc_data->timeout = + round_jiffies_up(jiffies + + IEEE80211_ASSOC_TIMEOUT_LONG); + assoc_data->timeout_started = true; + run_again(&sdata->u.mgd, assoc_data->timeout); } return 0; -- cgit v1.2.3 From 2ac3b5e4a97cd3c677ea27ce1136c21018e75cbf Mon Sep 17 00:00:00 2001 From: Jeff Wu Date: Wed, 29 May 2013 06:31:30 +0000 Subject: ACPI: add _STA evaluation at do_acpi_find_child() commit c7d9ca90aa9497f0b6e301ec67c52dd4b57a7852 upstream. Once do_acpi_find_child() has found the first matching handle, it makes the acpi_get_child() loop stop and return that handle. On some platforms, though, there are multiple devices with the same value of "_ADR" in the same namespace scope, and if one of them is enabled, the others will be disabled. For example: Address : 0x1FFFF ; path : SB_PCI0.SATA.DEV0 Address : 0x1FFFF ; path : SB_PCI0.SATA.DEV1 Address : 0x1FFFF ; path : SB_PCI0.SATA.DEV2 If DEV0 and DEV1 are disabled and DEV2 is enabled, the handle of DEV2 should be returned, but actually the function always returns the handle of DEV0. To address that issue, make do_acpi_find_child() evaluate _STA to check the device status. If a matching device object exists, but is disabled, acpi_get_child() will continue to walk the namespace in the hope of finding an enabled one. If one is found, its handle will be returned, but otherwise the function will return the handle of the disabled object found before (in case it is enabled going forward). [rjw: Changelog] Signed-off-by: Jeff Wu Signed-off-by: Rafael J. 
Wysocki Cc: Peter Wu Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/glue.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 40a84cc6740..51ca764cffe 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -81,13 +81,15 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, void *addr_p, void **ret_p) { - unsigned long long addr; + unsigned long long addr, sta; acpi_status status; status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { *ret_p = handle; - return AE_CTRL_TERMINATE; + status = acpi_bus_get_status_handle(handle, &sta); + if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED)) + return AE_CTRL_TERMINATE; } return AE_OK; } -- cgit v1.2.3 From 6e4fdb803584635587bd4dc00d1f8c0883a02d3b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 7 Aug 2013 22:55:00 +0200 Subject: ACPI: Try harder to resolve _ADR collisions for bridges commit 60f75b8e97daf4a39790a20d962cb861b9220af5 upstream. In theory, under a given ACPI namespace node there should be only one child device object with _ADR whose value matches a given bus address exactly. In practice, however, there are systems in which multiple child device objects under a given parent have _ADR matching exactly the same address. In those cases we use _STA to determine which of the multiple matching devices is enabled, since some systems are known to indicate which ACPI device object to associate with the given physical (usually PCI) device this way. Unfortunately, as it turns out, there are systems in which many device objects under the same parent have _ADR matching exactly the same bus address and none of them has _STA, in which case they all should be regarded as enabled according to the spec. Still, if those device objects are supposed to represent bridges (e.g. this is the case for device objects corresponding to PCIe ports), we can try harder and skip the ones that have no child device objects in the ACPI namespace. With luck, we can avoid using device objects that we are not expected to use this way. Although this only works for bridges whose children also have ACPI namespace representation, it is sufficient to address graphics adapter detection issues on some systems, so rework the code finding a matching device ACPI handle for a given bus address to implement this idea. Introduce a new function, acpi_find_child(), taking three arguments: the ACPI handle of the device's parent, a bus address suitable for the device's bus type and a bool indicating if the device is a bridge and make it work as outlined above. Reimplement the function currently used for this purpose, acpi_get_child(), as a call to acpi_find_child() with the last argument set to 'false' and make the PCI subsystem use acpi_find_child() with the bridge information passed as the last argument to it. [Lan Tianyu notices that it is not sufficient to use pci_is_bridge() for that, because the device's subordinate pointer hasn't been set yet at this point, so use hdr_type instead.] 
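As an aside, the selection policy described above boils down to "return the first _ADR match that also passes the extra checks, otherwise fall back to the first match". The following is a minimal, standalone C sketch of that policy, not the kernel implementation: the candidate structure and helper names are made up for illustration, and the real do_find_child() defers the _STA check until a second match actually shows up, with the same end result.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative stand-in for an ACPI device object. */
	struct candidate {
		uint64_t addr;          /* value returned by _ADR */
		bool enabled;           /* _STA reports ACPI_STA_DEVICE_ENABLED */
		bool has_children;      /* has child objects in the namespace */
	};

	static bool extra_checks_passed(const struct candidate *c, bool is_bridge)
	{
		if (!c->enabled)
			return false;
		/* For bridges, additionally require at least one child object. */
		return is_bridge ? c->has_children : true;
	}

	/* Remember the first _ADR match as a fallback, but keep scanning until
	 * a match also passes the extra checks. */
	static const struct candidate *
	find_child(const struct candidate *objs, size_t n, uint64_t addr, bool is_bridge)
	{
		const struct candidate *first_match = NULL;

		for (size_t i = 0; i < n; i++) {
			if (objs[i].addr != addr)
				continue;
			if (!first_match)
				first_match = &objs[i];
			if (extra_checks_passed(&objs[i], is_bridge))
				return &objs[i];
		}
		return first_match;     /* possibly disabled, but better than nothing */
	}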
This change fixes a regression introduced inadvertently by commit 33f767d (ACPI: Rework acpi_get_child() to be more efficient) which overlooked the fact that for acpi_walk_namespace() "post-order" means "after all children have been visited" rather than "on the way back", so for device objects without children and for namespace walks of depth 1, as in the acpi_get_child() case, the "post-order" callbacks ordering is actually the same as the ordering of "pre-order" ones. Since that commit changed the namespace walk in acpi_get_child() to terminate after finding the first matching object instead of going through all of them and returning the last one, it effectively changed the result returned by that function in some rare cases and that led to problems (the switch from a "pre-order" to a "post-order" callback was supposed to prevent that from happening, but it was ineffective). As it turns out, the systems where the change made by commit 33f767d actually matters are those where there are multiple ACPI device objects representing the same PCIe port (which effectively is a bridge). Moreover, only one of them, and the one we are expected to use, has child device objects in the ACPI namespace, so the regression can be addressed as described above. References: https://bugzilla.kernel.org/show_bug.cgi?id=60561 Reported-by: Peter Wu Tested-by: Vladimir Lalov Signed-off-by: Rafael J. Wysocki Acked-by: Bjorn Helgaas Cc: Peter Wu Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/glue.c | 99 ++++++++++++++++++++++++++++++++++++++++--------- drivers/pci/pci-acpi.c | 15 ++++++-- include/acpi/acpi_bus.h | 6 ++- 3 files changed, 98 insertions(+), 22 deletions(-) diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 51ca764cffe..238412077c8 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -78,34 +78,99 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) return ret; } -static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, - void *addr_p, void **ret_p) +static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used, + void *not_used, void **ret_p) { - unsigned long long addr, sta; - acpi_status status; + struct acpi_device *adev = NULL; - status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); - if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { + acpi_bus_get_device(handle, &adev); + if (adev) { *ret_p = handle; - status = acpi_bus_get_status_handle(handle, &sta); - if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED)) - return AE_CTRL_TERMINATE; + return AE_CTRL_TERMINATE; } return AE_OK; } -acpi_handle acpi_get_child(acpi_handle parent, u64 address) +static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge) +{ + unsigned long long sta; + acpi_status status; + + status = acpi_bus_get_status_handle(handle, &sta); + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) + return false; + + if (is_bridge) { + void *test = NULL; + + /* Check if this object has at least one child device. 
*/ + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_dev_present, NULL, NULL, &test); + return !!test; + } + return true; +} + +struct find_child_context { + u64 addr; + bool is_bridge; + acpi_handle ret; + bool ret_checked; +}; + +static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used, + void *data, void **not_used) { - void *ret = NULL; + struct find_child_context *context = data; + unsigned long long addr; + acpi_status status; + + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); + if (ACPI_FAILURE(status) || addr != context->addr) + return AE_OK; - if (!parent) - return NULL; + if (!context->ret) { + /* This is the first matching object. Save its handle. */ + context->ret = handle; + return AE_OK; + } + /* + * There is more than one matching object with the same _ADR value. + * That really is unexpected, so we are kind of beyond the scope of the + * spec here. We have to choose which one to return, though. + * + * First, check if the previously found object is good enough and return + * its handle if so. Second, check the same for the object that we've + * just found. + */ + if (!context->ret_checked) { + if (acpi_extra_checks_passed(context->ret, context->is_bridge)) + return AE_CTRL_TERMINATE; + else + context->ret_checked = true; + } + if (acpi_extra_checks_passed(handle, context->is_bridge)) { + context->ret = handle; + return AE_CTRL_TERMINATE; + } + return AE_OK; +} - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, - do_acpi_find_child, &address, &ret); - return (acpi_handle)ret; +acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge) +{ + if (parent) { + struct find_child_context context = { + .addr = addr, + .is_bridge = is_bridge, + }; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child, + NULL, &context, NULL); + return context.ret; + } + return NULL; } -EXPORT_SYMBOL(acpi_get_child); +EXPORT_SYMBOL_GPL(acpi_find_child); static int acpi_bind_one(struct device *dev, acpi_handle handle) { diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index e4b1fb2c0f5..336b3f94a19 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { - struct pci_dev * pci_dev; - u64 addr; + struct pci_dev *pci_dev = to_pci_dev(dev); + bool is_bridge; + u64 addr; - pci_dev = to_pci_dev(dev); + /* + * pci_is_bridge() is not suitable here, because pci_dev->subordinate + * is set only after acpi_pci_find_device() has been called for the + * given device. 
+ */ + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); if (!*handle) return -ENODEV; return 0; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c13c919ab99..f45b2a7800c 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -455,7 +455,11 @@ struct acpi_pci_root { }; /* helper */ -acpi_handle acpi_get_child(acpi_handle, u64); +acpi_handle acpi_find_child(acpi_handle, u64, bool); +static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr) +{ + return acpi_find_child(handle, addr, false); +} int acpi_is_root_bridge(acpi_handle); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) -- cgit v1.2.3 From 9b2c750d8e81da33cdf6e16db911faa2008652cd Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 20 Aug 2013 13:38:10 +0530 Subject: ARC: gdbserver breakage in Big-Endian configuration #1 [Based on mainline commit 502a0c775c7f0a: "ARC: pt_regs update #5"] gdbserver needs @stop_pc, served by ptrace, but fetched from pt_regs differently, based on in_brkpt_traps(), which in turn relies on additional machine state in pt_regs->event bitfield. unsigned long orig_r8:16, event:16; For big endian config, this macro was returning false, despite being in breakpoint Trap exception, causing wrong @stop_pc to be returned to gdb. Issue #1: In BE, @event above is at offset 2 in word, while a STW insn at offset 0 was used to update it. Resort to using ST insn which updates the half-word at right location. Issue #2: The union involving bitfields causes all the members to be laid out at offset 0. So with fix #1 above, ASM was now updating at offset 2, "C" code was still referencing at offset 0. Fixed by wrapping bitfield in a struct. 
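The layout pitfall behind issue #2 is easy to reproduce in plain C. The sketch below uses unsigned int bit-fields and made-up values rather than the kernel's pt_regs, but shows the same effect: in a bare union every declarator becomes its own member at offset 0, so the two bit-fields alias each other, while the anonymous-struct wrapper (C11, and a long-standing GCC extension) keeps them consecutive within one word.

	#include <stdio.h>

	union broken {                  /* layout before the fix */
		unsigned int event:16, orig_r8:16;   /* two union members, both at offset 0 */
		unsigned int orig_r8_word;
	};

	union fixed {                   /* layout after the fix */
		struct {
			unsigned int event:16, orig_r8:16;   /* consecutive bit-fields */
		};
		unsigned int orig_r8_word;
	};

	int main(void)
	{
		union broken b = { .orig_r8_word = 0 };
		union fixed f = { .orig_r8_word = 0 };

		b.event = 0x1234;
		b.orig_r8 = 0xabcd;     /* clobbers b.event: same storage */
		f.event = 0x1234;
		f.orig_r8 = 0xabcd;     /* distinct bits, f.event stays intact */

		printf("broken: event=%#x orig_r8=%#x\n", (unsigned)b.event, (unsigned)b.orig_r8);
		printf("fixed:  event=%#x orig_r8=%#x\n", (unsigned)f.event, (unsigned)f.orig_r8);
		return 0;
	}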
Reported-by: Noam Camus Tested-by: Anton Kolesov Signed-off-by: Vineet Gupta Signed-off-by: Greg Kroah-Hartman --- arch/arc/include/asm/ptrace.h | 2 ++ arch/arc/kernel/entry.S | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 6179de7e07c..2046a89a57c 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -52,12 +52,14 @@ struct pt_regs { /*to distinguish bet excp, syscall, irq */ union { + struct { #ifdef CONFIG_CPU_BIG_ENDIAN /* so that assembly code is same for LE/BE */ unsigned long orig_r8:16, event:16; #else unsigned long event:16, orig_r8:16; #endif + }; long orig_r8_word; }; }; diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 0c6d664d4a8..6dbe359c760 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -498,7 +498,7 @@ tracesys_exit: trap_with_param: ; stop_pc info by gdb needs this info - stw orig_r8_IS_BRKPT, [sp, PT_orig_r8] + st orig_r8_IS_BRKPT, [sp, PT_orig_r8] mov r0, r12 lr r1, [efa] @@ -723,7 +723,7 @@ not_exception: ; things to what they were, before returning from L2 context ;---------------------------------------------------------------- - ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is + ld r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) -- cgit v1.2.3 From 16a06df257641d466895454f0320b8ccb1583b21 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 20 Aug 2013 13:38:11 +0530 Subject: ARC: gdbserver breakage in Big-Endian configuration #2 [Based on mainline commit 352c1d95e3220d0: "ARC: stop using pt_regs->orig_r8"] Stop using orig_r8 as it could get clobbered by ST in trap_with_param, and further it is semantically not needed either. Signed-off-by: Vineet Gupta Signed-off-by: Greg Kroah-Hartman --- arch/arc/include/asm/syscall.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h index 33ab3048e9b..29de0980430 100644 --- a/arch/arc/include/asm/syscall.h +++ b/arch/arc/include/asm/syscall.h @@ -18,7 +18,7 @@ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { if (user_mode(regs) && in_syscall(regs)) - return regs->orig_r8; + return regs->r8; else return -1; } @@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */ - regs->r8 = regs->orig_r8; + regs->r0 = regs->orig_r0; } static inline long -- cgit v1.2.3 From dfc855921b899a61b9cef8e48d74441a2876e75f Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Thu, 18 Apr 2013 10:13:21 +0200 Subject: ARM: at91: at91sam9x5 RTC is not compatible with at91rm9200 one commit 23fb05c688a8dcb0cf6a4d8d819cffeca82e5c54 upstream. Due to a bug with RTC IMR, we cannot consider at91sam9x5 RTC compatible with the previous one. Modify DT compatibility string, even if the driver is not yet modified to take it into account. 
Signed-off-by: Nicolas Ferre
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/boot/dts/at91sam9x5.dtsi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 1145ac330fb..b5833d1f7cf 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -643,7 +643,7 @@
 			};
 
 			rtc@fffffeb0 {
-				compatible = "atmel,at91rm9200-rtc";
+				compatible = "atmel,at91sam9x5-rtc";
 				reg = <0xfffffeb0 0x40>;
 				interrupts = <1 4 7>;
 				status = "disabled";
-- cgit v1.2.3


From da989ee1c7702050741a551df6866cc1588f277b Mon Sep 17 00:00:00 2001
From: Samuel Ortiz
Date: Fri, 3 May 2013 18:29:30 +0200
Subject: NFC: llcp: Fix non blocking sockets connections

commit b4011239a08e7e6c2c6e970dfa9e8ecb73139261 upstream.

Without the new LLCP_CONNECTING state, non-blocking sockets will be
woken up with a POLLHUP right after calling connect() because their
state is stuck at LLCP_CLOSED. That prevents userspace from implementing
any proper non-blocking, socket-based NFC p2p client.

Signed-off-by: Samuel Ortiz
Signed-off-by: Greg Kroah-Hartman
---
 net/nfc/llcp.h      | 1 +
 net/nfc/llcp_sock.c | 8 +++++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index ff8c434f7df..f924dd209b3 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -19,6 +19,7 @@
 enum llcp_state {
 	LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+	LLCP_CONNECTING,
 	LLCP_CLOSED,
 	LLCP_BOUND,
 	LLCP_LISTEN,
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 380253eccb7..7522c370872 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -571,7 +571,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
-	if (sock_writeable(sk))
+	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -722,14 +722,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 	if (ret)
 		goto sock_unlink;
 
+	sk->sk_state = LLCP_CONNECTING;
+
 	ret = sock_wait_state(sk, LLCP_CONNECTED,
 			      sock_sndtimeo(sk, flags & O_NONBLOCK));
-	if (ret)
+	if (ret && ret != -EINPROGRESS)
 		goto sock_unlink;
 
 	release_sock(sk);
 
-	return 0;
+	return ret;
 
 sock_unlink:
 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
-- cgit v1.2.3


From 446b977ec00db5f4bfb4187bca300dd38bfe7e7d Mon Sep 17 00:00:00 2001
From: Emmanuel Grumbach
Date: Sun, 26 May 2013 20:47:53 +0300
Subject: iwlwifi: mvm: correctly configure MCAST in AP mode

commit 86a91ec757338edbce51de5dabd7afb0366f485c upstream.

The AP mode needs to use the MCAST fifo for the MCAST frames sent after
the DTIM. This fifo needs to be configured with the same parameters as
the VOICE FIFO. A separate SCD queue is mapped to this fifo - the
cab_queue (cab stands for Content After Beacon). This queue isn't
connected to any station, but rather to the MAC context. This queue
should be (and already is) set as the MCAST queue - this is part of the
MAC context command.
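Returning to the NFC LLCP non-blocking connect fix a few patches above: the point of the new LLCP_CONNECTING state and the -EINPROGRESS return is to let the standard userspace pattern below work. This sketch uses AF_INET and an RFC 5737 test address purely for illustration, since an AF_NFC socket needs adapter-specific bind/connect parameters; it is not taken from any NFC tool.

	#include <arpa/inet.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <netinet/in.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(80) };
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int err = 0;
		socklen_t len = sizeof(err);

		inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr); /* test address, expect a timeout */
		fcntl(fd, F_SETFL, O_NONBLOCK);

		if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == -1 &&
		    errno != EINPROGRESS) {
			perror("connect");
			return 1;
		}

		/* Connection in progress: wait for writability. Before the fix
		 * above, an LLCP socket stuck in LLCP_CLOSED reported POLLHUP
		 * here instead. */
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		if (poll(&pfd, 1, 5000) <= 0 || (pfd.revents & (POLLERR | POLLHUP))) {
			fprintf(stderr, "connect timed out or failed\n");
			return 1;
		}

		/* POLLOUT alone is not proof of success: fetch the final status. */
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
		if (err) {
			fprintf(stderr, "connect: %s\n", strerror(err));
			return 1;
		}

		printf("connected\n");
		close(fd);
		return 0;
	}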
Signed-off-by: Emmanuel Grumbach
Reviewed-by: Ilan Peer
Signed-off-by: Johannes Berg
Signed-off-by: Greg Kroah-Hartman
---
 drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h | 8 +++++++-
 drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c   | 6 +++++-
 drivers/net/wireless/iwlwifi/mvm/mvm.h        | 1 +
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index d68640ea41d..98b1feb43d3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -71,7 +71,13 @@
 #define MAC_INDEX_MIN_DRIVER 0
 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
 
-#define AC_NUM 4 /* Number of access categories */
+enum iwl_ac {
+	AC_BK,
+	AC_BE,
+	AC_VI,
+	AC_VO,
+	AC_NUM,
+};
 
 /**
  * enum iwl_mac_protection_flags - MAC context flags
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index b2cc3d98e0f..3b69045e3c8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -362,7 +362,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		break;
 	case NL80211_IFTYPE_AP:
 		iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
-					IWL_MVM_TX_FIFO_VO);
+					IWL_MVM_TX_FIFO_MCAST);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -550,6 +550,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 		cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
 	}
 
+	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+	if (vif->type == NL80211_IFTYPE_AP)
+		cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
 	if (vif->bss_conf.qos)
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 9f46b23801b..80862319c42 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -88,6 +88,7 @@ enum iwl_mvm_tx_fifo {
 	IWL_MVM_TX_FIFO_BE,
 	IWL_MVM_TX_FIFO_VI,
 	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_MCAST = 5,
 };
 
 extern struct ieee80211_ops iwl_mvm_hw_ops;
-- cgit v1.2.3


From 21e0f1b26a6ef640c02f09c4390057ee726de389 Mon Sep 17 00:00:00 2001
From: Emmanuel Grumbach
Date: Sun, 2 Jun 2013 19:54:01 +0300
Subject: iwlwifi: mvm: fix MCAST in AP mode

commit 9116a3683902583a302ac5dcb283416d504d9bb4 upstream.

In multicast, there are no retries and no RTS, since there is no
specific recipient that can ACK or send CTS. This means that we must
not use the rate scale table for multicast frames. This is true for any
frame that doesn't have a valid ieee80211_sta pointer.
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 48c1891e3df..b9ba4e71ea4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, * table is controlled by LINK_QUALITY commands */ - if (ieee80211_is_data(fc)) { + if (ieee80211_is_data(fc) && sta) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; -- cgit v1.2.3 From 06a80c46e85d004a18a51529937049c533e492cd Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 9 Jun 2013 12:59:24 +0300 Subject: iwlwifi: mvm: properly tell the fw that a STA is awake commit 5af01772ee1d6e96849adf728ff837bd71b119c0 upstream. The firmware API wasn't being used correctly, fix that. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/sta.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 736b50bc962..35c8ed5e58a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1296,17 +1296,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, - .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, - .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE), + .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; - /* - * Same modify mask for sleep_tx_count and sleep_state_flags but this - * should be fine since if we set the STA as "awake", then - * sleep_tx_count is not relevant. - */ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); -- cgit v1.2.3 From 0710345e2d2eca0c98b75dfe7c1a8a50c04d4829 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 9 Jun 2013 13:00:12 +0300 Subject: iwlwifi: mvm: don't set the MCAST queue in STA's queue list commit 837fb69f10588caafc883c4473a864660e1403ce upstream. The MCAST queue should be enabled after DTIM only. According to fw API, the MCAST must not be attached to any station, but should appear in the mcast_qid of the AP's mac context only. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 5 +---- drivers/net/wireless/iwlwifi/mvm/sta.c | 3 --- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 3b69045e3c8..d8e858cc8dd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 qmask, ac; + u32 qmask = 0, ac; if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ? 
- BIT(vif->cab_queue) : 0; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) qmask |= BIT(vif->hw_queue[ac]); diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 35c8ed5e58a..68f0bbe1f38 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -226,9 +226,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue); - /* for HW restart - need to reset the seq_number etc... */ memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); -- cgit v1.2.3 From 07e915c91b08d06db1e38241bc517e9b2e4fa3bd Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 13 Jun 2013 10:07:47 +0300 Subject: iwlwifi: mvm: take the seqno from packet if transmit failed commit ebea2f32e814445f94f9e087b646f1cf4d55fa5a upstream. The fw is unreliable in all the cases in which the packet wasn't sent. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/tx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index b9ba4e71ea4..a2e6112e91e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -610,8 +610,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, !(info->flags & IEEE80211_TX_STAT_ACK)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ - if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { + /* W/A FW bug: seq_ctl is wrong when the status isn't success */ + if (status != TX_STATUS_SUCCESS) { struct ieee80211_hdr *hdr = (void *)skb->data; seq_ctl = le16_to_cpu(hdr->seq_ctrl); } -- cgit v1.2.3 From 8351cafff00be058ce4db3753010632e29d104f3 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 24 Jun 2013 15:44:03 +0300 Subject: iwlwifi: mvm: unregister leds when registration failed commit b7327d89ae694a89f9934d428bde520b77b3131c upstream. This was missing and prevented any further attempts to load the module. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index b7e95b0a42b..f7545e06ce2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -243,7 +243,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (ret) return ret; - return ieee80211_register_hw(mvm->hw); + ret = ieee80211_register_hw(mvm->hw); + if (ret) + iwl_mvm_leds_exit(mvm); + + return ret; } static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, -- cgit v1.2.3 From a76139c02f366c601a6895c1a1c1c6d26ec7d469 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 13 Jun 2013 16:06:08 +0200 Subject: iwlwifi: bump required firmware API version for 3160/7260 commit a2d0909a687b4d250cc2b7481072e361678745ba upstream. As the firmware API has changed significantly and we don't have support code for the old APIs, bump the version to be able to release the version 7 API firmware. 
Unfortunately this means that the driver in 3.9 and 3.10 can't work, but that's still better than crashing the device/driver there. Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/iwl-7000.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 50263e87fe1..dc94d44d95c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,16 +67,16 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 6 -#define IWL3160_UCODE_API_MAX 6 +#define IWL7260_UCODE_API_MAX 7 +#define IWL3160_UCODE_API_MAX 7 /* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 6 -#define IWL3160_UCODE_API_OK 6 +#define IWL7260_UCODE_API_OK 7 +#define IWL3160_UCODE_API_OK 7 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 6 -#define IWL3160_UCODE_API_MIN 6 +#define IWL7260_UCODE_API_MIN 7 +#define IWL3160_UCODE_API_MIN 7 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d -- cgit v1.2.3 From b460440f81f11175bdc657ee28a2421f8a02d209 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 May 2013 11:44:49 +0200 Subject: iwlwifi: mvm: adjust firmware D3 configuration API commit dfcb4c3aacedee6838e436fb575b31e138505203 upstream. The D3 firmware API changed to include a new field, adjust the driver to it to avoid getting an NMI when configuring. Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 51e015d1dfb..6f8b2c16ae1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags { * struct iwl_d3_manager_config - D3 manager configuration command * @min_sleep_time: minimum sleep time (in usec) * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * @wakeup_host_timer: force wakeup after this many seconds * * The structure is used for the D3_CONFIG_CMD command. */ struct iwl_d3_manager_config { __le32 min_sleep_time; __le32 wakeup_flags; -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */ + __le32 wakeup_host_timer; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ -- cgit v1.2.3 From b0cfaffa7e183719436a7a2b1b814afb5748c05c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 7 Jun 2013 15:07:48 +0900 Subject: tracing: Do not call kmem_cache_free() on allocation failure commit aaf6ac0f0871cb7fc0f28f3a00edf329bc7adc29 upstream. There's no point calling it when _alloc() failed. 
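For illustration only, here is the same error-handling shape in plain userspace C (with libc, free(NULL) is a harmless no-op, so the old code was merely pointless; handing a never-allocated pointer to kmem_cache_free() is at best equally pointless):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	struct field { char *name; char *type; };

	/* Shape of the code before the patch: the only way to reach the error
	 * label is when malloc() itself failed, so the free() never has
	 * anything to release. */
	static int define_field_old(struct field **out, const char *name, const char *type)
	{
		struct field *f = malloc(sizeof(*f));

		if (!f)
			goto err;

		f->name = strdup(name);
		f->type = strdup(type);
		*out = f;
		return 0;
	err:
		free(f);        /* always called with f == NULL here */
		return -ENOMEM;
	}

The patch simply returns the error straight from the failed allocation and drops the label, as the hunk that follows shows.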
Link: http://lkml.kernel.org/r/1370585268-29169-1-git-send-email-namhyung@kernel.org Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 69532630a2d..5f9a002d17d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -114,7 +114,7 @@ static int __trace_define_field(struct list_head *head, const char *type, field = kmem_cache_alloc(field_cachep, GFP_TRACE); if (!field) - goto err; + return -ENOMEM; field->name = name; field->type = type; @@ -131,11 +131,6 @@ static int __trace_define_field(struct list_head *head, const char *type, list_add(&field->link, head); return 0; - -err: - kmem_cache_free(field_cachep, field); - - return -ENOMEM; } int trace_define_field(struct ftrace_event_call *call, const char *type, -- cgit v1.2.3 From e899e7e4f83ecd606e85d355410d724ec76542cb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 9 Jul 2013 18:35:26 +0900 Subject: tracing/kprobe: Wait for disabling all running kprobe handlers commit a232e270dcb55a70ad3241bc6fc160fd9b5c9e6c upstream. Wait for disabling all running kprobe handlers when a kprobe event is disabled, since the caller, trace_remove_event_call() supposes that a removing event is disabled completely by disabling the event. With this change, ftrace can ensure that there is no running event handlers after disabling it. Link: http://lkml.kernel.org/r/20130709093526.20138.93100.stgit@mhiramat-M0-7522 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_kprobe.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 9f46e98ba8f..c209c6ec34c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -281,6 +281,8 @@ trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) static int disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) { + struct ftrace_event_file **old = NULL; + int wait = 0; int ret = 0; mutex_lock(&probe_enable_lock); @@ -314,10 +316,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) } rcu_assign_pointer(tp->files, new); - - /* Make sure the probe is done with old files */ - synchronize_sched(); - kfree(old); + wait = 1; } else tp->flags &= ~TP_FLAG_PROFILE; @@ -326,11 +325,25 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) disable_kretprobe(&tp->rp); else disable_kprobe(&tp->rp.kp); + wait = 1; } out_unlock: mutex_unlock(&probe_enable_lock); + if (wait) { + /* + * Synchronize with kprobe_trace_func/kretprobe_trace_func + * to ensure disabled (all running handlers are finished). + * This is not only for kfree(), but also the caller, + * trace_remove_event_call() supposes it for releasing + * event_call related objects, which will be accessed in + * the kprobe_trace_func/kretprobe_trace_func. + */ + synchronize_sched(); + kfree(old); /* Ignored if link == NULL */ + } + return ret; } -- cgit v1.2.3 From e2d4dbe02175e54616fe52c4e9c367199f8ecdaa Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:25:54 +0200 Subject: tracing: Introduce trace_create_cpu_file() and tracing_get_cpu() commit 649e9c70da6bfbeb563193a35d3424a5aa7c0d38 upstream. 
Every "file_operations" used by tracing_init_debugfs_percpu is buggy. f_op->open/etc does: 1. struct trace_cpu *tc = inode->i_private; struct trace_array *tr = tc->tr; 2. trace_array_get(tr) or fail; 3. do_something(tc); But tc (and tr) can be already freed before trace_array_get() is called. And it doesn't matter whether this file is per-cpu or it was created by init_tracer_debugfs(), free_percpu() or kfree() are equally bad. Note that even 1. is not safe, the freed memory can be unmapped. But even if it was safe trace_array_get() can wrongly succeed if we also race with the next new_instance_create() which can re-allocate the same tr, or tc was overwritten and ->tr points to the valid tr. In this case 3. uses the freed/reused memory. Add the new trivial helper, trace_create_cpu_file() which simply calls trace_create_file() and encodes "cpu" in "struct inode". Another helper, tracing_get_cpu() will be used to read cpu_nr-or-RING_BUFFER_ALL_CPUS. The patch abuses ->i_cdev to encode the number, it is never used unless the file is S_ISCHR(). But we could use something else, say, i_bytes or even ->d_fsdata. In any case this hack is hidden inside these 2 helpers, it would be trivial to change them if needed. This patch only changes tracing_init_debugfs_percpu() to use the new trace_create_cpu_file(), the next patches will change file_operations. Note: tracing_get_cpu(inode) is always safe but you can't trust the result unless trace_array_get() was called, without trace_types_lock which acts as a barrier it can wrongly return RING_BUFFER_ALL_CPUS. Link: http://lkml.kernel.org/r/20130723152554.GA23710@redhat.com Cc: Al Viro Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 50 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 06a5bcef373..f064dfb973f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2834,6 +2834,17 @@ static int s_show(struct seq_file *m, void *v) return 0; } +/* + * Should be used after trace_array_get(), trace_types_lock + * ensures that i_cdev was already initialized. 
+ */ +static inline int tracing_get_cpu(struct inode *inode) +{ + if (inode->i_cdev) /* See trace_create_cpu_file() */ + return (long)inode->i_cdev - 1; + return RING_BUFFER_ALL_CPUS; +} + static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, @@ -5521,6 +5532,17 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) return tr->percpu_dir; } +static struct dentry * +trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, + void *data, long cpu, const struct file_operations *fops) +{ + struct dentry *ret = trace_create_file(name, mode, parent, data, fops); + + if (ret) /* See tracing_get_cpu() */ + ret->d_inode->i_cdev = (void *)(cpu + 1); + return ret; +} + static void tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) { @@ -5540,28 +5562,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) } /* per cpu trace_pipe */ - trace_create_file("trace_pipe", 0444, d_cpu, - (void *)&data->trace_cpu, &tracing_pipe_fops); + trace_create_cpu_file("trace_pipe", 0444, d_cpu, + &data->trace_cpu, cpu, &tracing_pipe_fops); /* per cpu trace */ - trace_create_file("trace", 0644, d_cpu, - (void *)&data->trace_cpu, &tracing_fops); + trace_create_cpu_file("trace", 0644, d_cpu, + &data->trace_cpu, cpu, &tracing_fops); - trace_create_file("trace_pipe_raw", 0444, d_cpu, - (void *)&data->trace_cpu, &tracing_buffers_fops); + trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, + &data->trace_cpu, cpu, &tracing_buffers_fops); - trace_create_file("stats", 0444, d_cpu, - (void *)&data->trace_cpu, &tracing_stats_fops); + trace_create_cpu_file("stats", 0444, d_cpu, + &data->trace_cpu, cpu, &tracing_stats_fops); - trace_create_file("buffer_size_kb", 0444, d_cpu, - (void *)&data->trace_cpu, &tracing_entries_fops); + trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, + &data->trace_cpu, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT - trace_create_file("snapshot", 0644, d_cpu, - (void *)&data->trace_cpu, &snapshot_fops); + trace_create_cpu_file("snapshot", 0644, d_cpu, + &data->trace_cpu, cpu, &snapshot_fops); - trace_create_file("snapshot_raw", 0444, d_cpu, - (void *)&data->trace_cpu, &snapshot_raw_fops); + trace_create_cpu_file("snapshot_raw", 0444, d_cpu, + &data->trace_cpu, cpu, &snapshot_raw_fops); #endif } -- cgit v1.2.3 From 23b625fa111e7358ce84672c4444c068b2b1e204 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:25:57 +0200 Subject: tracing: Change tracing_pipe_fops() to rely on tracing_get_cpu() commit 15544209cb0b5312e5220a9337a1fe61d1a1f2d9 upstream. tracing_open_pipe() is racy, the memory inode->i_private points to can be already freed. Change debugfs_create_file("trace_pipe", data) callers to to pass "data = tr", tracing_open_pipe() can use tracing_get_cpu(). 
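The i_cdev trick these patches build on (introduced by trace_create_cpu_file() and tracing_get_cpu() above) is simply a small integer stashed in an otherwise unused pointer field, offset by one so that NULL keeps meaning "no cpu recorded". Here is a standalone sketch of the encoding, with illustrative names; pointer/integer round-trips like this are implementation-defined in ISO C, which the kernel code accepts as well:

	#include <stdio.h>

	#define RING_BUFFER_ALL_CPUS -1L

	/* Illustrative stand-ins for the inode->i_cdev encode/decode helpers. */
	static void *encode_cpu(long cpu)
	{
		return (void *)(cpu + 1);       /* cpu 0 becomes 1, so it is not NULL */
	}

	static long decode_cpu(const void *cookie)
	{
		if (cookie)                     /* see tracing_get_cpu() */
			return (long)cookie - 1;
		return RING_BUFFER_ALL_CPUS;    /* field never set: "all cpus" */
	}

	int main(void)
	{
		printf("%ld %ld %ld\n",
		       decode_cpu(encode_cpu(0)),       /* 0  */
		       decode_cpu(encode_cpu(7)),       /* 7  */
		       decode_cpu(NULL));               /* -1 */
		return 0;
	}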
Link: http://lkml.kernel.org/r/20130723152557.GA23717@redhat.com Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f064dfb973f..8e8bb0e2548 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3950,8 +3950,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, static int tracing_open_pipe(struct inode *inode, struct file *filp) { - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; + struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; @@ -3997,9 +3996,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; - iter->cpu_file = tc->cpu; - iter->tr = tc->tr; - iter->trace_buffer = &tc->tr->trace_buffer; + iter->tr = tr; + iter->trace_buffer = &tr->trace_buffer; + iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; @@ -4022,8 +4021,7 @@ fail: static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; + struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); @@ -5563,7 +5561,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", 0444, d_cpu, - &data->trace_cpu, cpu, &tracing_pipe_fops); + tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, @@ -6149,7 +6147,7 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) (void *)&tr->trace_cpu, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, - (void *)&tr->trace_cpu, &tracing_pipe_fops); + tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, (void *)&tr->trace_cpu, &tracing_entries_fops); -- cgit v1.2.3 From 92b6279e284257fb7908bf4d24956f5af13aaa2d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:26:00 +0200 Subject: tracing: Change tracing_buffers_fops to rely on tracing_get_cpu() commit 46ef2be0d1d5ccea0c41bb606143586daadd537c upstream. tracing_buffers_open() is racy, the memory inode->i_private points to can be already freed. Change debugfs_create_file("trace_pipe_raw", data) caller to pass "data = tr", tracing_buffers_open() can use tracing_get_cpu(). Change debugfs_create_file("snapshot_raw_fops", data) caller too, this file uses tracing_buffers_open/release. 
Link: http://lkml.kernel.org/r/20130723152600.GA23720@redhat.com Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8e8bb0e2548..7dcd67332f5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4941,8 +4941,7 @@ static const struct file_operations snapshot_raw_fops = { static int tracing_buffers_open(struct inode *inode, struct file *filp) { - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; + struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; @@ -4961,7 +4960,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) mutex_lock(&trace_types_lock); info->iter.tr = tr; - info->iter.cpu_file = tc->cpu; + info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.trace_buffer = &tr->trace_buffer; info->spare = NULL; @@ -5568,7 +5567,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) &data->trace_cpu, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, - &data->trace_cpu, cpu, &tracing_buffers_fops); + tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, &data->trace_cpu, cpu, &tracing_stats_fops); @@ -5581,7 +5580,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) &data->trace_cpu, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, - &data->trace_cpu, cpu, &snapshot_raw_fops); + tr, cpu, &snapshot_raw_fops); #endif } -- cgit v1.2.3 From 6dafb0f13798c998ec344bf910b2d89c51961481 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:26:03 +0200 Subject: tracing: Change tracing_stats_fops to rely on tracing_get_cpu() commit 4d3435b8a4c3357695e09c5e7a3bf73a19fca5b0 upstream. tracing_open_generic_tc() is racy, the memory inode->i_private points to can be already freed. 1. Change one of its users, tracing_stats_fops, to use tracing_*_generic_tr() instead. 2. Change trace_create_cpu_file("stats", data) to pass "data = tr". 3. Change tracing_stats_read() to use tracing_get_cpu(). 
Link: http://lkml.kernel.org/r/20130723152603.GA23727@redhat.com Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7dcd67332f5..801d72b84a2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2973,7 +2973,6 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp) filp->private_data = inode->i_private; return 0; - } int tracing_open_generic_tc(struct inode *inode, struct file *filp) @@ -5277,14 +5276,14 @@ static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { - struct trace_cpu *tc = filp->private_data; - struct trace_array *tr = tc->tr; + struct inode *inode = file_inode(filp); + struct trace_array *tr = inode->i_private; struct trace_buffer *trace_buf = &tr->trace_buffer; + int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; - int cpu = tc->cpu; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) @@ -5337,10 +5336,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf, } static const struct file_operations tracing_stats_fops = { - .open = tracing_open_generic_tc, + .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, - .release = tracing_release_generic_tc, + .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -5570,7 +5569,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, - &data->trace_cpu, cpu, &tracing_stats_fops); + tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, &data->trace_cpu, cpu, &tracing_entries_fops); -- cgit v1.2.3 From e5dd03c411713c066b94e1d448359e5216115c1c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:26:06 +0200 Subject: tracing: Change tracing_entries_fops to rely on tracing_get_cpu() commit 0bc392ee46d0fd8e6b678457ef71f074f19a03c5 upstream. tracing_open_generic_tc() is racy, the memory inode->i_private points to can be already freed. 1. Change its last user, tracing_entries_fops, to use tracing_*_generic_tr() instead. 2. Change debugfs_create_file("buffer_size_kb", data) callers to pass "data = tr". 3. Change tracing_entries_read() and tracing_entries_write() to use tracing_get_cpu(). 4. Kill the no longer used tracing_open_generic_tc() and tracing_release_generic_tc(). 
Link: http://lkml.kernel.org/r/20130723152606.GA23730@redhat.com Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 49 ++++++++++++------------------------------------- 1 file changed, 12 insertions(+), 37 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 801d72b84a2..83fba3c576e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2975,23 +2975,6 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp) return 0; } -int tracing_open_generic_tc(struct inode *inode, struct file *filp) -{ - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; - - if (tracing_disabled) - return -ENODEV; - - if (trace_array_get(tr) < 0) - return -ENODEV; - - filp->private_data = inode->i_private; - - return 0; - -} - static int tracing_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; @@ -3045,15 +3028,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file) return 0; } -static int tracing_release_generic_tc(struct inode *inode, struct file *file) -{ - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; - - trace_array_put(tr); - return 0; -} - static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; @@ -4374,15 +4348,16 @@ static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct trace_cpu *tc = filp->private_data; - struct trace_array *tr = tc->tr; + struct inode *inode = file_inode(filp); + struct trace_array *tr = inode->i_private; + int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); - if (tc->cpu == RING_BUFFER_ALL_CPUS) { + if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; @@ -4409,7 +4384,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, } else r = sprintf(buf, "X\n"); } else - r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10); + r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); @@ -4421,7 +4396,8 @@ static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct trace_cpu *tc = filp->private_data; + struct inode *inode = file_inode(filp); + struct trace_array *tr = inode->i_private; unsigned long val; int ret; @@ -4435,8 +4411,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, /* value is in KB */ val <<= 10; - - ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu); + ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; @@ -4884,11 +4859,11 @@ static const struct file_operations tracing_pipe_fops = { }; static const struct file_operations tracing_entries_fops = { - .open = tracing_open_generic_tc, + .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, - .release = tracing_release_generic_tc, + .release = tracing_release_generic_tr, }; static const struct file_operations tracing_total_entries_fops = { @@ -5572,7 +5547,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, - &data->trace_cpu, cpu, &tracing_entries_fops); + tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT 
trace_create_cpu_file("snapshot", 0644, d_cpu, @@ -6148,7 +6123,7 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, - (void *)&tr->trace_cpu, &tracing_entries_fops); + tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); -- cgit v1.2.3 From 7272711a2fc64dd6ef980630e129d8731897ee56 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 23 Jul 2013 17:26:10 +0200 Subject: tracing: Change tracing_fops/snapshot_fops to rely on tracing_get_cpu() commit 6484c71cbc170634fa131b6d022d86d61686b88b upstream. tracing_open() and tracing_snapshot_open() are racy, the memory inode->i_private points to can be already freed. Convert these last users of "inode->i_private == trace_cpu" to use "i_private = trace_array" and rely on tracing_get_cpu(). v2: incorporate the fix from Steven, tracing_release() must not blindly dereference file->private_data unless we know that the file was opened for reading. Link: http://lkml.kernel.org/r/20130723152610.GA23737@redhat.com Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 50 ++++++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 83fba3c576e..0582a01a81e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2853,9 +2853,9 @@ static const struct seq_operations tracer_seq_ops = { }; static struct trace_iterator * -__tracing_open(struct trace_array *tr, struct trace_cpu *tc, - struct inode *inode, struct file *file, bool snapshot) +__tracing_open(struct inode *inode, struct file *file, bool snapshot) { + struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; @@ -2896,8 +2896,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc, iter->trace_buffer = &tr->trace_buffer; iter->snapshot = snapshot; iter->pos = -1; + iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); - iter->cpu_file = tc->cpu; /* Notify the tracer early; before we stop tracing. 
*/ if (iter->trace && iter->trace->open) @@ -2977,22 +2977,18 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp) static int tracing_release(struct inode *inode, struct file *file) { + struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; - struct trace_array *tr; int cpu; - /* Writes do not use seq_file, need to grab tr from inode */ if (!(file->f_mode & FMODE_READ)) { - struct trace_cpu *tc = inode->i_private; - - trace_array_put(tc->tr); + trace_array_put(tr); return 0; } + /* Writes do not use seq_file */ iter = m->private; - tr = iter->tr; - mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { @@ -3039,8 +3035,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file) static int tracing_open(struct inode *inode, struct file *file) { - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; + struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; @@ -3048,16 +3043,17 @@ static int tracing_open(struct inode *inode, struct file *file) return -ENODEV; /* If this file was open for write, then erase contents */ - if ((file->f_mode & FMODE_WRITE) && - (file->f_flags & O_TRUNC)) { - if (tc->cpu == RING_BUFFER_ALL_CPUS) + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { + int cpu = tracing_get_cpu(inode); + + if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->trace_buffer); else - tracing_reset(&tr->trace_buffer, tc->cpu); + tracing_reset(&tr->trace_buffer, cpu); } if (file->f_mode & FMODE_READ) { - iter = __tracing_open(tr, tc, inode, file, false); + iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (trace_flags & TRACE_ITER_LATENCY_FMT) @@ -4672,8 +4668,7 @@ struct ftrace_buffer_info { #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { - struct trace_cpu *tc = inode->i_private; - struct trace_array *tr = tc->tr; + struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; @@ -4682,7 +4677,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) return -ENODEV; if (file->f_mode & FMODE_READ) { - iter = __tracing_open(tr, tc, inode, file, true); + iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { @@ -4699,8 +4694,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) ret = 0; iter->tr = tr; - iter->trace_buffer = &tc->tr->max_buffer; - iter->cpu_file = tc->cpu; + iter->trace_buffer = &tr->max_buffer; + iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } @@ -5517,7 +5512,6 @@ trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, static void tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) { - struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu); struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ @@ -5538,7 +5532,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, - &data->trace_cpu, cpu, &tracing_fops); + tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, tr, cpu, &tracing_buffers_fops); @@ -5551,7 +5545,7 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) #ifdef CONFIG_TRACER_SNAPSHOT 
trace_create_cpu_file("snapshot", 0644, d_cpu, - &data->trace_cpu, cpu, &snapshot_fops); + tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, tr, cpu, &snapshot_raw_fops); @@ -6117,7 +6111,7 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, - (void *)&tr->trace_cpu, &tracing_fops); + tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); @@ -6138,11 +6132,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, - tr, &rb_simple_fops); + tr, &rb_simple_fops); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, - (void *)&tr->trace_cpu, &snapshot_fops); + tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) -- cgit v1.2.3 From 29632b10ef02a31dec2918177f6d4244e66d635d Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 23 Jul 2013 22:06:15 -0400 Subject: ftrace: Add check for NULL regs if ops has SAVE_REGS set commit 195a8afc7ac962f8da795549fe38e825f1372b0d upstream. If a ftrace ops is registered with the SAVE_REGS flag set, and there's already a ops registered to one of its functions but without the SAVE_REGS flag, there's a small race window where the SAVE_REGS ops gets added to the list of callbacks to call for that function before the callback trampoline gets set to save the regs. The problem is, the function is not currently saving regs, which opens a small race window where the ops that is expecting regs to be passed to it, wont. This can cause a crash if the callback were to reference the regs, as the SAVE_REGS guarantees that regs will be set. To fix this, we add a check in the loop case where it checks if the ops has the SAVE_REGS flag set, and if so, it will ignore it if regs is not set. Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6c508ff33c6..13f12351b35 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1416,12 +1416,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, * the hashes are freed with call_rcu_sched(). */ static int -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) { struct ftrace_hash *filter_hash; struct ftrace_hash *notrace_hash; int ret; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + /* + * There's a small race when adding ops that the ftrace handler + * that wants regs, may be called without them. We can not + * allow that handler to be called if regs is NULL. 
+ */ + if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) + return 0; +#endif + filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); @@ -4188,7 +4198,7 @@ static inline void ftrace_startup_enable(int command) { } # define ftrace_shutdown_sysctl() do { } while (0) static inline int -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) { return 1; } @@ -4211,7 +4221,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, do_for_each_ftrace_op(op, ftrace_control_list) { if (!(op->flags & FTRACE_OPS_FL_STUB) && !ftrace_function_local_disabled(op) && - ftrace_ops_test(op, ip)) + ftrace_ops_test(op, ip, regs)) op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); trace_recursion_clear(TRACE_CONTROL_BIT); @@ -4244,7 +4254,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, */ preempt_disable_notrace(); do_for_each_ftrace_op(op, ftrace_ops_list) { - if (ftrace_ops_test(op, ip)) + if (ftrace_ops_test(op, ip, regs)) op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); preempt_enable_notrace(); -- cgit v1.2.3 From fdb65fe265a389144db73cca023266dd6d5ff8d9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 26 Jul 2013 19:25:32 +0200 Subject: tracing: Turn event/id->i_private into call->event.type commit 1a11126bcb7c93c289bf3218fa546fd3b0c0df8b upstream. event_id_read() is racy, ftrace_event_call can be already freed by trace_remove_event_call() callers. Change event_create_dir() to pass "data = call->event.type", this is all event_id_read() needs. ftrace_event_id_fops no longer needs tracing_open_generic(). We add the new helper, event_file_data(), to read ->i_private, it will have more users. Note: currently ACCESS_ONCE() and "id != 0" check are not needed, but we are going to change event_remove/rmdir to clear ->i_private. Link: http://lkml.kernel.org/r/20130726172532.GA3605@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 5f9a002d17d..fd16a254173 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -407,6 +407,11 @@ static void put_system(struct ftrace_subsystem_dir *dir) mutex_unlock(&event_mutex); } +static void *event_file_data(struct file *filp) +{ + return ACCESS_ONCE(file_inode(filp)->i_private); +} + /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. 
@@ -960,19 +965,22 @@ static int trace_format_open(struct inode *inode, struct file *file) static ssize_t event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + int id = (long)event_file_data(filp); struct trace_seq *s; int r; if (*ppos) return 0; + if (unlikely(!id)) + return -ENODEV; + s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); - trace_seq_printf(s, "%d\n", call->event.type); + trace_seq_printf(s, "%d\n", id); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); @@ -1263,7 +1271,6 @@ static const struct file_operations ftrace_event_format_fops = { }; static const struct file_operations ftrace_event_id_fops = { - .open = tracing_open_generic, .read = event_id_read, .llseek = default_llseek, }; @@ -1511,8 +1518,8 @@ event_create_dir(struct dentry *parent, #ifdef CONFIG_PERF_EVENTS if (call->event.type && call->class->reg) - trace_create_file("id", 0444, file->dir, call, - id); + trace_create_file("id", 0444, file->dir, + (void *)(long)call->event.type, id); #endif /* -- cgit v1.2.3 From df89bf77ca930f69ba07fd459f735ec3cac69f8f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 26 Jul 2013 19:25:36 +0200 Subject: tracing: Change event_enable/disable_read() to verify i_private != NULL commit bc6f6b08dee5645770efb4b76186ded313f23752 upstream. tracing_open_generic_file() is racy, ftrace_event_file can be already freed by rmdir or trace_remove_event_call(). Change event_enable_read() and event_disable_read() to read and verify "file = i_private" under event_mutex. This fixes nothing, but now we can change debugfs_remove("enable") callers to nullify ->i_private and fix the the problem. Link: http://lkml.kernel.org/r/20130726172536.GA3612@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index fd16a254173..e8c445d1f19 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -682,13 +682,23 @@ static ssize_t event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_file *file = filp->private_data; + struct ftrace_event_file *file; + unsigned long flags; char *buf; - if (file->flags & FTRACE_EVENT_FL_ENABLED) { - if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED) + mutex_lock(&event_mutex); + file = event_file_data(filp); + if (likely(file)) + flags = file->flags; + mutex_unlock(&event_mutex); + + if (!file) + return -ENODEV; + + if (flags & FTRACE_EVENT_FL_ENABLED) { + if (flags & FTRACE_EVENT_FL_SOFT_DISABLED) buf = "0*\n"; - else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) + else if (flags & FTRACE_EVENT_FL_SOFT_MODE) buf = "1*\n"; else buf = "1\n"; @@ -702,13 +712,10 @@ static ssize_t event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_file *file = filp->private_data; + struct ftrace_event_file *file; unsigned long val; int ret; - if (!file) - return -EINVAL; - ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; @@ -720,8 +727,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, switch (val) { case 0: case 1: + ret = -ENODEV; mutex_lock(&event_mutex); - ret = ftrace_event_enable_disable(file, val); + file = 
event_file_data(filp); + if (likely(file)) + ret = ftrace_event_enable_disable(file, val); mutex_unlock(&event_mutex); break; -- cgit v1.2.3 From 70c91fb9a74e7937ef76a542a86c1551d5df4281 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 26 Jul 2013 19:25:40 +0200 Subject: tracing: Change event_filter_read/write to verify i_private != NULL commit e2912b091c26b8ea95e5e00a43a7ac620f6c94a6 upstream. event_filter_read/write() are racy, ftrace_event_call can be already freed by trace_remove_event_call() callers. 1. Shift mutex_lock(event_mutex) from print/apply_event_filter to the callers. 2. Change the callers, event_filter_read() and event_filter_write() to read i_private under this mutex and abort if it is NULL. This fixes nothing, but now we can change debugfs_remove("filter") callers to nullify ->i_private and fix the the problem. Link: http://lkml.kernel.org/r/20130726172540.GA3619@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 26 +++++++++++++++++++------- kernel/trace/trace_events_filter.c | 17 ++++++----------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e8c445d1f19..285589354cb 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1002,21 +1002,28 @@ static ssize_t event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + struct ftrace_event_call *call; struct trace_seq *s; - int r; + int r = -ENODEV; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) return -ENOMEM; trace_seq_init(s); - print_event_filter(call, s); - r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); + mutex_lock(&event_mutex); + call = event_file_data(filp); + if (call) + print_event_filter(call, s); + mutex_unlock(&event_mutex); + + if (call) + r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); @@ -1027,9 +1034,9 @@ static ssize_t event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + struct ftrace_event_call *call; char *buf; - int err; + int err = -ENODEV; if (cnt >= PAGE_SIZE) return -EINVAL; @@ -1044,7 +1051,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, } buf[cnt] = '\0'; - err = apply_event_filter(call, buf); + mutex_lock(&event_mutex); + call = event_file_data(filp); + if (call) + err = apply_event_filter(call, buf); + mutex_unlock(&event_mutex); + free_page((unsigned long) buf); if (err < 0) return err; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e1b653f7e1c..0a1edc694d6 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -631,17 +631,15 @@ static void append_filter_err(struct filter_parse_state *ps, free_page((unsigned long) buf); } +/* caller must hold event_mutex */ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) { - struct event_filter *filter; + struct event_filter *filter = call->filter; - mutex_lock(&event_mutex); - filter = call->filter; if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_printf(s, "none\n"); - mutex_unlock(&event_mutex); } void print_subsystem_event_filter(struct event_subsystem *system, @@ -1835,23 +1833,22 @@ static int 
create_system_filter(struct event_subsystem *system, return err; } +/* caller must hold event_mutex */ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { struct event_filter *filter; - int err = 0; - - mutex_lock(&event_mutex); + int err; if (!strcmp(strstrip(filter_string), "0")) { filter_disable(call); filter = call->filter; if (!filter) - goto out_unlock; + return 0; RCU_INIT_POINTER(call->filter, NULL); /* Make sure the filter is not being used */ synchronize_sched(); __free_filter(filter); - goto out_unlock; + return 0; } err = create_filter(call, filter_string, true, &filter); @@ -1878,8 +1875,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) __free_filter(tmp); } } -out_unlock: - mutex_unlock(&event_mutex); return err; } -- cgit v1.2.3 From b86d0ba62decb830aed2fa525e7557857d3199f2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 26 Jul 2013 19:25:43 +0200 Subject: tracing: Change f_start() to take event_mutex and verify i_private != NULL commit c5a44a1200c6eda2202434f25325e8ad19533fca upstream. trace_format_open() and trace_format_seq_ops are racy, nothing protects ftrace_event_call from trace_remove_event_call(). Change f_start() to take event_mutex and verify i_private != NULL, change f_stop() to drop this lock. This fixes nothing, but now we can change debugfs_remove("format") callers to nullify ->i_private and fix the the problem. Note: the usage of event_mutex is sub-optimal but simple, we can change this later. Link: http://lkml.kernel.org/r/20130726172543.GA3622@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 285589354cb..09782d623c9 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -838,7 +838,7 @@ enum { static void *f_next(struct seq_file *m, void *v, loff_t *pos) { - struct ftrace_event_call *call = m->private; + struct ftrace_event_call *call = event_file_data(m->private); struct ftrace_event_field *field; struct list_head *common_head = &ftrace_common_fields; struct list_head *head = trace_get_fields(call); @@ -882,6 +882,11 @@ static void *f_start(struct seq_file *m, loff_t *pos) loff_t l = 0; void *p; + /* ->stop() is called even if ->start() fails */ + mutex_lock(&event_mutex); + if (!event_file_data(m->private)) + return ERR_PTR(-ENODEV); + /* Start by showing the header */ if (!*pos) return (void *)FORMAT_HEADER; @@ -896,7 +901,7 @@ static void *f_start(struct seq_file *m, loff_t *pos) static int f_show(struct seq_file *m, void *v) { - struct ftrace_event_call *call = m->private; + struct ftrace_event_call *call = event_file_data(m->private); struct ftrace_event_field *field; const char *array_descriptor; @@ -947,6 +952,7 @@ static int f_show(struct seq_file *m, void *v) static void f_stop(struct seq_file *m, void *p) { + mutex_unlock(&event_mutex); } static const struct seq_operations trace_format_seq_ops = { @@ -958,7 +964,6 @@ static const struct seq_operations trace_format_seq_ops = { static int trace_format_open(struct inode *inode, struct file *file) { - struct ftrace_event_call *call = inode->i_private; struct seq_file *m; int ret; @@ -967,7 +972,7 @@ static int trace_format_open(struct inode *inode, struct file *file) return ret; m = file->private_data; - m->private = call; + m->private = file; return 0; } -- 
cgit v1.2.3 From c6febdf258313a120acd1298dd1dd41442131a37 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 26 Jul 2013 19:25:47 +0200 Subject: tracing: Introduce remove_event_file_dir() commit f6a84bdc75b5c11621dec58db73fe102cbaf40cc upstream. Preparation for the next patch. Extract the common code from remove_event_from_tracers() and __trace_remove_event_dirs() into the new helper, remove_event_file_dir(). The patch looks more complicated than it actually is, it also moves remove_subsystem() up to avoid the forward declaration. Link: http://lkml.kernel.org/r/20130726172547.GA3629@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 47 ++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 09782d623c9..422f99dfe69 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -407,11 +407,31 @@ static void put_system(struct ftrace_subsystem_dir *dir) mutex_unlock(&event_mutex); } +static void remove_subsystem(struct ftrace_subsystem_dir *dir) +{ + if (!dir) + return; + + if (!--dir->nr_events) { + debugfs_remove_recursive(dir->entry); + list_del(&dir->list); + __put_system_dir(dir); + } +} + static void *event_file_data(struct file *filp) { return ACCESS_ONCE(file_inode(filp)->i_private); } +static void remove_event_file_dir(struct ftrace_event_file *file) +{ + list_del(&file->list); + debugfs_remove_recursive(file->dir); + remove_subsystem(file->system); + kmem_cache_free(file_cachep, file); +} + /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. @@ -1571,33 +1591,16 @@ event_create_dir(struct dentry *parent, return 0; } -static void remove_subsystem(struct ftrace_subsystem_dir *dir) -{ - if (!dir) - return; - - if (!--dir->nr_events) { - debugfs_remove_recursive(dir->entry); - list_del(&dir->list); - __put_system_dir(dir); - } -} - static void remove_event_from_tracers(struct ftrace_event_call *call) { struct ftrace_event_file *file; struct trace_array *tr; do_for_each_event_file_safe(tr, file) { - if (file->event_call != call) continue; - list_del(&file->list); - debugfs_remove_recursive(file->dir); - remove_subsystem(file->system); - kmem_cache_free(file_cachep, file); - + remove_event_file_dir(file); /* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this @@ -2330,12 +2333,8 @@ __trace_remove_event_dirs(struct trace_array *tr) { struct ftrace_event_file *file, *next; - list_for_each_entry_safe(file, next, &tr->events, list) { - list_del(&file->list); - debugfs_remove_recursive(file->dir); - remove_subsystem(file->system); - kmem_cache_free(file_cachep, file); - } + list_for_each_entry_safe(file, next, &tr->events, list) + remove_event_file_dir(file); } static void -- cgit v1.2.3 From 012dc156d6af49e02a30fcba7d688e251608d97c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 28 Jul 2013 20:35:27 +0200 Subject: tracing: Change remove_event_file_dir() to clear "d_subdirs"->i_private commit bf682c3159c4d298d1126a56793ed3f5e80395f7 upstream. Change remove_event_file_dir() to clear ->i_private for every file we are going to remove. We need to check file->dir != NULL because event_create_dir() can fail. debugfs_remove_recursive(NULL) is fine but the patch moves it under the same check anyway for readability. 
spin_lock(d_lock) and "d_inode != NULL" check are not needed afaics, but I do not understand this code enough. tracing_open_generic_file() and tracing_release_generic_file() can go away, ftrace_enable_fops and ftrace_event_filter_fops() use tracing_open_generic() but only to check tracing_disabled. This fixes all races with event_remove() or instance_delete(). f_op->read/write/whatever can never use the freed file/call, all event/* files were changed to check and use ->i_private under event_mutex. Note: this doesn't not fix other problems, event_remove() can destroy the active ftrace_event_call, we need more changes but those changes are completely orthogonal. Link: http://lkml.kernel.org/r/20130728183527.GB16723@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 47 +++++++++++++++------------------------------ 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 422f99dfe69..0bff8aaf581 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -426,41 +426,25 @@ static void *event_file_data(struct file *filp) static void remove_event_file_dir(struct ftrace_event_file *file) { + struct dentry *dir = file->dir; + struct dentry *child; + + if (dir) { + spin_lock(&dir->d_lock); /* probably unneeded */ + list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { + if (child->d_inode) /* probably unneeded */ + child->d_inode->i_private = NULL; + } + spin_unlock(&dir->d_lock); + + debugfs_remove_recursive(dir); + } + list_del(&file->list); - debugfs_remove_recursive(file->dir); remove_subsystem(file->system); kmem_cache_free(file_cachep, file); } -/* - * Open and update trace_array ref count. - * Must have the current trace_array passed to it. - */ -static int tracing_open_generic_file(struct inode *inode, struct file *filp) -{ - struct ftrace_event_file *file = inode->i_private; - struct trace_array *tr = file->tr; - int ret; - - if (trace_array_get(tr) < 0) - return -ENODEV; - - ret = tracing_open_generic(inode, filp); - if (ret < 0) - trace_array_put(tr); - return ret; -} - -static int tracing_release_generic_file(struct inode *inode, struct file *filp) -{ - struct ftrace_event_file *file = inode->i_private; - struct trace_array *tr = file->tr; - - trace_array_put(tr); - - return 0; -} - /* * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. */ @@ -1303,10 +1287,9 @@ static const struct file_operations ftrace_set_event_fops = { }; static const struct file_operations ftrace_enable_fops = { - .open = tracing_open_generic_file, + .open = tracing_open_generic, .read = event_enable_read, .write = event_enable_write, - .release = tracing_release_generic_file, .llseek = default_llseek, }; -- cgit v1.2.3 From 8169887b694aa3d9632fdd0308aaa1db7c422e75 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 29 Jul 2013 19:50:33 +0200 Subject: tracing: trace_remove_event_call() should fail if call/file is in use commit 2816c551c796ec14620325b2c9ed75b9979d3125 upstream. Change trace_remove_event_call(call) to return the error if this call is active. This is what the callers assume but can't verify outside of the tracing locks. Both trace_kprobe.c/trace_uprobe.c need the additional changes, unregister_trace_probe() should abort if trace_remove_event_call() fails. 
The caller is going to free this call/file so we must ensure that nobody can use them after trace_remove_event_call() succeeds. debugfs should be fine after the previous changes and event_remove() does TRACE_REG_UNREGISTER, but still there are 2 reasons why we need the additional checks: - There could be a perf_event(s) attached to this tp_event, so the patch checks ->perf_refcount. - TRACE_REG_UNREGISTER can be suppressed by FTRACE_EVENT_FL_SOFT_MODE, so we simply check FTRACE_EVENT_FL_ENABLED protected by event_mutex. Link: http://lkml.kernel.org/r/20130729175033.GB26284@redhat.com Reviewed-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- include/linux/ftrace_event.h | 2 +- kernel/trace/trace_events.c | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 44cdc11b15c..120d57a1c3a 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -334,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type); extern int trace_add_event_call(struct ftrace_event_call *call); -extern void trace_remove_event_call(struct ftrace_event_call *call); +extern int trace_remove_event_call(struct ftrace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 0bff8aaf581..3d18aadef49 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1735,16 +1735,47 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) destroy_preds(call); } +static int probe_remove_event_call(struct ftrace_event_call *call) +{ + struct trace_array *tr; + struct ftrace_event_file *file; + +#ifdef CONFIG_PERF_EVENTS + if (call->perf_refcount) + return -EBUSY; +#endif + do_for_each_event_file(tr, file) { + if (file->event_call != call) + continue; + /* + * We can't rely on ftrace_event_enable_disable(enable => 0) + * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress + * TRACE_REG_UNREGISTER. + */ + if (file->flags & FTRACE_EVENT_FL_ENABLED) + return -EBUSY; + break; + } while_for_each_event_file(); + + __trace_remove_event_call(call); + + return 0; +} + /* Remove an event_call */ -void trace_remove_event_call(struct ftrace_event_call *call) +int trace_remove_event_call(struct ftrace_event_call *call) { + int ret; + mutex_lock(&trace_types_lock); mutex_lock(&event_mutex); down_write(&trace_event_sem); - __trace_remove_event_call(call); + ret = probe_remove_event_call(call); up_write(&trace_event_sem); mutex_unlock(&event_mutex); mutex_unlock(&trace_types_lock); + + return ret; } #define for_each_event(event, start, end) \ -- cgit v1.2.3 From 428fbcf9d5d654526a73229d97e76d5d7ac47190 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 3 Jul 2013 23:33:50 -0400 Subject: tracing/kprobes: Fail to unregister if probe event files are in use commit 40c32592668b727cbfcf7b1c0567f581bd62a5e4 upstream. When a probe is being removed, it cleans up the event files that correspond to the probe. But there is a race between writing to one of these files and deleting the probe. This is especially true for the "enable" file. 
CPU 0 CPU 1 ----- ----- fd = open("enable",O_WRONLY); probes_open() release_all_trace_probes() unregister_trace_probe() if (trace_probe_is_enabled(tp)) return -EBUSY write(fd, "1", 1) __ftrace_set_clr_event() call->class->reg() (kprobe_register) enable_trace_probe(tp) __unregister_trace_probe(tp); list_del(&tp->list) unregister_probe_event(tp) <-- fails! free_trace_probe(tp) write(fd, "0", 1) __ftrace_set_clr_event() call->class->unreg (kprobe_register) disable_trace_probe(tp) <-- BOOM! A test program was written that used two threads to simulate the above scenario adding a nanosleep() interval to change the timings and after several thousand runs, it was able to trigger this bug and crash: BUG: unable to handle kernel paging request at 00000005000000f9 IP: [] probes_open+0x3b/0xa7 PGD 7808a067 PUD 0 Oops: 0000 [#1] PREEMPT SMP Dumping ftrace buffer: --------------------------------- Modules linked in: ipt_MASQUERADE sunrpc ip6t_REJECT nf_conntrack_ipv6 CPU: 1 PID: 2070 Comm: test-kprobe-rem Not tainted 3.11.0-rc3-test+ #47 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff880077756440 ti: ffff880076e52000 task.ti: ffff880076e52000 RIP: 0010:[] [] probes_open+0x3b/0xa7 RSP: 0018:ffff880076e53c38 EFLAGS: 00010203 RAX: 0000000500000001 RBX: ffff88007844f440 RCX: 0000000000000003 RDX: 0000000000000003 RSI: 0000000000000003 RDI: ffff880076e52000 RBP: ffff880076e53c58 R08: ffff880076e53bd8 R09: 0000000000000000 R10: ffff880077756440 R11: 0000000000000006 R12: ffffffff810dee35 R13: ffff880079250418 R14: 0000000000000000 R15: ffff88007844f450 FS: 00007f87a276f700(0000) GS:ffff88007d480000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000005000000f9 CR3: 0000000077262000 CR4: 00000000000007e0 Stack: ffff880076e53c58 ffffffff81219ea0 ffff88007844f440 ffffffff810dee35 ffff880076e53ca8 ffffffff81130f78 ffff8800772986c0 ffff8800796f93a0 ffffffff81d1b5d8 ffff880076e53e04 0000000000000000 ffff88007844f440 Call Trace: [] ? security_file_open+0x2c/0x30 [] ? unregister_trace_probe+0x4b/0x4b [] do_dentry_open+0x162/0x226 [] finish_open+0x46/0x54 [] do_last+0x7f6/0x996 [] ? inode_permission+0x42/0x44 [] path_openat+0x232/0x496 [] do_filp_open+0x3a/0x8a [] ? __alloc_fd+0x168/0x17a [] do_sys_open+0x70/0x102 [] ? trace_hardirqs_on_caller+0x160/0x197 [] SyS_open+0x1e/0x20 [] system_call_fastpath+0x16/0x1b Code: e5 41 54 53 48 89 f3 48 83 ec 10 48 23 56 78 48 39 c2 75 6c 31 f6 48 c7 RIP [] probes_open+0x3b/0xa7 RSP CR2: 00000005000000f9 ---[ end trace 35f17d68fc569897 ]--- The unregister_trace_probe() must be done first, and if it fails it must fail the removal of the kprobe. Several changes have already been made by Oleg Nesterov and Masami Hiramatsu to allow moving the unregister_probe_event() before the removal of the probe and exit the function if it fails. This prevents the tp structure from being used after it is freed. 
Link: http://lkml.kernel.org/r/20130704034038.819592356@goodmis.org Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_kprobe.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c209c6ec34c..64abc8ca928 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -90,7 +90,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) } static int register_probe_event(struct trace_probe *tp); -static void unregister_probe_event(struct trace_probe *tp); +static int unregister_probe_event(struct trace_probe *tp); static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); @@ -411,9 +411,12 @@ static int unregister_trace_probe(struct trace_probe *tp) if (trace_probe_is_enabled(tp)) return -EBUSY; + /* Will fail if probe is being used by ftrace or perf */ + if (unregister_probe_event(tp)) + return -EBUSY; + __unregister_trace_probe(tp); list_del(&tp->list); - unregister_probe_event(tp); return 0; } @@ -692,7 +695,9 @@ static int release_all_trace_probes(void) /* TODO: Use batch unregistration */ while (!list_empty(&probe_list)) { tp = list_entry(probe_list.next, struct trace_probe, list); - unregister_trace_probe(tp); + ret = unregister_trace_probe(tp); + if (ret) + goto end; free_trace_probe(tp); } @@ -1325,11 +1330,15 @@ static int register_probe_event(struct trace_probe *tp) return ret; } -static void unregister_probe_event(struct trace_probe *tp) +static int unregister_probe_event(struct trace_probe *tp) { + int ret; + /* tp->event is unregistered in trace_remove_event_call() */ - trace_remove_event_call(&tp->call); - kfree(tp->call.print_fmt); + ret = trace_remove_event_call(&tp->call); + if (!ret) + kfree(tp->call.print_fmt); + return ret; } /* Make a debugfs interface for controlling probe points */ -- cgit v1.2.3 From 396dd500521287215d836b84b2ae61cd9163bba5 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 3 Jul 2013 23:33:51 -0400 Subject: tracing/uprobes: Fail to unregister if probe event files are in use commit c6c2401d8bbaf9edc189b4c35a8cb2780b8b988e upstream. Uprobes suffer the same problem that kprobes have. There's a race between writing to the "enable" file and removing the probe. The probe checks for it being in use and if it is not, goes about deleting the probe and the event that represents it. But the problem with that is, after it checks if it is in use it can be enabled, and the deletion of the event (access to the probe) will fail, as it is in use. But the uprobe will still be deleted. This is a problem as the event can reference the uprobe that was deleted. The fix is to remove the event first, and check to make sure the event removal succeeds. Then it is safe to remove the probe. When the event exists, either ftrace or perf can enable the probe and prevent the event from being removed. 
Link: http://lkml.kernel.org/r/20130704034038.991525256@goodmis.org Acked-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_uprobe.c | 51 +++++++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d5d0cd368a5..6fd72b76852 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -70,7 +70,7 @@ struct trace_uprobe { (sizeof(struct probe_arg) * (n))) static int register_uprobe_event(struct trace_uprobe *tu); -static void unregister_uprobe_event(struct trace_uprobe *tu); +static int unregister_uprobe_event(struct trace_uprobe *tu); static DEFINE_MUTEX(uprobe_lock); static LIST_HEAD(uprobe_list); @@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou } /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ -static void unregister_trace_uprobe(struct trace_uprobe *tu) +static int unregister_trace_uprobe(struct trace_uprobe *tu) { + int ret; + + ret = unregister_uprobe_event(tu); + if (ret) + return ret; + list_del(&tu->list); - unregister_uprobe_event(tu); free_trace_uprobe(tu); + return 0; } /* Register a trace_uprobe and probe_event */ @@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) /* register as an event */ old_tp = find_probe_event(tu->call.name, tu->call.class->system); - if (old_tp) + if (old_tp) { /* delete old event */ - unregister_trace_uprobe(old_tp); + ret = unregister_trace_uprobe(old_tp); + if (ret) + goto end; + } ret = register_uprobe_event(tu); if (ret) { @@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv) group = UPROBE_EVENT_SYSTEM; if (is_delete) { + int ret; + if (!event) { pr_info("Delete command needs an event name.\n"); return -EINVAL; @@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv) return -ENOENT; } /* delete an event */ - unregister_trace_uprobe(tu); + ret = unregister_trace_uprobe(tu); mutex_unlock(&uprobe_lock); - return 0; + return ret; } if (argc < 2) { @@ -408,16 +419,20 @@ fail_address_parse: return ret; } -static void cleanup_all_probes(void) +static int cleanup_all_probes(void) { struct trace_uprobe *tu; + int ret = 0; mutex_lock(&uprobe_lock); while (!list_empty(&uprobe_list)) { tu = list_entry(uprobe_list.next, struct trace_uprobe, list); - unregister_trace_uprobe(tu); + ret = unregister_trace_uprobe(tu); + if (ret) + break; } mutex_unlock(&uprobe_lock); + return ret; } /* Probes listing interfaces */ @@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = { static int probes_open(struct inode *inode, struct file *file) { - if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) - cleanup_all_probes(); + int ret; + + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { + ret = cleanup_all_probes(); + if (ret) + return ret; + } return seq_open(file, &probes_seq_op); } @@ -970,12 +990,17 @@ static int register_uprobe_event(struct trace_uprobe *tu) return ret; } -static void unregister_uprobe_event(struct trace_uprobe *tu) +static int unregister_uprobe_event(struct trace_uprobe *tu) { + int ret; + /* tu->event is unregistered in trace_remove_event_call() */ - trace_remove_event_call(&tu->call); + ret = trace_remove_event_call(&tu->call); + if (ret) + return ret; kfree(tu->call.print_fmt); tu->call.print_fmt = NULL; + return 0; } /* Make a trace interface for controling probe points */ -- cgit 
v1.2.3 From f05e999f16aed044c0b8b0837bdb084cbeb8cee6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 30 Jul 2013 00:04:32 -0400 Subject: ftrace: Check module functions being traced on reload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8c4f3c3fa9681dc549cd35419b259496082fef8b upstream. There's been a nasty bug that would show up and not give much info. The bug displayed the following warning: WARNING: at kernel/trace/ftrace.c:1529 __ftrace_hash_rec_update+0x1e3/0x230() Pid: 20903, comm: bash Tainted: G O 3.6.11+ #38405.trunk Call Trace: [] warn_slowpath_common+0x7f/0xc0 [] warn_slowpath_null+0x1a/0x20 [] __ftrace_hash_rec_update+0x1e3/0x230 [] ftrace_hash_move+0x28/0x1d0 [] ? kfree+0x2c/0x110 [] ftrace_regex_release+0x8e/0x150 [] __fput+0xae/0x220 [] ____fput+0xe/0x10 [] task_work_run+0x72/0x90 [] do_notify_resume+0x6c/0xc0 [] ? trace_hardirqs_on_thunk+0x3a/0x3c [] int_signal+0x12/0x17 ---[ end trace 793179526ee09b2c ]--- It was finally narrowed down to unloading a module that was being traced. It was actually more than that. When functions are being traced, there's a table of all functions that have a ref count of the number of active tracers attached to that function. When a function trace callback is registered to a function, the function's record ref count is incremented. When it is unregistered, the function's record ref count is decremented. If an inconsistency is detected (ref count goes below zero) the above warning is shown and the function tracing is permanently disabled until reboot. The ftrace callback ops holds a hash of functions that it filters on (and/or filters off). If the hash is empty, the default means to filter all functions (for the filter_hash) or to disable no functions (for the notrace_hash). When a module is unloaded, it frees the function records that represent the module functions. These records exist on their own pages, that is function records for one module will not exist on the same page as function records for other modules or even the core kernel. Now when a module unloads, the records that represents its functions are freed. When the module is loaded again, the records are recreated with a default ref count of zero (unless there's a callback that traces all functions, then they will also be traced, and the ref count will be incremented). The problem is that if an ftrace callback hash includes functions of the module being unloaded, those hash entries will not be removed. If the module is reloaded in the same location, the hash entries still point to the functions of the module but the module's ref counts do not reflect that. With the help of Steve and Joern, we found a reproducer: Using uinput module and uinput_release function. cd /sys/kernel/debug/tracing modprobe uinput echo uinput_release > set_ftrace_filter echo function > current_tracer rmmod uinput modprobe uinput # check /proc/modules to see if loaded in same addr, otherwise try again echo nop > current_tracer [BOOM] The above loads the uinput module, which creates a table of functions that can be traced within the module. We add uinput_release to the filter_hash to trace just that function. Enable function tracincg, which increments the ref count of the record associated to uinput_release. Remove uinput, which frees the records including the one that represents uinput_release. Load the uinput module again (and make sure it's at the same address). 
This recreates the function records all with a ref count of zero, including uinput_release. Disable function tracing, which will decrement the ref count for uinput_release which is now zero because of the module removal and reload, and we have a mismatch (below zero ref count). The solution is to check all currently tracing ftrace callbacks to see if any are tracing any of the module's functions when a module is loaded (it already does that with callbacks that trace all functions). If a callback happens to have a module function being traced, it increments that records ref count and starts tracing that function. There may be a strange side effect with this, where tracing module functions on unload and then reloading a new module may have that new module's functions being traced. This may be something that confuses the user, but it's not a big deal. Another approach is to disable all callback hashes on module unload, but this leaves some ftrace callbacks that may not be registered, but can still have hashes tracing the module's function where ftrace doesn't know about it. That situation can cause the same bug. This solution solves that case too. Another benefit of this solution, is it is possible to trace a module's function on unload and load. Link: http://lkml.kernel.org/r/20130705142629.GA325@redhat.com Reported-by: Jörn Engel Reported-by: Dave Jones Reported-by: Steve Hodgson Tested-by: Steve Hodgson Signed-off-by: Steven Rostedt Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 71 ++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 62 insertions(+), 9 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 13f12351b35..f23449d0650 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2144,12 +2144,57 @@ static cycle_t ftrace_update_time; static unsigned long ftrace_update_cnt; unsigned long ftrace_update_tot_cnt; -static int ops_traces_mod(struct ftrace_ops *ops) +static inline int ops_traces_mod(struct ftrace_ops *ops) { - struct ftrace_hash *hash; + /* + * Filter_hash being empty will default to trace module. + * But notrace hash requires a test of individual module functions. + */ + return ftrace_hash_empty(ops->filter_hash) && + ftrace_hash_empty(ops->notrace_hash); +} + +/* + * Check if the current ops references the record. + * + * If the ops traces all functions, then it was already accounted for. + * If the ops does not trace the current record function, skip it. + * If the ops ignores the function via notrace filter, skip it. 
+ */ +static inline bool +ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) +{ + /* If ops isn't enabled, ignore it */ + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return 0; + + /* If ops traces all mods, we already accounted for it */ + if (ops_traces_mod(ops)) + return 0; + + /* The function must be in the filter */ + if (!ftrace_hash_empty(ops->filter_hash) && + !ftrace_lookup_ip(ops->filter_hash, rec->ip)) + return 0; - hash = ops->filter_hash; - return ftrace_hash_empty(hash); + /* If in notrace hash, we ignore it too */ + if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) + return 0; + + return 1; +} + +static int referenced_filters(struct dyn_ftrace *rec) +{ + struct ftrace_ops *ops; + int cnt = 0; + + for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { + if (ops_references_rec(ops, rec)) + cnt++; + } + + return cnt; } static int ftrace_update_code(struct module *mod) @@ -2158,6 +2203,7 @@ static int ftrace_update_code(struct module *mod) struct dyn_ftrace *p; cycle_t start, stop; unsigned long ref = 0; + bool test = false; int i; /* @@ -2171,9 +2217,12 @@ static int ftrace_update_code(struct module *mod) for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops->flags & FTRACE_OPS_FL_ENABLED && - ops_traces_mod(ops)) - ref++; + if (ops->flags & FTRACE_OPS_FL_ENABLED) { + if (ops_traces_mod(ops)) + ref++; + else + test = true; + } } } @@ -2183,12 +2232,16 @@ static int ftrace_update_code(struct module *mod) for (pg = ftrace_new_pgs; pg; pg = pg->next) { for (i = 0; i < pg->index; i++) { + int cnt = ref; + /* If something went wrong, bail without enabling anything */ if (unlikely(ftrace_disabled)) return -1; p = &pg->records[i]; - p->flags = ref; + if (test) + cnt += referenced_filters(p); + p->flags = cnt; /* * Do the initial record conversion from mcount jump @@ -2208,7 +2261,7 @@ static int ftrace_update_code(struct module *mod) * conversion puts the module to the correct state, thus * passing the ftrace_make_call check. */ - if (ftrace_start_up && ref) { + if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(p, 1); if (failed) ftrace_bug(failed, p->ip); -- cgit v1.2.3 From 51f508721b778bd48d723bed1c4ca4af0855764c Mon Sep 17 00:00:00 2001 From: Chuck Anderson Date: Tue, 6 Aug 2013 15:12:19 -0700 Subject: xen/smp: initialize IPI vectors before marking CPU online commit fc78d343fa74514f6fd117b5ef4cd27e4ac30236 upstream. An older PVHVM guest (v3.0 based) crashed during vCPU hot-plug with: kernel BUG at drivers/xen/events.c:1328! RCU has detected that a CPU has not entered a quiescent state within the grace period. It needs to send the CPU a reschedule IPI if it is not offline. rcu_implicit_offline_qs() does this check: /* * If the CPU is offline, it is in a quiescent state. We can * trust its state not to change because interrupts are disabled. */ if (cpu_is_offline(rdp->cpu)) { rdp->offline_fqs++; return 1; } Else the CPU is online. Send it a reschedule IPI. The CPU is in the middle of being hot-plugged and has been marked online (!cpu_is_offline()). See start_secondary(): set_cpu_online(smp_processor_id(), true); ... per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; start_secondary() then waits for the CPU bringing up the hot-plugged CPU to mark it as active: /* * Wait until the cpu which brought this one up marked it * online before enabling interrupts. 
If we don't do that then * we can end up waking up the softirq thread before this cpu * reached the active state, which makes the scheduler unhappy * and schedule the softirq thread on the wrong cpu. This is * only observable with forced threaded interrupts, but in * theory it could also happen w/o them. It's just way harder * to achieve. */ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) cpu_relax(); /* enable local interrupts */ local_irq_enable(); The CPU being hot-plugged will be marked active after it has been fully initialized by the CPU managing the hot-plug. In the Xen PVHVM case xen_smp_intr_init() is called to set up the hot-plugged vCPU's XEN_RESCHEDULE_VECTOR. The hot-plugging CPU is marked online, not marked active and does not have its IPI vectors set up. rcu_implicit_offline_qs() sees the hot-plugging cpu is !cpu_is_offline() and tries to send it a reschedule IPI: This will lead to: kernel BUG at drivers/xen/events.c:1328! xen_send_IPI_one() xen_smp_send_reschedule() rcu_implicit_offline_qs() rcu_implicit_dynticks_qs() force_qs_rnp() force_quiescent_state() __rcu_process_callbacks() rcu_process_callbacks() __do_softirq() call_softirq() do_softirq() irq_exit() xen_evtchn_do_upcall() because xen_send_IPI_one() will attempt to use an uninitialized IRQ for the XEN_RESCHEDULE_VECTOR. There is at least one other place that has caused the same crash: xen_smp_send_reschedule() wake_up_idle_cpu() add_timer_on() clocksource_watchdog() call_timer_fn() run_timer_softirq() __do_softirq() call_softirq() do_softirq() irq_exit() xen_evtchn_do_upcall() xen_hvm_callback_vector() clocksource_watchdog() uses cpu_online_mask to pick the next CPU to handle a watchdog timer: /* * Cycle through CPUs to check if the CPUs stay synchronized * to each other. */ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(cpu_online_mask); watchdog_timer.expires += WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, next_cpu); This resulted in an attempt to send an IPI to a hot-plugging CPU that had not initialized its reschedule vector. One option would be to make the RCU code check to not check for CPU offline but for CPU active. As becoming active is done after a CPU is online (in older kernels). But Srivatsa pointed out that "the cpu_active vs cpu_online ordering has been completely reworked - in the online path, cpu_active is set *before* cpu_online, and also, in the cpu offline path, the cpu_active bit is reset in the CPU_DYING notification instead of CPU_DOWN_PREPARE." Drilling in this the bring-up path: "[brought up CPU].. send out a CPU_STARTING notification, and in response to that, the scheduler sets the CPU in the cpu_active_mask. Again, this mask is better left to the scheduler alone, since it has the intelligence to use it judiciously." The conclusion was that: " 1. At the IPI sender side: It is incorrect to send an IPI to an offline CPU (cpu not present in the cpu_online_mask). There are numerous places where we check this and warn/complain. 2. At the IPI receiver side: It is incorrect to let the world know of our presence (by setting ourselves in global bitmasks) until our initialization steps are complete to such an extent that we can handle the consequences (such as receiving interrupts without crashing the sender etc.) " (from Srivatsa) As the native code enables the interrupts at some point we need to be able to service them. In other words a CPU must have valid IPI vectors if it has been marked online. 
It doesn't need to handle the IPI (interrupts may be disabled) but needs to have valid IPI vectors because another CPU may find it in cpu_online_mask and attempt to send it an IPI. This patch will change the order of the Xen vCPU bring-up functions so that Xen vectors have been set up before start_secondary() is called. It also will not continue to bring up a Xen vCPU if xen_smp_intr_init() fails to initialize it. Orabug 13823853 Signed-off-by Chuck Anderson Acked-by: Srivatsa S. Bhat Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Jonghwan Choi Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/smp.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d99cae8147d..a1e58e19d0c 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -667,8 +667,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc; - rc = native_cpu_up(cpu, tidle); - WARN_ON (xen_smp_intr_init(cpu)); + /* + * xen_smp_intr_init() needs to run before native_cpu_up() + * so that IPI vectors are set up on the booting CPU before + * it is marked online in native_cpu_up(). + */ + rc = xen_smp_intr_init(cpu); + WARN_ON(rc); + if (!rc) + rc = native_cpu_up(cpu, tidle); return rc; } -- cgit v1.2.3 From 0cbf39727e08f58246e763f8af2f8dcb8a9f648f Mon Sep 17 00:00:00 2001 From: Joern Rennecke Date: Sat, 24 Aug 2013 12:03:06 +0530 Subject: ARC: [lib] strchr breakage in Big-endian configuration commit b0f55f2a1a295c364be012e82dbab079a2454006 upstream. For a search buffer, 2 byte aligned, strchr() was returning pointer outside of buffer (buf - 1) ------------->8---------------- // Input buffer (default 4 byte aigned) char *buffer = "1AA_"; // actual search start (to mimick 2 byte alignment) char *current_line = &(buffer[2]); // Character to search for char c = 'A'; char *c_pos = strchr(current_line, c); printf("%s\n", c_pos) --> 'AA_' as oppose to 'A_' ------------->8---------------- Reported-by: Anton Kolesov Debugged-by: Anton Kolesov Cc: Noam Camus Signed-off-by: Joern Rennecke Signed-off-by: Vineet Gupta Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/arc/lib/strchr-700.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S index 99c10475d47..9c548c7cf00 100644 --- a/arch/arc/lib/strchr-700.S +++ b/arch/arc/lib/strchr-700.S @@ -39,9 +39,18 @@ ARC_ENTRY strchr ld.a r2,[r0,4] sub r12,r6,r7 bic r12,r12,r6 +#ifdef __LITTLE_ENDIAN__ and r7,r12,r4 breq r7,0,.Loop ; For speed, we want this branch to be unaligned. b .Lfound_char ; Likewise this one. +#else + and r12,r12,r4 + breq r12,0,.Loop ; For speed, we want this branch to be unaligned. + lsr_s r12,r12,7 + bic r2,r7,r6 + b.d .Lfound_char_b + and_s r2,r2,r12 +#endif ; /* We require this code address to be unaligned for speed... */ .Laligned: ld_s r2,[r0] @@ -95,6 +104,7 @@ ARC_ENTRY strchr lsr r7,r7,7 bic r2,r7,r6 +.Lfound_char_b: norm r2,r2 sub_s r0,r0,4 asr_s r2,r2,3 -- cgit v1.2.3 From b169395145ce013ad1ed3ee25878112f647ffd56 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Tue, 6 Aug 2013 14:28:42 +0300 Subject: zd1201: do not use stack as URB transfer_buffer commit 1206ff4ff9d2ef7468a355328bc58ac6ebf5be44 upstream. Patch fixes zd1201 not to use stack as URB transfer_buffer. URB buffers need to be DMA-able, which stack is not. Patch is only compile tested. Signed-off-by: Jussi Kivilinna Signed-off-by: John W. 
Linville Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/zd1201.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f201d6c..b8ba1f925e7 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) goto exit; err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, - USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); + USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); if (err < 0) goto exit; + memcpy(&ret, buf, sizeof(ret)); + if (ret & 0x80) { err = -EIO; goto exit; -- cgit v1.2.3 From 2bee1e0f280b159ce8f9f01c2f5c9de410231a3b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Aug 2013 12:44:39 +0300 Subject: VFS: collect_mounts() should return an ERR_PTR commit 52e220d357a38cb29fa2e29f34ed94c1d66357f4 upstream. This should actually be returning an ERR_PTR on error instead of NULL. That was how it was designed and all the callers expect it. [AV: actually, that's what "VFS: Make clone_mnt()/copy_tree()/collect_mounts() return errors" missed - originally collect_mounts() was expected to return NULL on failure] Signed-off-by: Dan Carpenter Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/namespace.c b/fs/namespace.c index 7b1ca9ba0b0..a45ba4f267f 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path) CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) - return NULL; + return ERR_CAST(tree); return &tree->mnt; } -- cgit v1.2.3 From 8a34139a0c0ab7ed616b3e50f061cac8bc1e0531 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Fri, 9 Aug 2013 18:14:20 -0400 Subject: x86: Don't clear olpc_ofw_header when sentinel is detected commit d55e37bb0f51316e552376ddc0a3fff34ca7108b upstream. OpenFirmware wasn't quite following the protocol described in boot.txt and the kernel has detected this through use of the sentinel value in boot_params. OFW does zero out almost all of the stuff that it should do, but not the sentinel. This causes the kernel to clear olpc_ofw_header, which breaks x86 OLPC support. OpenFirmware has now been fixed. However, it would be nice if we could maintain Linux compatibility with old firmware versions. To do that, we just have to avoid zeroing out olpc_ofw_header. OFW does not write to any other parts of the header that are being zapped by the sentinel-detection code, and all users of olpc_ofw_header are somewhat protected through checking for the OLPC_OFW_SIG magic value before using it. So this should not cause any problems for anyone. Signed-off-by: Daniel Drake Link: http://lkml.kernel.org/r/20130809221420.618E6FAB03@dev.laptop.org Acked-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/bootparam_utils.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f..4a8cb8d7cbd 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ - memset(&boot_params->olpc_ofw_header, 0, + memset(&boot_params->ext_ramdisk_image, 0, (char *)&boot_params->efi_info - - (char *)&boot_params->olpc_ofw_header); + (char *)&boot_params->ext_ramdisk_image); memset(&boot_params->kbd_status, 0, (char *)&boot_params->hdr - (char *)&boot_params->kbd_status); -- cgit v1.2.3 From d8251a942ff0318fd8fde8e69fac5480b60e26b3 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 15 Aug 2013 13:21:06 +0100 Subject: xen/events: initialize local per-cpu mask for all possible events commit 84ca7a8e45dafb49cd5ca90a343ba033e2885c17 upstream. The sizeof() argument in init_evtchn_cpu_bindings() is incorrect resulting in only the first 64 (or 32 in 32-bit guests) ports having their bindings being initialized to VCPU 0. In most cases this does not cause a problem as request_irq() will set the irq affinity which will set the correct local per-cpu mask. However, if the request_irq() is called on a VCPU other than 0, there is a window between the unmasking of the event and the affinity being set were an event may be lost because it is not locally unmasked on any VCPU. If request_irq() is called on VCPU 0 then local irqs are disabled during the window and the race does not occur. Fix this by initializing all NR_EVENT_CHANNEL bits in the local per-cpu masks. Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- drivers/xen/events.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 6a6bbe4ede9..c7338868449 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -346,7 +346,7 @@ static void init_evtchn_cpu_bindings(void) for_each_possible_cpu(i) memset(per_cpu(cpu_evtchn_mask, i), - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); + (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); } static inline void clear_evtchn(int port) -- cgit v1.2.3 From 131cb95fdf390f414163605c997b48cad591a273 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 15 Aug 2013 13:21:07 +0100 Subject: xen/events: mask events when changing their VCPU binding commit 4704fe4f03a5ab27e3c36184af85d5000e0f8a48 upstream. When a event is being bound to a VCPU there is a window between the EVTCHNOP_bind_vpcu call and the adjustment of the local per-cpu masks where an event may be lost. The hypervisor upcalls the new VCPU but the kernel thinks that event is still bound to the old VCPU and ignores it. There is even a problem when the event is being bound to the same VCPU as there is a small window beween the clear_bit() and set_bit() calls in bind_evtchn_to_cpu(). When scanning for pending events, the kernel may read the bit when it is momentarily clear and ignore the event. Avoid this by masking the event during the whole bind operation. 
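A simplified, stand-alone sketch of the mask-around-rebind pattern described above, using C11 atomics to model the per-port mask word; all names are hypothetical, and the real driver uses sync_test_and_set_bit() on the shared-info mask, as the diff below shows:

------------->8----------------
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: one mask bit per event-channel port. */
static _Atomic unsigned long evtchn_mask;

/* Set the mask bit and report whether it was already set. */
static bool test_and_set_mask(int port)
{
	unsigned long bit = 1UL << port;
	return atomic_fetch_or(&evtchn_mask, bit) & bit;
}

static void unmask(int port)
{
	atomic_fetch_and(&evtchn_mask, ~(1UL << port));
}

/* Stand-ins for the hypercall and the local bookkeeping. */
static int hypervisor_bind_vcpu(int port, int vcpu) { (void)port; (void)vcpu; return 0; }
static void bind_port_to_cpu(int port, int vcpu) { (void)port; (void)vcpu; }

static void rebind_port(int port, int new_vcpu)
{
	/* Mask for the whole operation so a delivery cannot race with the
	 * bookkeeping below; remember whether it was already masked. */
	bool was_masked = test_and_set_mask(port);

	if (hypervisor_bind_vcpu(port, new_vcpu) >= 0)
		bind_port_to_cpu(port, new_vcpu);

	/* Only unmask if this function was the one that masked it. */
	if (!was_masked)
		unmask(port);
}

int main(void)
{
	rebind_port(3, 1);
	printf("mask word after rebind: %#lx\n", (unsigned long)atomic_load(&evtchn_mask));
	return 0;
}
------------->8----------------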
Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Jan Beulich Signed-off-by: Greg Kroah-Hartman --- drivers/xen/events.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index c7338868449..1faa1305c04 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -1492,8 +1492,10 @@ void rebind_evtchn_irq(int evtchn, int irq) /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) { + struct shared_info *s = HYPERVISOR_shared_info; struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); + int masked; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1509,6 +1511,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) bind_vcpu.port = evtchn; bind_vcpu.vcpu = tcpu; + /* + * Mask the event while changing the VCPU binding to prevent + * it being delivered on an unexpected VCPU. + */ + masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); + /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore @@ -1517,6 +1525,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); + if (!masked) + unmask_evtchn(evtchn); + return 0; } -- cgit v1.2.3 From 66ed6f3d16a224dfffda420d7b047a7346c63821 Mon Sep 17 00:00:00 2001 From: Sekhar Nori Date: Fri, 16 Aug 2013 14:43:48 +0530 Subject: ARM: davinci: nand: specify ecc strength commit acd36357edc08649e85ff15dc4ed62353c912eff upstream. Starting with kernel v3.5, it is mandatory to specify ECC strength when using hardware ECC. Without this, kernel panics with a warning of the sort: Driver must set ecc.strength when using hardware ECC ------------[ cut here ]------------ kernel BUG at drivers/mtd/nand/nand_base.c:3519! Fix this by specifying ECC strength for the boards which were missing this. 
Reported-by: Holger Freyther Signed-off-by: Sekhar Nori Signed-off-by: Kevin Hilman Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-davinci/board-dm355-leopard.c | 1 + arch/arm/mach-davinci/board-dm644x-evm.c | 1 + arch/arm/mach-davinci/board-dm646x-evm.c | 1 + arch/arm/mach-davinci/board-neuros-osd2.c | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index dff4ddc5ef8..139e42da25f 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW_SYNDROME, + .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a33686a6fbb..fa4bfaf952d 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { .parts = davinci_evm_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, .timing = &davinci_evm_nandflash_timing, }; diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index fbb8e5ab1dc..0c005e876ca 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .options = 0, }; diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 2bc112adf56..808233b60e3 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { .parts = davinci_ntosd2_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, }; -- cgit v1.2.3 From 805eea0bb13dfded98c2383139b64d0576838b65 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 28 Jun 2013 10:39:15 +0200 Subject: ARM: at91/DT: fix at91sam9n12ek memory node commit a57603ca2871ee0773b00839c1ea35c4a2d3eeb0 upstream. 
Signed-off-by: Nicolas Ferre Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/at91sam9n12ek.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index d30e48bd1e9..28ba7989387 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -14,11 +14,11 @@ compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; chosen { - bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; }; memory { - reg = <0x20000000 0x10000000>; + reg = <0x20000000 0x8000000>; }; clocks { -- cgit v1.2.3 From 975a3cd4cc0dff06a635cb16b8fb25fd3ae88234 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2013 11:47:39 +0100 Subject: arm64: perf: fix array out of bounds access in armpmu_map_hw_event() commit 868f6fea8fa63f09acbfa93256d0d2abdcabff79 upstream. This is a port of d9f966357b14 ("ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()") to arm64, which fixes an oops in the arm64 perf backend found as a result of Vince's fuzzing tool. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/perf_event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf..2012646fb46 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; } -- cgit v1.2.3 From 6c6f6c7166e9ee45e545eab850a2862a92c135b2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2013 11:47:40 +0100 Subject: arm64: perf: fix event validation for software group leaders commit ee7538a008a45050c8f706d38b600f55953169f9 upstream. This is a port of c95eb3184ea1 ("ARM: 7809/1: perf: fix event validation for software group leaders") to arm64, which fixes a panic in the arm64 perf backend found as a result of Vince's fuzzing tool. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/perf_event.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 2012646fb46..12e6ccb8869 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -322,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; + if (is_software_event(event)) + return 1; + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; -- cgit v1.2.3 From 90dbc54a171ca7fd96c35d2858d30baf5d7c6376 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 14 Aug 2013 22:36:32 +0100 Subject: ARM: 7816/1: CONFIG_KUSER_HELPERS: fix help text commit ac124504ecf6b20a2457d873d0728a8b991a5b0c upstream. Commit f6f91b0d9fd9 ("ARM: allow kuser helpers to be removed from the vector page") introduced some help text for the CONFIG_KUSER_HELPERS option which is rather contradictory. Let's fix that, and improve it a little. 
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/Kconfig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 2950082a531..08c9fe917d1 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -789,15 +789,18 @@ config KUSER_HELPERS the CPU type fitted to the system. This permits binaries to be run on ARMv4 through to ARMv7 without modification. + See Documentation/arm/kernel_user_helpers.txt for details. + However, the fixed address nature of these helpers can be used by ROP (return orientated programming) authors when creating exploits. If all of the binaries and libraries which run on your platform are built specifically for your platform, and make no use of - these helpers, then you can turn this option off. However, - when such an binary or library is run, it will receive a SIGILL - signal, which will terminate the program. + these helpers, then you can turn this option off to hinder + such exploits. However, in that case, if a binary or library + relying on those helpers is run, it will receive a SIGILL signal, + which will terminate the program. Say N here only if you are absolutely certain that you do not need these helpers; otherwise, the safe option is to say Y. -- cgit v1.2.3 From 6f123e952c4144ddda16ebdac765677ebdf6f37c Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Fri, 23 Aug 2013 12:37:17 +0100 Subject: staging: comedi: bug-fix NULL pointer dereference on failed attach commit 3955dfa8216f712bc204a5ad2f4e51efff252fde upstream. Commit dcd7b8bd63cb81c5b973bf86510ca3c80bbbd162 ("staging: comedi: put module _after_ detach" by myself) reversed a couple of calls in `comedi_device_attach()` when recovering from an error returned by the low-level driver's 'attach' handler. Unfortunately, that introduced a NULL pointer dereference bug as `dev->driver` is NULL after the call to `comedi_device_detach()`. We still have a pointer to the low-level comedi driver structure in the `driv` variable, so use that instead. Signed-off-by: Ian Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/staging/comedi/drivers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 06d190f8fd3..4a2b0427730 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -464,7 +464,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) ret = comedi_device_postconfig(dev); if (ret < 0) { comedi_device_detach(dev); - module_put(dev->driver->module); + module_put(driv->module); } /* On success, the driver module count has been incremented. */ return ret; -- cgit v1.2.3 From 6a33cbd049c293b0c274661d63c8918eb8557086 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 13 Aug 2013 15:57:32 -0400 Subject: drm/radeon/r7xx: fix copy paste typo in golden register setup commit 022374c02e357ac82e98dd2689fb2efe05723d69 upstream. Uses the wrong array size for some asics which can lead to garbage getting written to registers. 
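A stand-alone illustration of why this class of copy/paste slip is harmful: ARRAY_SIZE() taken from the wrong table hands the programming loop the wrong element count. The table names below are hypothetical stand-ins for the golden-register arrays, not the driver's actual data:

------------->8----------------
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical register tables of different lengths. */
static const unsigned int rv730_golden[] = { 0x1, 0x2, 0x3 };
static const unsigned int rv770_golden[] = { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 };

static void program_registers(const unsigned int *regs, unsigned int count)
{
	/* With a count taken from the wrong table, a real implementation
	 * would read past the end of 'regs' and write garbage to hardware. */
	printf("programming %u register entries\n", count);
	(void)regs;
}

int main(void)
{
	program_registers(rv730_golden, ARRAY_SIZE(rv730_golden)); /* correct: 3 entries */
	program_registers(rv730_golden, ARRAY_SIZE(rv770_golden)); /* the copy/paste bug: 6 */
	return 0;
}
------------->8----------------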
Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=60674 Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/rv770.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bcc68ec204a..f5e92cfcc14 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv730_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv730_golden_registers)); radeon_program_register_sequence(rdev, rv730_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv730_mgcg_init)); break; case CHIP_RV710: radeon_program_register_sequence(rdev, @@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv710_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv710_golden_registers)); radeon_program_register_sequence(rdev, rv710_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv710_mgcg_init)); break; case CHIP_RV740: radeon_program_register_sequence(rdev, rv740_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv740_golden_registers)); radeon_program_register_sequence(rdev, rv740_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv740_mgcg_init)); break; default: break; -- cgit v1.2.3 From 21779249f05d0ebdd2eaf464673d224f1883b57a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sun, 11 Aug 2013 21:27:56 +0200 Subject: drm/radeon: fix UVD message buffer validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 112a6d0c071808f6d48354fc8834a574e5dcefc0 upstream. When the message buffer is currently moving block until it is idle again. Signed-off-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_uvd.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 97002a08211..f3ccf6d4add 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -359,6 +359,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } + if (bo->tbo.sync_obj) { + r = radeon_fence_wait(bo->tbo.sync_obj, false); + if (r) { + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + return r; + } + } + r = radeon_bo_kmap(bo, &ptr); if (r) return r; -- cgit v1.2.3 From 749e7bffd3f71d4fadc0fb54eebce12a28d045e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 15 Aug 2013 18:55:22 +0200 Subject: drm/radeon: fix WREG32_OR macro setting bits in a register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d43a93c8d9bc4e0dc0293b6458c077c3c797594f upstream. This bug (introduced in 3.10) in WREG32_OR made commit d3418eacad403033e95e49dc14afa37c2112c134 "drm/radeon/evergreen: setup HDMI before enabling it" cause a regression. Sometimes audio over HDMI wasn't working, sometimes display was corrupted. 
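The one-character diff below hinges on operator precedence: without parentheses, ~or applied to a compound argument such as A | B expands to (~A) | B rather than ~(A | B), which silently changes the mask handed to the read-modify-write helper. A small stand-alone demonstration (hypothetical values, not the radeon register layout):

------------->8----------------
#include <stdio.h>

/* Unparenthesized macro parameter, as in the buggy WREG32_OR. */
#define MASK_BAD(or)  (~or)
/* Properly parenthesized, as in the fix. */
#define MASK_GOOD(or) (~(or))

int main(void)
{
	unsigned int a = 0x0f, b = 0xf0;

	/* MASK_BAD(a | b) expands to (~a | b)   == 0xfffffff0,
	 * MASK_GOOD(a | b) expands to (~(a | b)) == 0xffffff00. */
	printf("bad : %#x\n", MASK_BAD(a | b));
	printf("good: %#x\n", MASK_GOOD(a | b));
	return 0;
}
------------->8----------------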
This fixes: https://bugzilla.kernel.org/show_bug.cgi?id=60687 https://bugzilla.kernel.org/show_bug.cgi?id=60709 https://bugs.freedesktop.org/show_bug.cgi?id=67767 Signed-off-by: Rafał Miłecki Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index bdd9d56812a..d4ff48ce1d8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1764,7 +1764,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); WREG32(reg, tmp_); \ } while (0) #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) -#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) +#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) #define WREG32_PLL_P(reg, val, mask) \ do { \ uint32_t tmp_ = RREG32_PLL(reg); \ -- cgit v1.2.3 From ce04434c6e596b264f92f39d3228883c60262b69 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2013 19:01:14 +0100 Subject: drm/i915: Invalidate TLBs for the rings after a reset commit 884020bf3d2a3787a1cc6df902e98e0eec60330b upstream. After any "soft gfx reset" we must manually invalidate the TLBs associated with each ring. Empirically, it seems that a suspend/resume or D3-D0 cycle count as a "soft reset". The symptom is that the hardware would fail to note the new address for its status page, and so it would continue to write the shadow registers and breadcrumbs into the old physical address (now used by something completely different, scary). Whereas the driver would read the new status page and never see any progress, it would appear that the GPU hung immediately upon resume. Based on a patch by naresh kumar kachhi Reported-by: Thiago Macieira Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=64725 Signed-off-by: Chris Wilson Tested-by: Thiago Macieira Signed-off-by: Daniel Vetter Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 80b0a6626a2..01f6c2cf471 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -617,6 +617,8 @@ will not assert AGPBUSY# and will only be delivered when out of C3. 
*/ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ +#define INSTPM_TLB_INVALIDATE (1<<9) +#define INSTPM_SYNC_FLUSH (1<<5) #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC2 0x020dc diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1424f204283..48fe23e8d18 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -907,6 +907,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); POSTING_READ(mmio); + + /* Flush the TLB for this page */ + if (INTEL_INFO(dev)->gen >= 6) { + u32 reg = RING_INSTPM(ring->mmio_base); + I915_WRITE(reg, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + ring->name); + } } static int -- cgit v1.2.3 From abcdf87c25278b38972e41ca13fffc42b9d1f6f8 Mon Sep 17 00:00:00 2001 From: Wladislav Wiebe Date: Mon, 12 Aug 2013 13:06:53 +0200 Subject: of: fdt: fix memory initialization for expanded DT commit 9e40127526e857fa3f29d51e83277204fbdfc6ba upstream. Already existing property flags are filled wrong for properties created from initial FDT. This could cause problems if this DYNAMIC device-tree functions are used later, i.e. properties are attached/detached/replaced. Simply dumping flags from the running system show, that some initial static (not allocated via kzmalloc()) nodes are marked as dynamic. I putted some debug extensions to property_proc_show(..) : .. + if (OF_IS_DYNAMIC(pp)) + pr_err("DEBUG: xxx : OF_IS_DYNAMIC\n"); + if (OF_IS_DETACHED(pp)) + pr_err("DEBUG: xxx : OF_IS_DETACHED\n"); when you operate on the nodes (e.g.: ~$ cat /proc/device-tree/*some_node*) you will see that those flags are filled wrong, basically in most cases it will dump a DYNAMIC or DETACHED status, which is in not true. (BTW. this OF_IS_DETACHED is a own define for debug purposes which which just make a test_bit(OF_DETACHED, &x->_flags) If nodes are dynamic kernel is allowed to kfree() them. But it will crash attempting to do so on the nodes from FDT -- they are not allocated via kzmalloc(). Signed-off-by: Wladislav Wiebe Acked-by: Alexander Sverdlin Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/of/fdt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 808be06bb67..118773751ea 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, mem = (unsigned long) dt_alloc(size + 4, __alignof__(struct device_node)); + memset((void *)mem, 0, size); + ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); pr_debug(" unflattening %lx...\n", mem); -- cgit v1.2.3 From b0e01ab2f3f31384dd9e1c660e0c9831e717e73c Mon Sep 17 00:00:00 2001 From: Vyacheslav Dubeyko Date: Thu, 22 Aug 2013 16:35:44 -0700 Subject: nilfs2: remove double bio_put() in nilfs_end_bio_write() for BIO_EOPNOTSUPP error commit 2df37a19c686c2d7c4e9b4ce1505b5141e3e5552 upstream. Remove double call of bio_put() in nilfs_end_bio_write() for the case of BIO_EOPNOTSUPP error detection. The issue was found by Dan Carpenter and he suggests first version of the fix too. 
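A minimal stand-alone model of the bug class being fixed: a reference dropped both in the completion path and again by the caller underflows the count and releases the object while it is still in use (generic toy code, not the nilfs2 or block-layer API):

------------->8----------------
#include <stdio.h>

/* Toy refcounted object standing in for a struct bio. */
struct obj {
	int refs;
};

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		printf("last reference dropped: object would be freed here\n");
	else if (o->refs < 0)
		printf("BUG: refcount underflow (%d) -- a double put\n", o->refs);
}

int main(void)
{
	struct obj bio = { .refs = 1 };	/* submitter's reference */

	obj_get(&bio);	/* extra reference taken around submission */

	obj_put(&bio);	/* completion handler's put */
	obj_put(&bio);	/* completion handler's second put on error: the bug */
	obj_put(&bio);	/* submitter's own put -> underflow */
	return 0;
}
------------->8----------------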
Signed-off-by: Vyacheslav Dubeyko Reported-by: Dan Carpenter Acked-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segbuf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index dc9a913784a..5bacf46dc4b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err) if (err == -EOPNOTSUPP) { set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); - bio_put(bio); - /* to be detected by submit_seg_bio() */ + /* to be detected by nilfs_segbuf_submit_bio() */ } if (!uptodate) -- cgit v1.2.3 From 020269d2c629a12856a5413528328fbd4ff71ca9 Mon Sep 17 00:00:00 2001 From: Vyacheslav Dubeyko Date: Thu, 22 Aug 2013 16:35:45 -0700 Subject: nilfs2: fix issue with counting number of bio requests for BIO_EOPNOTSUPP error detection commit 4bf93b50fd04118ac7f33a3c2b8a0a1f9fa80bc9 upstream. Fix the issue with improper counting number of flying bio requests for BIO_EOPNOTSUPP error detection case. The sb_nbio must be incremented exactly the same number of times as complete() function was called (or will be called) because nilfs_segbuf_wait() will call wail_for_completion() for the number of times set to sb_nbio: do { wait_for_completion(&segbuf->sb_bio_event); } while (--segbuf->sb_nbio > 0); Two functions complete() and wait_for_completion() must be called the same number of times for the same sb_bio_event. Otherwise, wait_for_completion() will hang or leak. Signed-off-by: Vyacheslav Dubeyko Cc: Dan Carpenter Acked-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segbuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 5bacf46dc4b..2d8be51f90d 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -376,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, bio->bi_private = segbuf; bio_get(bio); submit_bio(mode, bio); + segbuf->sb_nbio++; if (bio_flagged(bio, BIO_EOPNOTSUPP)) { bio_put(bio); err = -EOPNOTSUPP; goto failed; } - segbuf->sb_nbio++; bio_put(bio); wi->bio = NULL; -- cgit v1.2.3 From 6797361e99b0103055b26c0a3d1079bf2402a3a0 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Thu, 22 Aug 2013 16:35:43 -0700 Subject: drivers/platform/olpc/olpc-ec.c: initialise earlier commit 93dbc1b3b506e16c1f6d5b5dcfe756a85cb1dc58 upstream. Being a low-level component, various drivers (e.g. olpc-battery) assume that it is ok to communicate with the OLPC Embedded Controller during probe. Therefore the OLPC EC driver must be initialised before other drivers try to use it. This was the case until it was recently moved out of arch/x86 and restructured around commits ac2504151f5a ("Platform: OLPC: turn EC driver into a platform_driver") and 85f90cf6ca56 ("x86: OLPC: switch over to using new EC driver on x86"). Use arch_initcall so that olpc-ec is readied earlier, matching the previous behaviour. Fixes a regression introduced in Linux-3.6 where various drivers such as olpc-battery and olpc-xo1-sci failed to load due to an inability to communicate with the EC. The user-visible effect was a lack of battery monitoring, missing ebook/lid switch input devices, etc. 
Signed-off-by: Daniel Drake Cc: Andres Salomon Cc: Paul Fox Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/platform/olpc/olpc-ec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 0f9f8596b30..f9119525f55 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c @@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void) return platform_driver_register(&olpc_ec_plat_driver); } -module_init(olpc_ec_init_module); +arch_initcall(olpc_ec_init_module); MODULE_AUTHOR("Andres Salomon "); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 2356788f31dee0b3c03b3ef594fd113a861b29c1 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Wed, 21 Aug 2013 17:43:31 +0200 Subject: usb: phy: fix build breakage commit 52d5b9aba1f5790ca3231c262979c2c3e26dd99b upstream. Commit 94ae9843 (usb: phy: rename all phy drivers to phy-$name-usb.c) renamed drivers/usb/phy/otg_fsm.h to drivers/usb/phy/phy-fsm-usb.h but changed drivers/usb/phy/phy-fsm-usb.c to include not existing "phy-otg-fsm.h" instead of new "phy-fsm-usb.h". This breaks building: ... drivers/usb/phy/phy-fsm-usb.c:32:25: fatal error: phy-otg-fsm.h: No such file or directory compilation terminated. make[3]: *** [drivers/usb/phy/phy-fsm-usb.o] Error 1 This commit also missed to modify drivers/usb/phy/phy-fsl-usb.h to include new "phy-fsm-usb.h" instead of "otg_fsm.h" resulting in another build breakage: ... In file included from drivers/usb/phy/phy-fsl-usb.c:46:0: drivers/usb/phy/phy-fsl-usb.h:18:21: fatal error: otg_fsm.h: No such file or directory compilation terminated. make[3]: *** [drivers/usb/phy/phy-fsl-usb.o] Error 1 Fix both issues. Signed-off-by: Anatolij Gustschin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/phy/phy-fsl-usb.h | 2 +- drivers/usb/phy/phy-fsm-usb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h index ca266280895..e1859b8ef56 100644 --- a/drivers/usb/phy/phy-fsl-usb.h +++ b/drivers/usb/phy/phy-fsl-usb.h @@ -15,7 +15,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include "otg_fsm.h" +#include "phy-fsm-usb.h" #include #include diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c520b3548e7..7f4596606e1 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c @@ -29,7 +29,7 @@ #include #include -#include "phy-otg-fsm.h" +#include "phy-fsm-usb.h" /* Change USB protocol when there is a protocol change */ static int otg_set_protocol(struct otg_fsm *fsm, int protocol) -- cgit v1.2.3 From 2650a684ca1c26831ecc552c269ffd7a8def3694 Mon Sep 17 00:00:00 2001 From: Anthony Foiani Date: Mon, 19 Aug 2013 19:20:30 -0600 Subject: sata_fsl: save irqs while coalescing commit 99bbdfa6bdcb4bdf5be914a48e9b46941bf30819 upstream. Before this patch, I was seeing the following lockdep splat on my MPC8315 (PPC32) target: [ 9.086051] ================================= [ 9.090393] [ INFO: inconsistent lock state ] [ 9.094744] 3.9.7-ajf-gc39503d #1 Not tainted [ 9.099087] --------------------------------- [ 9.103432] inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage. 
[ 9.109431] scsi_eh_1/39 [HC1[1]:SC0[0]:HE0:SE1] takes: [ 9.114642] (&(&host->lock)->rlock){?.+...}, at: [] sata_fsl_interrupt+0x50/0x250 [ 9.123137] {HARDIRQ-ON-W} state was registered at: [ 9.128004] [] lock_acquire+0x90/0xf4 [ 9.132737] [] _raw_spin_lock+0x34/0x4c [ 9.137645] [] fsl_sata_set_irq_coalescing+0x68/0x100 [ 9.143750] [] sata_fsl_init_controller+0xa8/0xc0 [ 9.149505] [] sata_fsl_probe+0x17c/0x2e8 [ 9.154568] [] driver_probe_device+0x90/0x248 [ 9.159987] [] __driver_attach+0xc4/0xc8 [ 9.164964] [] bus_for_each_dev+0x5c/0xa8 [ 9.170028] [] bus_add_driver+0x100/0x26c [ 9.175091] [] driver_register+0x88/0x198 [ 9.180155] [] do_one_initcall+0x58/0x1b4 [ 9.185226] [] kernel_init_freeable+0x118/0x1c0 [ 9.190823] [] kernel_init+0x18/0x108 [ 9.195542] [] ret_from_kernel_thread+0x64/0x6c [ 9.201142] irq event stamp: 160 [ 9.204366] hardirqs last enabled at (159): [] _raw_spin_unlock_irq+0x30/0x50 [ 9.212469] hardirqs last disabled at (160): [] reenable_mmu+0x30/0x88 [ 9.219867] softirqs last enabled at (144): [] __do_softirq+0x168/0x218 [ 9.227435] softirqs last disabled at (137): [] irq_exit+0xa8/0xb4 [ 9.234481] [ 9.234481] other info that might help us debug this: [ 9.240995] Possible unsafe locking scenario: [ 9.240995] [ 9.246898] CPU0 [ 9.249337] ---- [ 9.251776] lock(&(&host->lock)->rlock); [ 9.255878] [ 9.258492] lock(&(&host->lock)->rlock); [ 9.262765] [ 9.262765] *** DEADLOCK *** [ 9.262765] [ 9.268684] no locks held by scsi_eh_1/39. [ 9.272767] [ 9.272767] stack backtrace: [ 9.277117] Call Trace: [ 9.279589] [cfff9da0] [c0008504] show_stack+0x48/0x150 (unreliable) [ 9.285972] [cfff9de0] [c0447d5c] print_usage_bug.part.35+0x268/0x27c [ 9.292425] [cfff9e10] [c006ace4] mark_lock+0x2ac/0x658 [ 9.297660] [cfff9e40] [c006b7e4] __lock_acquire+0x754/0x1840 [ 9.303414] [cfff9ee0] [c006cdb8] lock_acquire+0x90/0xf4 [ 9.308745] [cfff9f20] [c043ef04] _raw_spin_lock+0x34/0x4c [ 9.314250] [cfff9f30] [c02f4168] sata_fsl_interrupt+0x50/0x250 [ 9.320187] [cfff9f70] [c0079ff0] handle_irq_event_percpu+0x90/0x254 [ 9.326547] [cfff9fc0] [c007a1fc] handle_irq_event+0x48/0x78 [ 9.332220] [cfff9fe0] [c007c95c] handle_level_irq+0x9c/0x104 [ 9.337981] [cfff9ff0] [c000d978] call_handle_irq+0x18/0x28 [ 9.343568] [cc7139f0] [c000608c] do_IRQ+0xf0/0x1a8 [ 9.348464] [cc713a20] [c000fc8c] ret_from_except+0x0/0x14 [ 9.353983] --- Exception: 501 at _raw_spin_unlock_irq+0x40/0x50 [ 9.353983] LR = _raw_spin_unlock_irq+0x30/0x50 [ 9.364839] [cc713af0] [c043db10] wait_for_common+0xac/0x188 [ 9.370513] [cc713b30] [c02ddee4] ata_exec_internal_sg+0x2b0/0x4f0 [ 9.376699] [cc713be0] [c02de18c] ata_exec_internal+0x68/0xa8 [ 9.382454] [cc713c20] [c02de4b8] ata_dev_read_id+0x158/0x594 [ 9.388205] [cc713ca0] [c02ec244] ata_eh_recover+0xd88/0x13d0 [ 9.393962] [cc713d20] [c02f2520] sata_pmp_error_handler+0xc0/0x8ac [ 9.400234] [cc713dd0] [c02ecdc8] ata_scsi_port_error_handler+0x464/0x5e8 [ 9.407023] [cc713e10] [c02ecfd0] ata_scsi_error+0x84/0xb8 [ 9.412528] [cc713e40] [c02c4974] scsi_error_handler+0xd8/0x47c [ 9.418457] [cc713eb0] [c004737c] kthread+0xa8/0xac [ 9.423355] [cc713f40] [c000f6b8] ret_from_kernel_thread+0x64/0x6c This fix was suggested by Bhushan Bharat , and was discussed in email at: http://linuxppc.10917.n7.nabble.com/MPC8315-reboot-failure-lockdep-splat-possibly-related-tp75162.html Same patch successfully tested with 3.9.7. linux-next compiled but not tested on hardware. 
This patch is based off linux-next tag next-20130819 (which is commit 66a01bae29d11916c09f9f5a937cafe7d402e4a5 ) Signed-off-by: Anthony Foiani Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/sata_fsl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index d40e403e82d..8401061b404 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, { struct sata_fsl_host_priv *host_priv = host->private_data; void __iomem *hcr_base = host_priv->hcr_base; + unsigned long flags; if (count > ICC_MAX_INT_COUNT_THRESHOLD) count = ICC_MAX_INT_COUNT_THRESHOLD; @@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, (count > ICC_MIN_INT_COUNT_THRESHOLD)) ticks = ICC_SAFE_INT_TICKS; - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); iowrite32((count << 24 | ticks), hcr_base + ICC); intr_coalescing_count = count; intr_coalescing_ticks = ticks; - spin_unlock(&host->lock); + spin_unlock_irqrestore(&host->lock, flags); DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", intr_coalescing_count, intr_coalescing_ticks); -- cgit v1.2.3 From 9e50b0dd45dd3a26edf83b3f78117a6765fbe8a2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 9 Aug 2013 12:52:31 +0300 Subject: Hostap: copying wrong data prism2_ioctl_giwaplist() commit 909bd5926d474e275599094acad986af79671ac9 upstream. We want the data stored in "addr" and "qual", but the extra ampersands mean we are copying stack data instead. Signed-off-by: Dan Carpenter Signed-off-by: John W. Linville Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/hostap/hostap_ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index ac074731335..e5090309824 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); - memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); + memcpy(extra, addr, sizeof(struct sockaddr) * data->length); data->flags = 1; /* has quality information */ - memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, + memcpy(extra + sizeof(struct sockaddr) * data->length, qual, sizeof(struct iw_quality) * data->length); kfree(addr); -- cgit v1.2.3 From 323d5a7b5762c2fac8d1a5bb6d24d6d4e0efbfb3 Mon Sep 17 00:00:00 2001 From: Terry Suereth Date: Sat, 17 Aug 2013 15:53:12 -0400 Subject: libata: apply behavioral quirks to sil3826 PMP commit 8ffff94d20b7eb446e848e0046107d51b17a20a8 upstream. Fixing support for the Silicon Image 3826 port multiplier, by applying to it the same quirks applied to the Silicon Image 3726. Specifically fixes the repeated timeout/reset process which previously afflicted the 3726, as described from line 290. 
Slightly based on notes from: https://bugzilla.redhat.com/show_bug.cgi?id=890237 Signed-off-by: Terry Suereth Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-pmp.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 1c41722bb7e..20fd337a573 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) /* Disable sending Early R_OK. * With "cached read" HDD testing and multiple ports busy on a SATA - * host controller, 3726 PMP will very rarely drop a deferred + * host controller, 3x26 PMP will very rarely drop a deferred * R_OK that was intended for the host. Symptom will be all * 5 drives under test will timeout, get reset, and recover. */ - if (vendor == 0x1095 && devid == 0x3726) { + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { u32 reg; err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); if (err_mask) { rc = -EIO; - reason = "failed to read Sil3726 Private Register"; + reason = "failed to read Sil3x26 Private Register"; goto fail; } reg &= ~0x1; err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); if (err_mask) { rc = -EIO; - reason = "failed to write Sil3726 Private Register"; + reason = "failed to write Sil3x26 Private Register"; goto fail; } } @@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap) u16 devid = sata_pmp_gscr_devid(gscr); struct ata_link *link; - if (vendor == 0x1095 && devid == 0x3726) { - /* sil3726 quirks */ + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { + /* sil3x26 quirks */ ata_for_each_link(link, ap, EDGE) { /* link reports offline after LPM */ link->flags |= ATA_LFLAG_NO_LPM; -- cgit v1.2.3 From 753fb619d10ed87c9ab5b75e83dd464244986f82 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 26 Jul 2013 15:29:09 +0200 Subject: iwlwifi: dvm: fix calling ieee80211_chswitch_done() with NULL commit 9186a1fd9ed190739423db84bc344d258ef3e3d7 upstream. If channel switch is pending and we remove interface we can crash like showed below due to passing NULL vif to mac80211: BUG: unable to handle kernel paging request at fffffffffffff8cc IP: [] strnlen+0xd/0x40 Call Trace: [] string.isra.3+0x3e/0xd0 [] vsnprintf+0x219/0x640 [] vscnprintf+0x11/0x30 [] vprintk_emit+0x115/0x4f0 [] printk+0x61/0x63 [] ieee80211_chswitch_done+0xaf/0xd0 [mac80211] [] iwl_chswitch_done+0x34/0x40 [iwldvm] [] iwlagn_commit_rxon+0x2a3/0xdc0 [iwldvm] [] ? iwlagn_set_rxon_chain+0x180/0x2c0 [iwldvm] [] iwl_set_mode+0x36/0x40 [iwldvm] [] iwlagn_mac_remove_interface+0x8d/0x1b0 [iwldvm] [] ieee80211_do_stop+0x29d/0x7f0 [mac80211] This is because we nulify ctx->vif in iwlagn_mac_remove_interface() before calling some other functions that teardown interface. To fix just check ctx->vif on iwl_chswitch_done(). We should not call ieee80211_chswitch_done() as channel switch works were already canceled by mac80211 in ieee80211_do_stop() -> ieee80211_mgd_stop(). 
Resolve: https://bugzilla.redhat.com/show_bug.cgi?id=979581 Reported-by: Lukasz Jagiello Signed-off-by: Stanislaw Gruszka Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index cab23af0be9..e04f3da1ccb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -1059,7 +1059,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + return; + + if (ctx->vif) ieee80211_chswitch_done(ctx->vif, is_success); } -- cgit v1.2.3 From 67bdd3c0dc4dc00fc1e708fc9f326304d88cc8be Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 29 Jul 2013 23:05:18 +0300 Subject: iwlwifi: pcie: disable L1 Active after pci_enable_device commit eabc4ac5d7606a57ee2b7308cb7323ea8f60183b upstream. As Arjan pointed out, we mustn't do anything related to PCI configuration until the device is properly enabled with pci_enable_device(). Reported-by: Arjan van de Ven Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/iwlwifi/pcie/trans.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 50ba0a468f9..aeb70e13137 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1481,16 +1481,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->reg_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - /* W/A - seems to solve weird behavior. We need to remove this if we - * don't want to stay in L1 all the time. This wastes a lot of power */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - if (pci_enable_device(pdev)) { err = -ENODEV; goto out_no_pci; } + /* W/A - seems to solve weird behavior. We need to remove this if we + * don't want to stay in L1 all the time. This wastes a lot of power */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); -- cgit v1.2.3 From bda5d1efa09527e443a6990f81459779a313e24d Mon Sep 17 00:00:00 2001 From: Martin Peschke Date: Thu, 22 Aug 2013 17:45:36 +0200 Subject: SCSI: zfcp: fix lock imbalance by reworking request queue locking commit d79ff142624e1be080ad8d09101f7004d79c36e1 upstream. This patch adds wait_event_interruptible_lock_irq_timeout(), which is a straight-forward descendant of wait_event_interruptible_timeout() and wait_event_interruptible_lock_irq(). The zfcp driver used to call wait_event_interruptible_timeout() in combination with some intricate and error-prone locking. Using wait_event_interruptible_lock_irq_timeout() as a replacement nicely cleans up that locking. 
This rework removes a situation that resulted in a locking imbalance in zfcp_qdio_sbal_get(): BUG: workqueue leaked lock or atomic: events/1/0xffffff00/10 last function: zfcp_fc_wka_port_offline+0x0/0xa0 [zfcp] It was introduced by commit c2af7545aaff3495d9bf9a7608c52f0af86fb194 "[SCSI] zfcp: Do not wait for SBALs on stopped queue", which had a new code path related to ZFCP_STATUS_ADAPTER_QDIOUP that took an early exit without a required lock being held. The problem occured when a special, non-SCSI I/O request was being submitted in process context, when the adapter's queues had been torn down. In this case the bug surfaced when the Fibre Channel port connection for a well-known address was closed during a concurrent adapter shut-down procedure, which is a rare constellation. This patch also fixes these warnings from the sparse tool (make C=1): drivers/s390/scsi/zfcp_qdio.c:224:12: warning: context imbalance in 'zfcp_qdio_sbal_check' - wrong count at exit drivers/s390/scsi/zfcp_qdio.c:244:5: warning: context imbalance in 'zfcp_qdio_sbal_get' - unexpected unlock Last but not least, we get rid of that crappy lock-unlock-lock sequence at the beginning of the critical section. It is okay to call zfcp_erp_adapter_reopen() with req_q_lock held. Reported-by: Mikulas Patocka Reported-by: Heiko Carstens Signed-off-by: Martin Peschke Signed-off-by: Steffen Maier Signed-off-by: James Bottomley Signed-off-by: Greg Kroah-Hartman --- drivers/s390/scsi/zfcp_qdio.c | 8 ++---- include/linux/wait.h | 57 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 6 deletions(-) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 665e3cfaaf8..de0598eaacd 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) { - spin_lock_irq(&qdio->req_q_lock); if (atomic_read(&qdio->req_q_free) || !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return 1; - spin_unlock_irq(&qdio->req_q_lock); return 0; } @@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) { long ret; - spin_unlock_irq(&qdio->req_q_lock); - ret = wait_event_interruptible_timeout(qdio->req_q_wq, - zfcp_qdio_sbal_check(qdio), 5 * HZ); + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return -EIO; @@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); } - spin_lock_irq(&qdio->req_q_lock); return -EIO; } diff --git a/include/linux/wait.h b/include/linux/wait.h index 1133695eb06..c8e57602223 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -805,6 +805,63 @@ do { \ __ret; \ }) +#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ + lock, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (signal_pending(current)) { \ + ret = -ERESTARTSYS; \ + break; \ + } \ + spin_unlock_irq(&lock); \ + ret = schedule_timeout(ret); \ + spin_lock_irq(&lock); \ + if (!ret) \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. + * The condition is checked under the lock. 
This is expected + * to be called with the lock taken. + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @lock: a locked spinlock_t, which will be released before schedule() + * and reacquired afterwards. + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or signal is received. The @condition is + * checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * This is supposed to be called while holding the lock. The lock is + * dropped before going to sleep and is reacquired afterwards. + * + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it + * was interrupted by a signal, and the remaining jiffies otherwise + * if the condition evaluated to true before the timeout elapsed. + */ +#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ + timeout) \ +({ \ + int __ret = timeout; \ + \ + if (!(condition)) \ + __wait_event_interruptible_lock_irq_timeout( \ + wq, condition, lock, __ret); \ + __ret; \ +}) + /* * These are the old interfaces to sleep waiting for an event. -- cgit v1.2.3 From e1a289ee6734dd5f9a81a260f3027ad8010f530a Mon Sep 17 00:00:00 2001 From: Martin Peschke Date: Thu, 22 Aug 2013 17:45:37 +0200 Subject: SCSI: zfcp: fix schedule-inside-lock in scsi_device list loops commit 924dd584b198a58aa7cb3efefd8a03326550ce8f upstream. BUG: sleeping function called from invalid context at kernel/workqueue.c:2752 in_atomic(): 1, irqs_disabled(): 1, pid: 360, name: zfcperp0.0.1700 CPU: 1 Not tainted 3.9.3+ #69 Process zfcperp0.0.1700 (pid: 360, task: 0000000075b7e080, ksp: 000000007476bc30) Call Trace: ([<00000000001165de>] show_trace+0x106/0x154) [<00000000001166a0>] show_stack+0x74/0xf4 [<00000000006ff646>] dump_stack+0xc6/0xd4 [<000000000017f3a0>] __might_sleep+0x128/0x148 [<000000000015ece8>] flush_work+0x54/0x1f8 [<00000000001630de>] __cancel_work_timer+0xc6/0x128 [<00000000005067ac>] scsi_device_dev_release_usercontext+0x164/0x23c [<0000000000161816>] execute_in_process_context+0x96/0xa8 [<00000000004d33d8>] device_release+0x60/0xc0 [<000000000048af48>] kobject_release+0xa8/0x1c4 [<00000000004f4bf2>] __scsi_iterate_devices+0xfa/0x130 [<000003ff801b307a>] zfcp_erp_strategy+0x4da/0x1014 [zfcp] [<000003ff801b3caa>] zfcp_erp_thread+0xf6/0x2b0 [zfcp] [<000000000016b75a>] kthread+0xf2/0xfc [<000000000070c9de>] kernel_thread_starter+0x6/0xc [<000000000070c9d8>] kernel_thread_starter+0x0/0xc Apparently, the ref_count for some scsi_device drops down to zero, triggering device removal through execute_in_process_context(), while the lldd error recovery thread iterates through a scsi device list. Unfortunately, execute_in_process_context() decides to immediately execute that device removal function, instead of scheduling asynchronous execution, since it detects process context and thinks it is safe to do so. But almost all calls to shost_for_each_device() in our lldd are inside spin_lock_irq, even in thread context. Obviously, schedule() inside spin_lock_irq sections is a bad idea. Change the lldd to use the proper iterator function, __shost_for_each_device(), in combination with required locking. Occurences that need to be changed include all calls in zfcp_erp.c, since those might be executed in zfcp error recovery thread context with a lock held. 
Other occurences of shost_for_each_device() in zfcp_fsf.c do not need to be changed (no process context, no surrounding locking). The problem was introduced in Linux 2.6.37 by commit b62a8d9b45b971a67a0f8413338c230e3117dff5 "[SCSI] zfcp: Use SCSI device data zfcp_scsi_dev instead of zfcp_unit". Reported-by: Christian Borntraeger Signed-off-by: Martin Peschke Signed-off-by: Steffen Maier Signed-off-by: James Bottomley Signed-off-by: Greg Kroah-Hartman --- drivers/s390/scsi/zfcp_erp.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 4133ab6e20f..8e8f3533d2a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); - else - shost_for_each_device(sdev, port->adapter->scsi_host) + else { + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) zfcp_erp_action_dismiss_lun(sdev); + spin_unlock(port->adapter->scsi_host->host_lock); + } } static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) @@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, { struct scsi_device *sdev; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) _zfcp_erp_lun_reopen(sdev, clear, id, 0); + spin_unlock(port->adapter->scsi_host->host_lock); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) @@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) atomic_set_mask(common_mask, &port->status); read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) } read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) { + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) { struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; + unsigned long flags; atomic_set_mask(mask, &port->status); if (!common_mask) return; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** @@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) struct scsi_device *sdev; u32 
common_mask = mask & ZFCP_COMMON_FLAGS; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + unsigned long flags; atomic_clear_mask(mask, &port->status); @@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) if (clear_counter) atomic_set(&port->erp_counter, 0); - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** -- cgit v1.2.3 From a271397a97869112f0b77ef7e162f64cf3c55113 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 8 Aug 2013 17:47:34 +1000 Subject: SCSI: lpfc: Don't force CONFIG_GENERIC_CSUM on commit f5944daa0a72316077435c18a6571e73ed338332 upstream. We want ppc64 to be able to select between optimised assembly checksum routines in big endian and the generic lib/checksum.c routines in little endian. The lpfc driver is forcing CONFIG_GENERIC_CSUM on which means we are unable to make the decision to enable it in the arch Kconfig. If the option exists it is always forced on. This got introduced in 3.10 via commit 6a7252fdb0c3 ([SCSI] lpfc: fix up Kconfig dependencies). I spoke to Randy about it and the original issue was with CRC_T10DIF not being defined. As such, remove the select of CONFIG_GENERIC_CSUM. Signed-off-by: Anton Blanchard Signed-off-by: James Bottomley Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 86af29f53bb..1348fa47d12 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1353,7 +1353,6 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS - select GENERIC_CSUM select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse -- cgit v1.2.3 From 32b8d5f874e1c6ca88ec4f2d10cd885b3d70cf17 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Mon, 5 Aug 2013 17:55:01 -0700 Subject: SCSI: sg: Fix user memory corruption when SG_IO is interrupted by a signal commit 35dc248383bbab0a7203fca4d722875bc81ef091 upstream. There is a nasty bug in the SCSI SG_IO ioctl that in some circumstances leads to one process writing data into the address space of some other random unrelated process if the ioctl is interrupted by a signal. What happens is the following: - A process issues an SG_IO ioctl with direction DXFER_FROM_DEV (ie the underlying SCSI command will transfer data from the SCSI device to the buffer provided in the ioctl) - Before the command finishes, a signal is sent to the process waiting in the ioctl. This will end up waking up the sg_ioctl() code: result = wait_event_interruptible(sfp->read_wait, (srp_done(sfp, srp) || sdp->detached)); but neither srp_done() nor sdp->detached is true, so we end up just setting srp->orphan and returning to userspace: srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ At this point the original process is done with the ioctl and blithely goes ahead handling the signal, reissuing the ioctl, etc. - Eventually, the SCSI command issued by the first ioctl finishes and ends up in sg_rq_end_io(). 
At the end of that function, we run through: write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } Since srp->orphan *is* set, we set done to 0 (assuming the userspace app has not set keep_orphan via an SG_SET_KEEP_ORPHAN ioctl), and therefore we end up scheduling sg_rq_end_io_usercontext() to run in a workqueue. - In workqueue context we go through sg_rq_end_io_usercontext() -> sg_finish_rem_req() -> blk_rq_unmap_user() -> ... -> bio_uncopy_user() -> __bio_copy_iov() -> copy_to_user(). The key point here is that we are doing copy_to_user() on a workqueue -- that is, we're on a kernel thread with current->mm equal to whatever random previous user process was scheduled before this kernel thread. So we end up copying whatever data the SCSI command returned to the virtual address of the buffer passed into the original ioctl, but it's quite likely we do this copying into a different address space! As suggested by James Bottomley , add a check for current->mm (which is NULL if we're on a kernel thread without a real userspace address space) in bio_uncopy_user(), and skip the copy if we're on a kernel thread. There's no reason that I can think of for any caller of bio_uncopy_user() to want to do copying on a kernel thread with a random active userspace address space. Huge thanks to Costa Sapuntzakis for the original pointer to this bug in the sg code. Signed-off-by: Roland Dreier Tested-by: David Milburn Cc: Jens Axboe Signed-off-by: James Bottomley Signed-off-by: Greg Kroah-Hartman --- fs/bio.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/fs/bio.c b/fs/bio.c index 94bbc04dba7..c5eae725149 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, int bio_uncopy_user(struct bio *bio) { struct bio_map_data *bmd = bio->bi_private; - int ret = 0; + struct bio_vec *bvec; + int ret = 0, i; - if (!bio_flagged(bio, BIO_NULL_MAPPED)) - ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, - bmd->nr_sgvecs, bio_data_dir(bio) == READ, - 0, bmd->is_our_pages); + if (!bio_flagged(bio, BIO_NULL_MAPPED)) { + /* + * if we're in a workqueue, the request is orphaned, so + * don't copy into a random user address space, just free. + */ + if (current->mm) + ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, + bmd->nr_sgvecs, bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); + else if (bmd->is_our_pages) + bio_for_each_segment_all(bvec, bio, i) + __free_page(bvec->bv_page); + } bio_free_map_data(bmd); bio_put(bio); return ret; -- cgit v1.2.3 From bd4b69c1f7f58fc28fb636a3be006770edf58d68 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 22 Aug 2013 09:13:06 -0700 Subject: Revert "x86 get_unmapped_area(): use proper mmap base for bottom-up direction" commit 5ea80f76a56605a190a7ea16846c82aa63dbd0aa upstream. This reverts commit df54d6fa54275ce59660453e29d1228c2b45a826. 
The commit isn't necessarily wrong, but because it recalculates the random mmap_base every time, it seems to confuse user memory allocators that expect contiguous mmap allocations even when the mmap address isn't specified. In particular, the MATLAB Java runtime seems to be unhappy. See https://bugzilla.kernel.org/show_bug.cgi?id=60774 So we'll want to apply the random offset only once, and Radu has a patch for that. Revert this older commit in order to apply the other one. Reported-by: Jeff Shorey Cc: Radu Caragea Cc: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/sys_x86_64.c | 2 +- arch/x86/mm/mmap.c | 2 +- include/linux/sched.h | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 48f8375e4c6..dbded5aedb8 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = mmap_legacy_base(); + *begin = TASK_UNMAPPED_BASE; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index c1af32385ab..845df6835f9 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -98,7 +98,7 @@ static unsigned long mmap_base(void) * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -unsigned long mmap_legacy_base(void) +static unsigned long mmap_legacy_base(void) { if (mmap_is_ia32()) return TASK_UNMAPPED_BASE; diff --git a/include/linux/sched.h b/include/linux/sched.h index 3aeb14b0624..178a8d909f1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -314,7 +314,6 @@ struct nsproxy; struct user_namespace; #ifdef CONFIG_MMU -extern unsigned long mmap_legacy_base(void); extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, -- cgit v1.2.3 From ff1a668bc01c811ffcf19a3d43af58d7bd658986 Mon Sep 17 00:00:00 2001 From: Radu Caragea Date: Wed, 21 Aug 2013 20:55:59 +0300 Subject: x86 get_unmapped_area: Access mmap_legacy_base through mm_struct member commit 41aacc1eea645c99edbe8fbcf78a97dc9b862adc upstream. This is the updated version of df54d6fa5427 ("x86 get_unmapped_area(): use proper mmap base for bottom-up direction") that only randomizes the mmap base address once. 
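As an illustrative aside (not part of either patch), the allocator expectation described above can be checked from userspace: with a fixed bottom-up (legacy) mmap base, back-to-back anonymous mmap() calls with no address hint come back adjacent, which is what allocators that grow one arena out of consecutive mappings rely on. The sketch below makes that visible; the mapping size is arbitrary, and adjacency is only expected under the legacy layout (for example when the stack limit is unlimited or the compat address-space layout personality is selected).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB per mapping, arbitrary */

	/* Two anonymous mappings, no address hint given. */
	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* With a stable bottom-up base the gap is expected to be 0;
	 * a base that is re-randomized on every call breaks this. */
	printf("a=%p b=%p gap=%ld bytes\n", a, b,
	       (long)((char *)b - ((char *)a + len)));
	return 0;
}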
Signed-off-by: Radu Caragea Reported-and-tested-by: Jeff Shorey Cc: Andrew Morton Cc: Michel Lespinasse Cc: Oleg Nesterov Cc: Rik van Riel Cc: Ingo Molnar Cc: Adrian Sendroiu Cc: Kamal Mostafa Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/sys_x86_64.c | 2 +- arch/x86/mm/mmap.c | 6 ++++-- include/linux/mm_types.h | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb8..30277e27431 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = current->mm->mmap_legacy_base; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 845df6835f9..5c1ae28825c 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -112,12 +112,14 @@ static unsigned long mmap_legacy_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_base(); + if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { - mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ace9a5f01c6..4a189ba6b12 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -333,6 +333,7 @@ struct mm_struct { void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ unsigned long task_size; /* size of task vm space */ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ -- cgit v1.2.3 From 061cc2478cdacc2de2e3dd0cdfdd9bfcc5be5d93 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Fri, 16 Aug 2013 15:42:55 +0100 Subject: x86/xen: do not identity map UNUSABLE regions in the machine E820 commit 3bc38cbceb85881a8eb789ee1aa56678038b1909 upstream. If there are UNUSABLE regions in the machine memory map, dom0 will attempt to map them 1:1, which is not permitted by Xen, and the kernel will crash. There isn't anything interesting in the UNUSABLE region that the dom0 kernel needs access to, so we can avoid making the 1:1 mapping and treat it as RAM. We only do this for dom0, as that is where the tboot case shows up. A PV domU could have an UNUSABLE region in its pseudo-physical map; that would need to be handled in another patch. This fixes a boot failure on hosts with tboot: tboot marks a region in the e820 map as unusable, the dom0 kernel then attempts to map this region, and Xen does not permit unusable regions to be mapped by guests. (XEN) 0000000000000000 - 0000000000060000 (usable) (XEN) 0000000000060000 - 0000000000068000 (reserved) (XEN) 0000000000068000 - 000000000009e000 (usable) (XEN) 0000000000100000 - 0000000000800000 (usable) (XEN) 0000000000800000 - 0000000000972000 (unusable) tboot marked this region as unusable.
(XEN) 0000000000972000 - 00000000cf200000 (usable) (XEN) 00000000cf200000 - 00000000cf38f000 (reserved) (XEN) 00000000cf38f000 - 00000000cf3ce000 (ACPI data) (XEN) 00000000cf3ce000 - 00000000d0000000 (reserved) (XEN) 00000000e0000000 - 00000000f0000000 (reserved) (XEN) 00000000fe000000 - 0000000100000000 (reserved) (XEN) 0000000100000000 - 0000000630000000 (usable) Signed-off-by: David Vrabel [v1: Altered the patch and description regarding domUs with UNUSABLE regions] Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/setup.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 94eac5c85cd..0a9fb7a0b45 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) e820_add_region(start, end - start, type); } +void xen_ignore_unusable(struct e820entry *list, size_t map_size) +{ + struct e820entry *entry; + unsigned int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + if (entry->type == E820_UNUSABLE) + entry->type = E820_RAM; + } +} + /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ @@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + /* + * Xen won't allow a 1:1 mapping to be created to UNUSABLE + * regions, so if we're using the machine memory map leave the + * region as RAM as it is in the pseudo-physical map. + * + * UNUSABLE regions in domUs are not handled and will need + * a patch in the future. + */ + if (xen_initial_domain()) + xen_ignore_unusable(map, memmap.nr_entries); + /* Make sure the Xen-supplied memory map is well-ordered. */ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); -- cgit v1.2.3 From 7dae89cb15ebdc7e286b884cd79f3a4b94fcdff4 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Wed, 17 Jul 2013 15:13:15 +0300 Subject: mei: me: fix reset state machine commit 315a383ad7dbd484fafb93ef08038e3dbafbb7a8 upstream. The ME HW ready bit is down after a hw reset has been asserted or on error. Only on error do we need to enter the reset flow; an additional reset must be prevented when the reset was triggered during initialization or power up/down, or when a reset is already in progress. Tested-by: Shuah Khan Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-me.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 822170f0034..03108591c13 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -482,7 +482,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING && - dev->dev_state != MEI_DEV_INITIALIZING) { + dev->dev_state != MEI_DEV_INITIALIZING && + dev->dev_state != MEI_DEV_POWER_DOWN && + dev->dev_state != MEI_DEV_POWER_UP) { dev_dbg(&dev->pdev->dev, "FW not ready.\n"); mei_reset(dev, 1); mutex_unlock(&dev->device_lock); -- cgit v1.2.3 From 47e1cf336bc708bb59ebfcc01540dffecd16862e Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Wed, 17 Jul 2013 15:13:16 +0300 Subject: mei: don't have to clean the state on power up commit 99f22c4ef24cf87b0dae6aabe6b5e620b62961d9 upstream. When powering up, we don't have to clean up the device state; nothing is connected.
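The two MEI fixes around this point share one idea: device states that belong to normal bring-up or teardown must not kick off reset handling when the hardware transiently reports "not ready". The sketch below only illustrates that guard; the helper name is invented, the state constants are the ones visible in the diffs, and the in-tree drivers/misc/mei/mei_dev.h header is assumed.

#include "mei_dev.h"	/* in-tree MEI header, assumed for enum mei_dev_state */

/* Hypothetical helper: states in which a transient "not ready"
 * indication is expected and must not start reset handling. */
static bool mei_state_is_transitional(enum mei_dev_state state)
{
	return state == MEI_DEV_INITIALIZING ||
	       state == MEI_DEV_POWER_UP ||
	       state == MEI_DEV_POWER_DOWN;
}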
Tested-by: Shuah Khan Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index f580d30bb78..6eec689ba97 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -143,7 +143,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) dev->hbm_state = MEI_HBM_IDLE; - if (dev->dev_state != MEI_DEV_INITIALIZING) { + if (dev->dev_state != MEI_DEV_INITIALIZING && + dev->dev_state != MEI_DEV_POWER_UP) { if (dev->dev_state != MEI_DEV_DISABLED && dev->dev_state != MEI_DEV_POWER_DOWN) dev->dev_state = MEI_DEV_RESETTING; -- cgit v1.2.3 From 644f5d570740c90439fdf69e8b47647584c6dc2f Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Wed, 17 Jul 2013 15:13:17 +0300 Subject: mei: me: fix waiting for hw ready commit dab9bf41b23fe700c4a74133e41eb6a21706031e upstream. 1. MEI_INTEROP_TIMEOUT is in seconds, not in jiffies, so use the mei_secs_to_jiffies macro; while cold boot is fast, this is relevant on resume. 2. wait_event_interruptible_timeout can return with -ERESTARTSYS, so do not override it with -ETIMEDOUT. 3. Adjust the error message. Tested-by: Shuah Khan Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-me.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 03108591c13..700fe55095b 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -238,14 +238,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) if (mei_me_hw_is_ready(dev)) return 0; + dev->recvd_hw_ready = false; mutex_unlock(&dev->device_lock); err = wait_event_interruptible_timeout(dev->wait_hw_ready, - dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT); + dev->recvd_hw_ready, + mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); mutex_lock(&dev->device_lock); if (!err && !dev->recvd_hw_ready) { + if (!err) + err = -ETIMEDOUT; dev_err(&dev->pdev->dev, - "wait hw ready failed. status = 0x%x\n", err); - return -ETIMEDOUT; + "wait hw ready failed. status = %d\n", err); + return err; } dev->recvd_hw_ready = false; -- cgit v1.2.3 From ba5c60fc8f5f574c7cec70740ca19d358b780c57 Mon Sep 17 00:00:00 2001 From: Kumar Amit Mehta Date: Tue, 28 May 2013 00:31:15 -0700 Subject: md: bcache: io.c: fix a potential NULL pointer dereference commit 5c694129c8db6d89c9be109049a16510b2f70f6d upstream. bio_alloc_bioset returns NULL on failure. This fix adds a missing check for a potential NULL pointer dereference. Signed-off-by: Kumar Amit Mehta Signed-off-by: Kent Overstreet Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/io.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 48efd4dea64..d285cd49104 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -97,6 +97,8 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, if (bio->bi_rw & REQ_DISCARD) { ret = bio_alloc_bioset(gfp, 1, bs); + if (!ret) + return NULL; idx = 0; goto out; } -- cgit v1.2.3 From ae61fd4496f9d9290b9e84fc373818b2ee780137 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 26 Jun 2013 17:25:38 -0700 Subject: bcache: FUA fixes commit e49c7c374e7aacd1f04ecbc21d9dbbeeea4a77d6 upstream. Journal writes need to be marked FUA, not just REQ_FLUSH. And btree node writes have... weird ordering requirements.
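For context on the flags involved (an illustrative sketch, not taken from the patch): in the 3.10 block layer, REQ_FLUSH asks the device to flush its volatile write cache before servicing the request, so earlier completed writes become durable, while REQ_FUA asks that this particular write itself reach stable media before it completes. A journal-style write wants both, which is what the diff below adds for bcache's journal. The helper and completion callback names here are hypothetical.

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical completion: with REQ_FUA set, the bio's data is already
 * on stable media by the time this runs. */
static void example_end_io(struct bio *bio, int error)
{
	bio_put(bio);
}

/* Hypothetical helper: submit an already-filled bio as a durable,
 * journal-style write. The flush covers prior completed writes; FUA
 * covers this write itself. */
static void example_submit_journal_write(struct block_device *bdev,
					 struct bio *bio)
{
	bio->bi_bdev = bdev;
	bio->bi_end_io = example_end_io;
	submit_bio(WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}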
Signed-off-by: Kent Overstreet Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/btree.c | 23 +++++++++++++++++++++-- drivers/md/bcache/journal.c | 2 +- drivers/md/bcache/request.c | 13 ++++++++++++- 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 7b687a6f3de..833c590806b 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -326,10 +326,25 @@ static void do_btree_write(struct btree *b) i->csum = btree_csum_set(b, i); btree_bio_init(b); - b->bio->bi_rw = REQ_META|WRITE_SYNC; + b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); bch_bio_map(b->bio, i); + /* + * If we're appending to a leaf node, we don't technically need FUA - + * this write just needs to be persisted before the next journal write, + * which will be marked FLUSH|FUA. + * + * Similarly if we're writing a new btree root - the pointer is going to + * be in the next journal entry. + * + * But if we're writing a new btree node (that isn't a root) or + * appending to a non leaf btree node, we need either FUA or a flush + * when we write the parent with the new pointer. FUA is cheaper than a + * flush, and writes appending to leaf nodes aren't blocking anything so + * just make all btree node writes FUA to keep things sane. + */ + bkey_copy(&k.key, &b->key); SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); @@ -2142,6 +2157,9 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c) void bch_btree_set_root(struct btree *b) { unsigned i; + struct closure cl; + + closure_init_stack(&cl); BUG_ON(!b->written); @@ -2155,8 +2173,9 @@ void bch_btree_set_root(struct btree *b) b->c->root = b; __bkey_put(b->c, &b->key); - bch_journal_meta(b->c, NULL); + bch_journal_meta(b->c, &cl); pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0)); + closure_sync(&cl); } /* Cache lookup */ diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 8a54d3b4f51..b49abb246bb 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -622,7 +622,7 @@ static void journal_write_unlocked(struct closure *cl) bio_reset(bio); bio->bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH; + bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; bio->bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 2f36743ce70..afb9a998a73 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1053,9 +1053,20 @@ static void request_write(struct cached_dev *dc, struct search *s) trace_bcache_writethrough(s->orig_bio); closure_bio_submit(bio, cl, s->d); } else { - s->op.cache_bio = bio; trace_bcache_writeback(s->orig_bio); bch_writeback_add(dc, bio_sectors(bio)); + + if (s->op.flush_journal) { + /* Also need to send a flush to the backing device */ + s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, + dc->disk.bio_split); + + bio->bi_size = 0; + bio->bi_vcnt = 0; + closure_bio_submit(bio, cl, s->d); + } else { + s->op.cache_bio = bio; + } } out: closure_call(&s->op.cl, bch_insert_data, NULL, cl); -- cgit v1.2.3 From 8bf3379a74bc9132751bfa685bad2da318fd59d7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 29 Aug 2013 09:47:51 -0700 Subject: Linux 3.10.10 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4b31d6238f7..b119684ddb2 
100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 10 -SUBLEVEL = 9 +SUBLEVEL = 10 EXTRAVERSION = NAME = TOSSUG Baby Fish -- cgit v1.2.3