Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 7
-rw-r--r--  include/acpi/actypes.h | 1
-rw-r--r--  include/asm-generic/bitops/lock.h | 14
-rw-r--r--  include/asm-generic/cputime_nsecs.h | 5
-rw-r--r--  include/asm-generic/early_ioremap.h | 6
-rw-r--r--  include/asm-generic/msi.h | 32
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 3
-rw-r--r--  include/crypto/hash.h | 7
-rw-r--r--  include/crypto/if_alg.h | 11
-rw-r--r--  include/drm/drm_cache.h | 11
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 25
-rw-r--r--  include/drm/drm_fb_helper.h | 2
-rw-r--r--  include/drm/drm_fixed.h | 53
-rw-r--r--  include/drm/drm_pciids.h | 1
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 14
-rw-r--r--  include/linux/acpi.h | 74
-rw-r--r--  include/linux/ata.h | 4
-rw-r--r--  include/linux/bcma/bcma.h | 1
-rw-r--r--  include/linux/bio.h | 37
-rw-r--r--  include/linux/can/dev.h | 22
-rw-r--r--  include/linux/compiler-gcc.h | 202
-rw-r--r--  include/linux/compiler-gcc3.h | 23
-rw-r--r--  include/linux/compiler-gcc4.h | 87
-rw-r--r--  include/linux/compiler-gcc5.h | 65
-rw-r--r--  include/linux/compiler.h | 95
-rw-r--r--  include/linux/console.h | 1
-rw-r--r--  include/linux/cpufreq.h | 67
-rw-r--r--  include/linux/dcache.h | 59
-rw-r--r--  include/linux/device.h | 20
-rw-r--r--  include/linux/devpts_fs.h | 4
-rw-r--r--  include/linux/dma-mapping.h | 13
-rw-r--r--  include/linux/efi.h | 5
-rw-r--r--  include/linux/filter.h | 19
-rw-r--r--  include/linux/fs.h | 25
-rw-r--r--  include/linux/hash.h | 20
-rw-r--r--  include/linux/hugetlb.h | 17
-rw-r--r--  include/linux/if_link.h | 1
-rw-r--r--  include/linux/iio/iio.h | 17
-rw-r--r--  include/linux/init_task.h | 8
-rw-r--r--  include/linux/iommu.h | 8
-rw-r--r--  include/linux/ipv6.h | 2
-rw-r--r--  include/linux/irq.h | 67
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 163
-rw-r--r--  include/linux/irqchip/arm-gic.h | 4
-rw-r--r--  include/linux/irqdomain.h | 127
-rw-r--r--  include/linux/irqhandler.h | 14
-rw-r--r--  include/linux/jbd2.h | 8
-rw-r--r--  include/linux/jhash.h | 17
-rw-r--r--  include/linux/kasan.h | 100
-rw-r--r--  include/linux/kconfig.h | 9
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/kexec.h | 4
-rw-r--r--  include/linux/kmemleak.h | 6
-rw-r--r--  include/linux/libata.h | 5
-rw-r--r--  include/linux/mempool.h | 5
-rw-r--r--  include/linux/mfd/samsung/s2mps11.h | 2
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 39
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/linux/module.h | 19
-rw-r--r--  include/linux/moduleloader.h | 7
-rw-r--r--  include/linux/msi.h | 193
-rw-r--r--  include/linux/mtd/nand.h | 10
-rw-r--r--  include/linux/netdevice.h | 8
-rw-r--r--  include/linux/netfilter/x_tables.h | 16
-rw-r--r--  include/linux/nfs_fs.h | 4
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/of.h | 141
-rw-r--r--  include/linux/of_address.h | 4
-rw-r--r--  include/linux/of_device.h | 3
-rw-r--r--  include/linux/of_iommu.h | 25
-rw-r--r--  include/linux/of_irq.h | 1
-rw-r--r--  include/linux/of_pci.h | 15
-rw-r--r--  include/linux/of_pdt.h | 3
-rw-r--r--  include/linux/of_platform.h | 6
-rw-r--r--  include/linux/pageblock-flags.h | 2
-rw-r--r--  include/linux/pci.h | 35
-rw-r--r--  include/linux/pipe_fs_i.h | 4
-rw-r--r--  include/linux/platform_data/asoc-s3c.h | 4
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pm_opp.h | 107
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/property.h | 73
-rw-r--r--  include/linux/ptrace.h | 24
-rw-r--r--  include/linux/radix-tree.h | 23
-rw-r--r--  include/linux/rcupdate.h | 15
-rw-r--r--  include/linux/regulator/consumer.h | 10
-rw-r--r--  include/linux/rhashtable.h | 2
-rw-r--r--  include/linux/rmap.h | 14
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/skbuff.h | 3
-rw-r--r--  include/linux/slab.h | 11
-rw-r--r--  include/linux/slub_def.h | 19
-rw-r--r--  include/linux/sunrpc/clnt.h | 2
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/thermal.h | 7
-rw-r--r--  include/linux/tracepoint.h | 16
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/ucs2_string.h | 4
-rw-r--r--  include/linux/usb.h | 5
-rw-r--r--  include/linux/usb/ehci_def.h | 4
-rw-r--r--  include/linux/usb/hcd.h | 1
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/linux/usb_usual.h | 2
-rw-r--r--  include/linux/vmalloc.h | 14
-rw-r--r--  include/net/af_unix.h | 7
-rw-r--r--  include/net/codel.h | 4
-rw-r--r--  include/net/dst.h | 33
-rw-r--r--  include/net/inet_common.h | 3
-rw-r--r--  include/net/inet_connection_sock.h | 3
-rw-r--r--  include/net/inet_ecn.h | 19
-rw-r--r--  include/net/ip.h | 1
-rw-r--r--  include/net/ip6_tunnel.h | 3
-rw-r--r--  include/net/ip_tunnels.h | 3
-rw-r--r--  include/net/ipv6.h | 22
-rw-r--r--  include/net/iw_handler.h | 6
-rw-r--r--  include/net/mac802154.h | 8
-rw-r--r--  include/net/netfilter/nf_queue.h | 2
-rw-r--r--  include/net/netns/sctp.h | 1
-rw-r--r--  include/net/sch_generic.h | 20
-rw-r--r--  include/net/sctp/sctp.h | 7
-rw-r--r--  include/net/sctp/structs.h | 4
-rw-r--r--  include/net/sock.h | 22
-rw-r--r--  include/net/tcp_states.h | 4
-rw-r--r--  include/net/vxlan.h | 5
-rw-r--r--  include/rdma/ib.h | 16
-rw-r--r--  include/scsi/scsi_devinfo.h | 1
-rw-r--r--  include/sound/pcm.h | 160
-rw-r--r--  include/sound/rawmidi.h | 4
-rw-r--r--  include/sound/wm8904.h | 2
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 904
-rw-r--r--  include/target/target_core_base.h | 4
-rw-r--r--  include/target/target_core_fabric.h | 4
-rw-r--r--  include/uapi/drm/radeon_drm.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 8
-rw-r--r--  include/uapi/linux/pci_regs.h | 1
-rw-r--r--  include/uapi/linux/usbdevice_fs.h | 3
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 30
-rw-r--r--  include/xen/interface/sched.h | 8
-rw-r--r--  include/xen/xen-ops.h | 26
141 files changed, 3361 insertions, 537 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 8de31d472fad..20c757cdd391 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -338,6 +338,12 @@ struct acpi_device_physical_node {
bool put_online:1;
};
+/* ACPI Device Specific Data (_DSD) */
+struct acpi_device_data {
+ const union acpi_object *pointer;
+ const union acpi_object *properties;
+};
+
/* Device */
struct acpi_device {
int device_type;
@@ -354,6 +360,7 @@ struct acpi_device {
struct acpi_device_wakeup wakeup;
struct acpi_device_perf performance;
struct acpi_device_dir dir;
+ struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
struct acpi_driver *driver;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 93ef42245647..f6406e70ea4a 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -573,6 +573,7 @@ typedef u64 acpi_integer;
#define ACPI_NO_ACPI_ENABLE 0x10
#define ACPI_NO_DEVICE_INIT 0x20
#define ACPI_NO_OBJECT_INIT 0x40
+#define ACPI_NO_FACS_INIT 0x80
/*
* Initialization state
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94806..8ef0ccbf8167 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
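
A minimal sketch of the bit-lock pattern the comment above describes, assuming
the lock bit protects every other bit in the same word (all names below are
illustrative, not part of this patch):

static void example_update(unsigned long *word, unsigned long flags)
{
	while (test_and_set_bit_lock(0, word))		/* acquire bit 0 */
		cpu_relax();
	*word |= flags << 1;				/* non-atomic update of the guarded bits */
	__clear_bit_unlock(0, word);			/* release */
}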
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0419485891f2..0f1c6f315cdc 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
*/
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+ val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
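
The new casts matter where tv_sec is a 32-bit long: the multiplication would
otherwise be performed in 32 bits and wrap. A rough illustration (not from the
patch):

	long sec  = 5;
	u32  bad  = sec * NSEC_PER_SEC;		/* wraps to 705032704 in 32-bit arithmetic */
	u64  good = (u64)sec * NSEC_PER_SEC;	/* 5000000000, as intended */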
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c04fb2..e539f27ec51b 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -33,6 +33,12 @@ extern void early_ioremap_setup(void);
*/
extern void early_ioremap_reset(void);
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 000000000000..61c58d8878ce
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_GENERIC_MSI_H
+#define __ASM_GENERIC_MSI_H
+
+#include <linux/types.h>
+
+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
+#endif
+
+struct msi_desc;
+
+/**
+ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
+ * @desc: Pointer to msi descriptor
+ * @hwirq: Associated hw interrupt number in the domain
+ * @scratchpad: Storage for implementation specific scratch data
+ *
+ * Architectures can provide their own implementation by not including
+ * asm-generic/msi.h into their arch specific header file.
+ */
+typedef struct msi_alloc_info {
+ struct msi_desc *desc;
+ irq_hw_number_t hwirq;
+ union {
+ unsigned long ul;
+ void *ptr;
+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
+} msi_alloc_info_t;
+
+#define GENERIC_MSI_DOMAIN_OPS 1
+
+#endif
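
A hedged sketch of how an MSI irqdomain callback might use the scratchpad
words; the callback shape follows the msi_prepare hook added elsewhere in this
series, and the stored values are purely illustrative:

static int example_msi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *info)
{
	/* stash per-allocation data for later domain ops to pick up */
	info->scratchpad[0].ptr = dev;
	info->scratchpad[1].ul  = nvec;
	return 0;
}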
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aa70cbda327c..ac78910d7416 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,6 +164,7 @@
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
@@ -477,6 +478,7 @@
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
+ *(SORT(.init_array.*)) \
*(.init_array) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
@@ -497,6 +499,7 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
+ IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 74b13ec1ebd4..6857e565a14d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -99,6 +99,7 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
+ bool has_setkey;
struct crypto_tfm base;
};
@@ -186,6 +187,12 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+
+static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
+{
+ return tfm->has_setkey;
+}
+
int crypto_ahash_finup(struct ahash_request *req);
int crypto_ahash_final(struct ahash_request *req);
int crypto_ahash_digest(struct ahash_request *req);
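
A minimal sketch of what the new flag enables: callers can refuse to operate on
a keyed hash (e.g. an HMAC) before a key has been set. The function and
parameter names are illustrative:

static int example_hash_ready(struct crypto_ahash *tfm, bool key_is_set)
{
	if (crypto_ahash_has_setkey(tfm) && !key_is_set)
		return -ENOKEY;		/* algorithm requires a key first */
	return 0;
}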
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213..bfefd8139e18 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -30,6 +30,9 @@ struct alg_sock {
struct sock *parent;
+ unsigned int refcnt;
+ unsigned int nokey_refcnt;
+
const struct af_alg_type *type;
void *private;
};
@@ -49,8 +52,10 @@ struct af_alg_type {
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
+ int (*accept_nokey)(void *private, struct sock *sk);
struct proto_ops *ops;
+ struct proto_ops *ops_nokey;
struct module *owner;
char name[14];
};
@@ -64,6 +69,7 @@ int af_alg_register_type(const struct af_alg_type *type);
int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
+void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
@@ -80,11 +86,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
return (struct alg_sock *)sk;
}
-static inline void af_alg_release_parent(struct sock *sk)
-{
- sock_put(alg_sk(sk)->parent);
-}
-
static inline void af_alg_init_completion(struct af_alg_completion *completion)
{
init_completion(&completion->completion);
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 7bfb063029d8..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,4 +35,15 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+ return false;
+#else
+ return true;
+#endif
+}
+
#endif
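
A short sketch of the intended use, assuming a driver choosing the caching mode
for a buffer (the flag names are made up for illustration):

	if (drm_arch_can_wc_memory())
		bo_flags |= EXAMPLE_PLACEMENT_WC;	/* write-combining is safe here */
	else
		bo_flags |= EXAMPLE_PLACEMENT_CACHED;	/* PPC / Loongson-3 fallback */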
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 338fc1053835..e4196de8b77c 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
* @port_num: port number
* @input: if this port is an input port.
* @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
struct drm_dp_mst_port {
struct kref kref;
- /* if dpcd 1.2 device is on this port - its GUID info */
- bool guid_valid;
- u8 guid[16];
-
u8 port_num;
bool input;
bool mcs;
@@ -107,10 +101,12 @@ struct drm_dp_mst_port {
* @tx_slots: transmission slots for this device.
* @last_seqno: last sequence number used to talk to this.
* @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
+ * identified by port number.
*
* This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream port of parent branches.
*/
struct drm_dp_mst_branch {
struct kref kref;
@@ -129,6 +125,9 @@ struct drm_dp_mst_branch {
struct drm_dp_sideband_msg_tx *tx_slots[2];
int last_seqno;
bool link_address_sent;
+
+ /* global unique identifier to identify branch devices */
+ u8 guid[16];
};
@@ -401,11 +400,9 @@ struct drm_dp_payload {
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
* @mst_state: if this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*
@@ -427,13 +424,11 @@ struct drm_dp_mst_topology_mgr {
struct drm_dp_sideband_msg_rx up_req_recv;
/* pointer to info about the initial MST device */
- struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+ struct mutex lock; /* protects mst_state + primary + dpcd */
bool mst_state;
struct drm_dp_mst_branch *mst_primary;
- /* primary MST device GUID */
- bool guid_valid;
- u8 guid[16];
+
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 sink_count;
int pbn_div;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e3488..39d099aa09e1 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -119,7 +119,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index d639049a613d..553210c02ee0 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON 1LL
+#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
}
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
{
return ((s64)a) >> DRM_FIXED_POINT;
}
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+ if (a > 0)
+ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+ else
+ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
{
unsigned shift, sign = (a >> 63) & 1;
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
return result;
}
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+ s64 res;
+ bool a_neg = a < 0;
+ bool b_neg = b < 0;
+ u64 a_abs = a_neg ? -a : a;
+ u64 b_abs = b_neg ? -b : b;
+ u64 rem;
+
+ /* determine integer part */
+ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
+
+ /* determine fractional part */
+ {
+ u32 i = DRM_FIXED_POINT;
+
+ do {
+ rem <<= 1;
+ res_abs <<= 1;
+ if (rem >= b_abs) {
+ res_abs |= 1;
+ rem -= b_abs;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ u64 summand = (rem << 1) >= b_abs;
+
+ res_abs += summand;
+ }
+
+ res = (s64) res_abs;
+ if (a_neg ^ b_neg)
+ res = -res;
+ return res;
+}
+
static inline s64 drm_fixp_exp(s64 x)
{
s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
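
With DRM_FIXED_POINT equal to 32 (a signed 32.32 format), the new helpers
behave roughly as in this sketch:

	s64 third = drm_fixp_from_fraction(1, 3);	/* ~0x55555555 in the fractional bits, ~0.333 */
	int up    = drm_fixp2int_ceil(third);		/* rounds the positive fraction up: 1 */
	int down  = drm_fixp2int(third);		/* arithmetic shift, floors: 0 */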
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0ccf7f267ff9..3f3367c42896 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement: Proposed placement for the buffer object
+ * @mem: The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags);
+
/**
* ttm_bo_validate
*
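
A hedged sketch of how a driver might use the new helper to skip a full
validation when the buffer already sits in an acceptable placement (error
handling trimmed):

	uint32_t new_flags;

	if (ttm_bo_mem_compat(placement, &bo->mem, &new_flags))
		return 0;					/* nothing to move */
	return ttm_bo_validate(bo, placement, interruptible, false);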
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 407a12f663eb..1c7eaa718e65 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/device.h>
+#include <linux/property.h>
#ifndef _LINUX
#define _LINUX
@@ -414,6 +415,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -449,6 +451,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
}
static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
static inline int early_acpi_boot_init(void)
{
@@ -659,4 +662,75 @@ do { \
#endif
#endif
+/* Device properties */
+
+#define MAX_ACPI_REFERENCE_ARGS 8
+struct acpi_reference_args {
+ struct acpi_device *adev;
+ size_t nargs;
+ u64 args[MAX_ACPI_REFERENCE_ARGS];
+};
+
+#ifdef CONFIG_ACPI
+int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+ acpi_object_type type, const union acpi_object **obj);
+int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj);
+int acpi_dev_get_property_reference(struct acpi_device *adev, const char *name,
+ const char *cells_name, size_t index,
+ struct acpi_reference_args *args);
+
+int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
+ void **valptr);
+int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val);
+int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val, size_t nval);
+#else
+static inline int acpi_dev_get_property(struct acpi_device *adev,
+ const char *name, acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_array(struct acpi_device *adev,
+ const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
+ const char *name, const char *cells_name,
+ size_t index, struct acpi_reference_args *args)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_get(struct acpi_device *adev,
+ const char *propname,
+ void **valptr)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val, size_t nval)
+{
+ return -ENXIO;
+}
+
+#endif
+
#endif /*_LINUX_ACPI_H*/
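
A minimal sketch of reading a _DSD device property through the new interface;
the property name is only an example and adev is assumed to be a valid
struct acpi_device:

	const union acpi_object *obj;
	u64 freq = 0;

	if (!acpi_dev_get_property(adev, "clock-frequency", ACPI_TYPE_INTEGER, &obj))
		freq = obj->integer.value;	/* found: integer payload of the property */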
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f2f4d8da97c0..f7ff6554a354 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -484,8 +484,8 @@ enum ata_tf_protocols {
};
enum ata_ioctls {
- ATA_IOC_GET_IO32 = 0x309,
- ATA_IOC_SET_IO32 = 0x324,
+ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
+ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
};
/* core structures */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 729f48e6b20b..828198db5614 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -154,6 +154,7 @@ struct bcma_host_ops {
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
+#define BCMA_CORE_SIZE 0x1000
/* Chip IDs of PCIe devices */
#define BCMA_CHIP_ID_BCM4313 0x4313
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7347f486ceca..08ce93ede229 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,6 +292,43 @@ static inline unsigned bio_segments(struct bio *bio)
*/
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (!bio_flagged(bio, BIO_CLONED)) {
+ *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
+ return;
+ }
+
+ if (unlikely(!bio_multiple_segments(bio))) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else /* in the middle of bvec */
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ /*
+ * iter.bi_bvec_done records actual length of the last bvec
+ * if this bio ends in the middle of one io vector
+ */
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
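
A hedged sketch of the intended use: fetching the boundary vectors of two bios,
for instance when deciding whether they may be merged (the actual constraint
check is elided):

	struct bio_vec end_bv, start_bv;

	bio_get_last_bvec(prev_bio, &end_bv);
	bio_get_first_bvec(next_bio, &start_bv);
	/* compare end_bv/start_bv against the queue's segment and gap limits */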
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index b37ea95bc348..71e37afd7290 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -39,8 +39,11 @@ struct can_priv {
struct can_clock clock;
enum can_state state;
- u32 ctrlmode;
- u32 ctrlmode_supported;
+
+ /* CAN controller features - see include/uapi/linux/can/netlink.h */
+ u32 ctrlmode; /* current options setting */
+ u32 ctrlmode_supported; /* options that can be modified by netlink */
+ u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct timer_list restart_timer;
@@ -105,6 +108,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
return skb->len == CANFD_MTU;
}
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ priv->ctrlmode = static_mode;
+ priv->ctrlmode_static = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+}
+
/* get data length from can_dlc with sanitized can_dlc */
u8 can_dlc2len(u8 can_dlc);
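
A small sketch of a driver whose hardware always runs with a fixed option,
using the new helper right after device allocation (the private struct is
hypothetical):

	dev = alloc_candev(sizeof(struct example_priv), 1);
	if (!dev)
		return -ENOMEM;
	/* hardware always triple-samples; report the option and make it static */
	can_set_static_ctrlmode(dev, CAN_CTRLMODE_3_SAMPLES);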
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 02ae99e8e6d3..d26a858a4c0d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -5,9 +5,9 @@
/*
* Common definitions for all gcc versions go here.
*/
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
/* Optimization barrier */
@@ -32,54 +32,63 @@
* the inline assembly constraint from =g to =r, in this particular
* case either is valid.
*/
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
- (typeof(ptr)) (__ptr + (off)); })
+#define RELOC_HIDE(ptr, off) \
+({ \
+ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); \
+})
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__
-#define __must_be_array(arr) 0
+#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old:
*/
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline inline __attribute__((always_inline)) notrace
-# define __inline__ __inline__ __attribute__((always_inline)) notrace
-# define __inline __inline __attribute__((always_inline)) notrace
+#define inline inline __attribute__((always_inline)) notrace
+#define __inline__ __inline__ __attribute__((always_inline)) notrace
+#define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
-# define inline inline notrace
-# define __inline__ __inline__ notrace
-# define __inline __inline notrace
+#define inline inline notrace
+#define __inline__ __inline__ notrace
+#define __inline __inline notrace
#endif
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
+#define __always_inline inline __attribute__((always_inline))
+#define noinline __attribute__((noinline))
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
/*
- * it doesn't make sense on ARM (currently the only user of __naked) to trace
- * naked functions because then mcount is called without stack and frame pointer
- * being set up and there is no chance to restore the lr register to the value
- * before mcount was called.
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling
+ * conventions, therefore they must be noinline and noclone.
*
- * The asm() bodies of naked functions often depend on standard calling conventions,
- * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
- * this, so we must do so ourselves. See GCC PR44290.
+ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
+ * See GCC PR44290.
*/
-#define __naked __attribute__((naked)) noinline __noclone notrace
+#define __naked __attribute__((naked)) noinline __noclone notrace
-#define __noreturn __attribute__((noreturn))
+#define __noreturn __attribute__((noreturn))
/*
* From the GCC manual:
@@ -100,19 +109,142 @@
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used __attribute__((__unused__))
+#else
+# define __used __attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __compiler_offsetof(a, b) \
+ __builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely. This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
+#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
-
-#define __always_inline inline __attribute__((always_inline))
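
For reference, the version checks above operate on a single decimal number:
gcc 4.9.2 yields GCC_VERSION = 4 * 10000 + 9 * 100 + 2 = 40902, so it passes
the ">= 40902" tests (KASAN_ABI_VERSION 3, __no_sanitize_address), while
gcc 5.0.0 yields 50000 and selects KASAN_ABI_VERSION 4.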
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89febe4d79..000000000000
--- a/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used __attribute__((__used__))
-#else
-# define __used __attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
deleted file mode 100644
index d1a558239b1a..000000000000
--- a/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
deleted file mode 100644
index c8c565952548..000000000000
--- a/include/linux/compiler-gcc5.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f872ee3..4d1995792303 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -138,7 +138,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
- if (__builtin_constant_p((cond)) ? !!(cond) : \
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
@@ -188,32 +188,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#include <uapi/linux/types.h>
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
+#define __READ_ONCE_SIZE \
+({ \
+ switch (size) { \
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
+ default: \
+ barrier(); \
+ __builtin_memcpy((void *)res, (const void *)p, size); \
+ barrier(); \
+ } \
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
{
+ __READ_ONCE_SIZE;
}
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempting to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
- switch (size) {
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
- default:
- barrier();
- __builtin_memcpy((void *)res, (const void *)p, size);
- data_access_exceeds_word_size();
- barrier();
- }
+ __READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+ __READ_ONCE_SIZE;
}
+#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
@@ -221,13 +234,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
- data_access_exceeds_word_size();
barrier();
}
}
@@ -254,11 +264,25 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering.
*/
-#define READ_ONCE(x) \
- ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+#define __READ_ONCE(x, check) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u; \
+ if (check) \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ else \
+ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
#define WRITE_ONCE(x, val) \
- ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+ ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
#endif /* __KERNEL__ */
@@ -454,6 +478,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
+
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
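
A minimal sketch of the reworked accessors; the data structure is made up for
illustration (READ_ONCE_NOCHECK would replace READ_ONCE only where KASAN must
not track the access):

	struct cfg { int a; };
	static struct cfg *live_cfg;
	static int flag;

	static int example(void)
	{
		struct cfg *c;

		WRITE_ONCE(flag, 1);			/* single, non-torn store */
		c = lockless_dereference(live_cfg);	/* load + smp_read_barrier_depends() */
		return c ? READ_ONCE(c->a) : 0;		/* single, non-torn load */
	}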
diff --git a/include/linux/console.h b/include/linux/console.h
index 7571a16bd653..ac1599bda9fc 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -150,6 +150,7 @@ extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+extern void console_flush_on_panic(void);
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 503b085b7832..6734c9150ebb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
- char name[CPUFREQ_NAME_LEN];
- u8 flags;
- void *driver_data;
+ char name[CPUFREQ_NAME_LEN];
+ u8 flags;
+ void *driver_data;
/* needed by all drivers */
- int (*init) (struct cpufreq_policy *policy);
- int (*verify) (struct cpufreq_policy *policy);
+ int (*init)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy *policy);
/* define one out of two */
- int (*setpolicy) (struct cpufreq_policy *policy);
+ int (*setpolicy)(struct cpufreq_policy *policy);
/*
* On failure, should always restore frequency to policy->restore_freq
* (i.e. old freq).
*/
- int (*target) (struct cpufreq_policy *policy, /* Deprecated */
- unsigned int target_freq,
- unsigned int relation);
- int (*target_index) (struct cpufreq_policy *policy,
- unsigned int index);
+ int (*target)(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation); /* Deprecated */
+ int (*target_index)(struct cpufreq_policy *policy,
+ unsigned int index);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
@@ -252,27 +252,31 @@ struct cpufreq_driver {
* wish to switch to intermediate frequency for some target frequency.
* In that case core will directly call ->target_index().
*/
- unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
- unsigned int index);
- int (*target_intermediate)(struct cpufreq_policy *policy,
- unsigned int index);
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
/* should be defined, if possible */
- unsigned int (*get) (unsigned int cpu);
+ unsigned int (*get)(unsigned int cpu);
/* optional */
- int (*bios_limit) (int cpu, unsigned int *limit);
+ int (*bios_limit)(int cpu, unsigned int *limit);
+
+ int (*exit)(struct cpufreq_policy *policy);
+ void (*stop_cpu)(struct cpufreq_policy *policy);
+ int (*suspend)(struct cpufreq_policy *policy);
+ int (*resume)(struct cpufreq_policy *policy);
- int (*exit) (struct cpufreq_policy *policy);
- void (*stop_cpu) (struct cpufreq_policy *policy);
- int (*suspend) (struct cpufreq_policy *policy);
- int (*resume) (struct cpufreq_policy *policy);
- struct freq_attr **attr;
+ /* Will be called after the driver is fully initialized */
+ void (*ready)(struct cpufreq_policy *policy);
+
+ struct freq_attr **attr;
/* platform specific boost support code */
- bool boost_supported;
- bool boost_enabled;
- int (*set_boost) (int state);
+ bool boost_supported;
+ bool boost_enabled;
+ int (*set_boost)(int state);
};
/* flags */
@@ -570,6 +574,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_supported(void);
int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -583,12 +589,23 @@ static inline int cpufreq_boost_enabled(void)
{
return 0;
}
+
+static inline int cpufreq_enable_boost_support(void)
+{
+ return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ return false;
+}
#endif
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
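
A hedged sketch of how a driver whose frequency table contains boost entries
might use the two new calls (the attribute array name is hypothetical; error
handling trimmed):

	if (policy_has_boost_freq(policy)) {
		if (!cpufreq_enable_boost_support())
			example_driver_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}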
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 1c2f1b84468b..340ee0dae93b 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -160,6 +160,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
+ struct inode *(*d_select_inode)(struct dentry *, unsigned);
} ____cacheline_aligned;
/*
@@ -222,6 +223,7 @@ struct dentry_operations {
#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
#define DCACHE_MAY_FREE 0x00800000
+#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
extern seqlock_t rename_lock;
@@ -468,4 +470,61 @@ static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+ return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+ return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file. The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+ struct inode *inode = upper->d_inode;
+
+ return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file. It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+ return upper;
+}
+
#endif /* __LINUX_DCACHE_H */
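
In short: a filesystem looking at its own dentries keeps using d_inode(), while
code that may sit on top of a union/overlay asks for the backing objects. A
tiny sketch:

	struct inode  *own  = d_inode(dentry);		/* the dentry's own inode */
	struct inode  *real = d_backing_inode(dentry);	/* what an open(2) would operate on */
	struct dentry *rdnt = d_backing_dentry(dentry);	/* currently the dentry itself */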
diff --git a/include/linux/device.h b/include/linux/device.h
index ce1f21608b16..61a62c201264 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -690,6 +690,7 @@ struct acpi_dev_node {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -750,6 +751,9 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain;
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
@@ -837,6 +841,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
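
A hedged sketch of how bus code might wire a device to its MSI irqdomain with
the new accessors (the domain lookup helper is hypothetical):

	/* at bus scan time */
	dev_set_msi_domain(&pdev->dev, example_find_msi_domain(bus));

	/* later, when MSIs are requested */
	if (!dev_get_msi_domain(&pdev->dev))
		return -ENODEV;		/* no MSI parent domain configured */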
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 251a2090a554..e0ee0b3000b2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d5d388160f42..c3007cb4bfa6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev);
-#ifndef set_arch_dma_coherent_ops
-static inline int set_arch_dma_coherent_ops(struct device *dev)
-{
- return 0;
-}
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, struct iommu_ops *iommu,
+ bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
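An architecture that wants these callbacks provides its own definitions and defines the matching preprocessor symbols so the empty stubs above drop out; a rough sketch of what such an arch header could look like — the architecture name and the duties listed in the body are assumptions, not taken from this patch:

/* arch/foo/include/asm/dma-mapping.h -- hypothetical architecture */
#define arch_setup_dma_ops arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent)
{
	/*
	 * Typical duties: remember dma_base/size for this device, pick
	 * coherent vs. non-coherent dma_map_ops, and attach the device to
	 * @iommu when the firmware code found one.  The helpers for that
	 * are arch-specific and not shown here.
	 */
}

#define arch_teardown_dma_ops arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev)
{
	/* Undo whatever arch_setup_dma_ops() installed, e.g. detach the IOMMU. */
}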
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0949f9c7e872..777c57596863 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1155,7 +1155,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ size_t len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ca95abd2bed1..c80b1a9d0715 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -427,6 +427,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
#define BPF_ANC BIT(15)
+static inline bool bpf_needs_clear_a(const struct sock_filter *first)
+{
+ switch (first->code) {
+ case BPF_RET | BPF_K:
+ case BPF_LD | BPF_W | BPF_LEN:
+ return false;
+
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
+ if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
+ return true;
+ return false;
+
+ default:
+ return true;
+ }
+}
+
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
BUG_ON(ftest->code & BPF_ANC);
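The helper tells a classic-BPF JIT whether its prologue must zero the A register: the store can be skipped when the first instruction returns or overwrites A before it could be read. A sketch of a JIT prologue consulting it, with the actual instruction emission left as an arch-specific placeholder:

#include <linux/filter.h>

static void sketch_jit_prologue(const struct sock_filter *insns /*, struct jit_ctx *ctx */)
{
	if (bpf_needs_clear_a(&insns[0])) {
		/* emit the arch-specific "A = 0" instruction here */
	}
}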
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9ab779e8a63c..2a41353033d3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -906,12 +906,11 @@ int locks_in_grace(struct net *);
* FIXME: should we create a separate "struct lock_request" to help distinguish
* these two uses?
*
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
*
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
*
* Obviously, the last two criteria only matter for POSIX locks.
*/
@@ -1552,7 +1551,6 @@ struct inode_operations {
int (*set_acl)(struct inode *, struct posix_acl *, int);
/* WARNING: probably going away soon, do not use! */
- int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1791,6 +1789,7 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
+#define FS_USERNS_VISIBLE 32 /* FS must already be visible */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
@@ -1878,7 +1877,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
-extern bool fs_fully_visible(struct file_system_type *);
extern int current_umask(void);
@@ -1947,8 +1945,9 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
if (inode->i_flock)
@@ -1960,8 +1959,9 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
if (inode->i_flock)
@@ -2067,8 +2067,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
+ const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..a75b1009d3f7 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -33,12 +33,28 @@
#error Wordsize not 32 or 64
#endif
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_64;
#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
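On 64-bit, hash_64() is now a single multiply by GOLDEN_RATIO_64 followed by taking the top @bits bits (the shift that follows this hunk). A small usage sketch, bucketing a pointer into a table whose size is an assumption here:

#include <linux/hash.h>

#define SKETCH_HASH_BITS 8	/* 256 buckets, purely illustrative */

static unsigned int sketch_bucket_for(const void *ptr)
{
	/* top SKETCH_HASH_BITS bits of ptr * GOLDEN_RATIO_64 (on 64-bit) */
	return hash_64((unsigned long)ptr, SKETCH_HASH_BITS);
}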
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 14020c7796af..e6192934cdfa 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -415,15 +415,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
-static inline bool hugepages_supported(void)
-{
- /*
- * Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- return HPAGE_SHIFT != 0;
-}
+#ifndef hugepages_supported
+/*
+ * Some platforms decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 119130e9298b..da4929927f69 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -14,5 +14,6 @@ struct ifla_vf_info {
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
+ __u32 rss_query_en;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 15dc6bc2bdd2..6c17af80823c 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -614,6 +614,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
/**
+ * IIO_RAD_TO_DEGREE() - Convert rad to degree
+ * @rad: A value in rad
+ *
+ * Returns the given value converted from rad to degree
+ */
+#define IIO_RAD_TO_DEGREE(rad) \
+ (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
+
+/**
* IIO_G_TO_M_S_2() - Convert g to meter / second**2
* @g: A value in g
*
@@ -621,4 +630,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
*/
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+/**
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
+ * @ms2: A value in meter / second**2
+ *
+ * Returns the given value converted from meter / second**2 to g
+ */
+#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
+
#endif /* _INDUSTRIAL_IO_H_ */
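Both new macros mirror the fixed-point convention of their existing counterparts and round to nearest by adding half the divisor before the integer division, so round-trips through the forward macros come back exactly for representable values:

/* Worked examples (same fixed-point scaling as the forward macros): */
/* IIO_DEGREE_TO_RAD(180000000) == 3141590, IIO_RAD_TO_DEGREE(3141590) == 180000000 */
/* IIO_G_TO_M_S_2(1000000)      == 9806650, IIO_M_S_2_TO_G(9806650)    == 1000000   */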
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dc778344d298..57e5a50f3b16 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -173,6 +173,13 @@ extern struct task_group root_task_group;
# define INIT_RT_MUTEXES(tsk)
#endif
+#ifdef CONFIG_KASAN
+# define INIT_KASAN(tsk) \
+ .kasan_depth = 1,
+#else
+# define INIT_KASAN(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -245,6 +252,7 @@ extern struct task_group root_task_group;
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
+ INIT_KASAN(tsk) \
}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e6a7c9ff72f2..415c7613d02c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/types.h>
#include <trace/events/iommu.h>
@@ -102,7 +103,9 @@ enum iommu_attr {
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -132,7 +135,12 @@ struct iommu_ops {
 /* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
unsigned long pgsize_bitmap;
+ void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff560537dd61..2725b03b4ae2 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -212,7 +212,7 @@ struct ipv6_pinfo {
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 86a08759072b..daca8731136c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -15,11 +15,13 @@
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
+#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
@@ -27,11 +29,7 @@
struct seq_file;
struct module;
-struct irq_desc;
-struct irq_data;
-typedef void (*irq_flow_handler_t)(unsigned int irq,
- struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+struct msi_msg;
/*
* IRQ line status.
@@ -115,10 +113,14 @@ enum {
*
* IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for the core. Special code to
+ * support stacked irqchips, indicating that all
+ * descendant irqchips should be skipped.
*/
enum {
IRQ_SET_MASK_OK = 0,
IRQ_SET_MASK_OK_NOCOPY,
+ IRQ_SET_MASK_OK_DONE,
};
struct msi_desc;
@@ -135,6 +137,8 @@ struct irq_domain;
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
+ * @parent_data: pointer to parent struct irq_data to support hierarchy
+ * irq_domain
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
@@ -153,6 +157,9 @@ struct irq_data {
unsigned int state_use_accessors;
struct irq_chip *chip;
struct irq_domain *domain;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_data *parent_data;
+#endif
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
@@ -317,6 +324,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* any other callback related to this irq
* @irq_release_resources: optional to release resources acquired with
* irq_request_resources
+ * @irq_compose_msi_msg: optional to compose message content for MSI
+ * @irq_write_msi_msg: optional to write message content for MSI
* @flags: chip specific flags
*/
struct irq_chip {
@@ -353,6 +362,9 @@ struct irq_chip {
int (*irq_request_resources)(struct irq_data *data);
void (*irq_release_resources)(struct irq_data *data);
+ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+ void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+
unsigned long flags;
};
@@ -440,6 +452,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
+extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_ack_parent(struct irq_data *data);
+extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
+extern void irq_chip_mask_parent(struct irq_data *data);
+extern void irq_chip_unmask_parent(struct irq_data *data);
+extern void irq_chip_eoi_parent(struct irq_data *data);
+extern int irq_chip_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force);
+#endif
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
@@ -584,7 +608,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
return d ? d->msi_desc : NULL;
}
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->msi_desc;
}
@@ -641,13 +665,6 @@ void arch_teardown_hwirq(unsigned int irq);
void irq_init_desc(unsigned int irq);
#endif
-#ifndef irq_reg_writel
-# define irq_reg_writel(val, addr) writel(val, addr)
-#endif
-#ifndef irq_reg_readl
-# define irq_reg_readl(addr) readl(addr)
-#endif
-
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
* @enable: Enable register offset to reg_base
@@ -694,6 +711,8 @@ struct irq_chip_type {
* struct irq_chip_generic - Generic irq chip data structure
* @lock: Lock to protect register and cache data access
* @reg_base: Register base address (virtual)
+ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
+ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -718,6 +737,8 @@ struct irq_chip_type {
struct irq_chip_generic {
raw_spinlock_t lock;
void __iomem *reg_base;
+ u32 (*reg_readl)(void __iomem *addr);
+ void (*reg_writel)(u32 val, void __iomem *addr);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
@@ -742,12 +763,14 @@ struct irq_chip_generic {
* the parent irq. Usually GPIO implementations
* @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
* @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
+ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
*/
enum irq_gc_flags {
IRQ_GC_INIT_MASK_CACHE = 1 << 0,
IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
IRQ_GC_NO_MASK = 1 << 3,
+ IRQ_GC_BE_IO = 1 << 4,
};
/*
@@ -823,4 +846,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+static inline void irq_reg_writel(struct irq_chip_generic *gc,
+ u32 val, int reg_offset)
+{
+ if (gc->reg_writel)
+ gc->reg_writel(val, gc->reg_base + reg_offset);
+ else
+ writel(val, gc->reg_base + reg_offset);
+}
+
+static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
+ int reg_offset)
+{
+ if (gc->reg_readl)
+ return gc->reg_readl(gc->reg_base + reg_offset);
+ else
+ return readl(gc->reg_base + reg_offset);
+}
+
#endif /* _LINUX_IRQ_H */
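With the accessor pointers in struct irq_chip_generic, generic-chip callbacks no longer hard-code readl()/writel(): a driver either fills in gc->reg_readl/reg_writel itself or just passes the new IRQ_GC_BE_IO flag and lets the core pick big-endian accessors. A sketch of an ack callback written against the new helpers, modelled on the existing irq_gc_* callbacks:

static void sketch_gc_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	irq_reg_writel(gc, d->mask, ct->regs.ack);	/* honours gc->reg_writel */
	irq_gc_unlock(gc);
}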
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba86..f0b4c487a60e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_LPIS (1U << 17)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -76,9 +80,42 @@
#define GICR_MOVALLR 0x0110
#define GICR_PIDR2 GICD_PIDR2
+#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+
+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
+#define GICR_PROPBASER_NonShareable (0U << 10)
+#define GICR_PROPBASER_InnerShareable (1U << 10)
+#define GICR_PROPBASER_OuterShareable (2U << 10)
+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PROPBASER_nCnB (0U << 7)
+#define GICR_PROPBASER_nC (1U << 7)
+#define GICR_PROPBASER_RaWt (2U << 7)
+#define GICR_PROPBASER_RaWb (3U << 7)
+#define GICR_PROPBASER_WaWt (4U << 7)
+#define GICR_PROPBASER_WaWb (5U << 7)
+#define GICR_PROPBASER_RaWaWt (6U << 7)
+#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
+#define GICR_PENDBASER_NonShareable (0U << 10)
+#define GICR_PENDBASER_InnerShareable (1U << 10)
+#define GICR_PENDBASER_OuterShareable (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB (0U << 7)
+#define GICR_PENDBASER_nC (1U << 7)
+#define GICR_PENDBASER_RaWt (2U << 7)
+#define GICR_PENDBASER_RaWb (3U << 7)
+#define GICR_PENDBASER_WaWt (4U << 7)
+#define GICR_PENDBASER_WaWb (5U << 7)
+#define GICR_PENDBASER_RaWaWt (6U << 7)
+#define GICR_PENDBASER_RaWaWb (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
@@ -91,9 +128,100 @@
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define LPI_PROP_GROUP1 (1 << 1)
+#define LPI_PROP_ENABLED (1 << 0)
+
+/*
+ * ITS registers, offsets from ITS_base
+ */
+#define GITS_CTLR 0x0000
+#define GITS_IIDR 0x0004
+#define GITS_TYPER 0x0008
+#define GITS_CBASER 0x0080
+#define GITS_CWRITER 0x0088
+#define GITS_CREADR 0x0090
+#define GITS_BASER 0x0100
+#define GITS_PIDR2 GICR_PIDR2
+
+#define GITS_TRANSLATER 0x10040
+
+#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_QUIESCENT (1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT 13
+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_PTA (1UL << 19)
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_nCnB (0UL << 59)
+#define GITS_CBASER_nC (1UL << 59)
+#define GITS_CBASER_RaWt (2UL << 59)
+#define GITS_CBASER_RaWb (3UL << 59)
+#define GITS_CBASER_WaWt (4UL << 59)
+#define GITS_CBASER_WaWb (5UL << 59)
+#define GITS_CBASER_RaWaWt (6UL << 59)
+#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
+#define GITS_CBASER_NonShareable (0UL << 10)
+#define GITS_CBASER_InnerShareable (1UL << 10)
+#define GITS_CBASER_OuterShareable (2UL << 10)
+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+
+#define GITS_BASER_NR_REGS 8
+
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_nCnB (0UL << 59)
+#define GITS_BASER_nC (1UL << 59)
+#define GITS_BASER_RaWt (2UL << 59)
+#define GITS_BASER_RaWb (3UL << 59)
+#define GITS_BASER_WaWt (4UL << 59)
+#define GITS_BASER_WaWb (5UL << 59)
+#define GITS_BASER_RaWaWt (6UL << 59)
+#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_NonShareable (0UL << 10)
+#define GITS_BASER_InnerShareable (1UL << 10)
+#define GITS_BASER_OuterShareable (2UL << 10)
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
/*
* CPU interface registers
*/
@@ -142,6 +270,18 @@
#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)
+#define ICC_SGI1R_TARGET_LIST_SHIFT 0
+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT 16
+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT 24
+#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT 32
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_AFFINITY_3_SHIFT 48
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
/*
* System register definitions
*/
@@ -188,6 +328,24 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/msi.h>
+
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+ struct {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+ u64 flags;
+};
static inline void gic_write_eoir(u64 irq)
{
@@ -195,6 +353,11 @@ static inline void gic_write_eoir(u64 irq)
isb();
}
+struct irq_domain;
+int its_cpu_init(void);
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *domain);
+
#endif
#endif
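Most of the new ITS constants are consumed by decoding values read back from the ITS registers; a small sketch of parsing one GITS_BASER<n> entry with the accessor macros — the readq_relaxed() MMIO access is an assumption about how a driver would read it:

#include <linux/io.h>
#include <linux/irqchip/arm-gic-v3.h>

static void sketch_parse_baser(void __iomem *its_base, int n)
{
	u64 val = readq_relaxed(its_base + GITS_BASER + n * 8);
	u64 type = GITS_BASER_TYPE(val);		/* e.g. GITS_BASER_TYPE_DEVICE */
	u64 entry_size = GITS_BASER_ENTRY_SIZE(val);	/* bytes per table entry */

	if (type == GITS_BASER_TYPE_NONE)
		return;		/* this BASER is not implemented */
	/* ... allocate a table of entry_size-byte entries and program it back ... */
}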
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 13eed92c7d24..71d706d5f169 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -91,6 +91,8 @@
#ifndef __ASSEMBLY__
+#include <linux/irqdomain.h>
+
struct device_node;
extern struct irq_chip gic_arch_extn;
@@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b0f9d16e48f6..3c5ca459ac98 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,15 +33,32 @@
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqhandler.h>
#include <linux/radix-tree.h>
struct device_node;
struct irq_domain;
struct of_device_id;
+struct irq_chip;
+struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+};
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -58,12 +75,23 @@ struct of_device_id;
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /* extended V2 interfaces to support hierarchy irq_domains */
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+#endif
};
extern struct irq_domain_ops irq_generic_chip_ops;
@@ -77,6 +105,7 @@ struct irq_domain_chip_generic;
* @ops: pointer to irq_domain methods
* @host_data: private data pointer for use by owner. Not touched by irq_domain
* core code.
+ * @flags: host per irq_domain flags
*
* Optional elements
* @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -84,6 +113,7 @@ struct irq_domain_chip_generic;
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
*
* Revmap data, used internally by irq_domain
* @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -97,10 +127,15 @@ struct irq_domain {
const char *name;
const struct irq_domain_ops *ops;
void *host_data;
+ unsigned int flags;
/* Optional data */
struct device_node *of_node;
+ enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_domain *parent;
+#endif
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
@@ -110,6 +145,22 @@ struct irq_domain {
unsigned int linear_revmap[];
};
+/* Irq domain flags */
+enum {
+ /* Irq domain is hierarchical */
+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
+
+ /* Core calls alloc/free recursively through the domain hierarchy. */
+ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+
+ /*
+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
+ * for implementation specific purposes and ignored by the
+ * core code.
+ */
+ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
+};
+
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
irq_hw_number_t hwirq_max, int direct_max,
@@ -126,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+ return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -220,8 +277,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
+/* V2 interfaces to support hierarchy IRQ domains. */
+extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
+ unsigned int virq);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct device_node *node,
+ const struct irq_domain_ops *ops, void *host_data);
+extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc);
+extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
+}
+
+extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq,
+ struct irq_chip *chip,
+ void *chip_data);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+extern void irq_domain_free_irqs_common(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs);
+extern void irq_domain_free_irqs_top(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs);
+
+extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs);
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
+}
+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return -1;
+}
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return false;
+}
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
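In a hierarchical setup the child domain's .alloc callback usually asks its parent to allocate the underlying interrupts first and then installs its own chip on each descriptor; a minimal sketch against the new ops, with the chip and the hwirq derivation left as placeholders:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip sketch_chip;		/* placeholder irq_chip */

static int sketch_domain_alloc(struct irq_domain *d, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;		/* normally derived from @arg */
	unsigned int i;
	int ret;

	/* Let the parent domain (e.g. the GIC) allocate the real interrupts. */
	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &sketch_chip, d->host_data);
	return 0;
}

static const struct irq_domain_ops sketch_domain_ops = {
	.alloc	= sketch_domain_alloc,
	.free	= irq_domain_free_irqs_common,
};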
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000000..62d543004197
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_IRQHANDLER_H
+#define _LINUX_IRQHANDLER_H
+
+/*
+ * Interrupt flow handler typedefs are defined here to avoid circular
+ * include dependencies.
+ */
+
+struct irq_desc;
+struct irq_data;
+typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+
+#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 704b9a599b26..c035001df223 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1007,6 +1007,7 @@ struct journal_s
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
+#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
/*
* Function declarations for the journaling transaction and buffer
@@ -1035,15 +1036,16 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
unsigned long *block);
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
/* Commit management */
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1157,7 +1159,7 @@ extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
-extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
+extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 47cb09edec1a..348c6f47e4cc 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
}
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
- a += JHASH_INITVAL;
- b += JHASH_INITVAL;
+ a += initval;
+ b += initval;
c += initval;
__jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
return c;
}
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
- return jhash_3words(a, b, 0, initval);
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
- return jhash_3words(a, 0, 0, initval);
+ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
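After this change all three fixed-width wrappers feed the word count and JHASH_INITVAL into the shared __jhash_nwords() core, matching what jhash() computes for the same number of words; a typical caller hashing a flow tuple, with the bucket mask as an assumption:

#include <linux/jhash.h>

/* Hash an IPv4 (saddr, daddr, sport, dport) tuple into one of 1024 buckets. */
static u32 sketch_flow_hash(__be32 saddr, __be32 daddr,
			    __be16 sport, __be16 dport, u32 seed)
{
	u32 ports = ((u32)(__force u16)sport << 16) | (__force u16)dport;

	return jhash_3words((__force u32)saddr, (__force u32)daddr,
			    ports, seed) & 1023;
}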
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 000000000000..0fdc798e3ff7
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/sched.h>
+#include <linux/types.h>
+
+struct kmem_cache;
+struct page;
+struct vm_struct;
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#include <asm/kasan.h>
+#include <asm/pgtable.h>
+
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+
+/* Enable reporting bugs after kasan_disable_current() */
+static inline void kasan_enable_current(void)
+{
+ current->kasan_depth--;
+}
+
+/* Disable reporting bugs for current task */
+static inline void kasan_disable_current(void)
+{
+ current->kasan_depth++;
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+void kasan_unpoison_task_stack(struct task_struct *task);
+
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object) {}
+
+static inline void kasan_kmalloc_large(const void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* LINUX_KASAN_H */
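The depth counter gives code that must legitimately touch poisoned memory (the report path itself, or debugging helpers dumping freed objects) a nestable way to suppress reports for the current task; a minimal usage sketch with a hypothetical helper name:

#include <linux/kasan.h>

/* Inspect an object that may already be poisoned without triggering a report. */
static void sketch_peek_poisoned(const void *obj, size_t size)
{
	kasan_disable_current();	/* reports off for this task */
	/* ... read @obj here, e.g. to dump it for debugging ... */
	kasan_enable_current();		/* reports back on */
}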
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index be342b94c640..16cfb3448568 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -43,4 +43,13 @@
*/
#define IS_MODULE(option) config_enabled(option##_MODULE)
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) (config_enabled(option) || \
+ (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+
#endif /* __LINUX_KCONFIG_H */
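A built-in caller with an optional dependency that may be modular is the typical user; a sketch in which CONFIG_FOO_HELPER and foo_helper_notify() are made-up names:

#include <linux/kconfig.h>

static void sketch_notify_helper(void)
{
#if IS_REACHABLE(CONFIG_FOO_HELPER)
	/* Safe: either FOO_HELPER is built in, or this caller is a module too. */
	foo_helper_notify();
#endif
	/* A built-in caller with FOO_HELPER=m compiles this function to a no-op. */
}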
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 08446635cbd8..2ac6814a7234 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -603,7 +603,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -647,7 +647,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -669,7 +669,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9d957b7ae095..9739607a6dfb 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -27,6 +27,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 057e95971014..19acfdc3828f 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -26,7 +26,8 @@
extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
-extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -69,7 +70,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
gfp_t gfp)
{
}
-static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp)
{
}
static inline void kmemleak_free(const void *ptr)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b3a816f4c0c4..d69ce28efab1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -429,6 +429,9 @@ enum {
ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
+ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
+
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -712,7 +715,7 @@ struct ata_device {
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
- };
+ } ____cacheline_aligned;
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 39ed62ab5b8a..69b6951e8fd2 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -29,14 +29,15 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
- * a slab that is passed in through pool_data.
+ * a slab cache that is passed in through pool_data.
+ * Note: the slab cache must not have a ctor function.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
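The usual pattern is to create the kmem_cache (without a constructor, per the note above) and hand it to the pool as pool_data; a sketch with made-up names and sizes:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *sketch_cache;
static mempool_t *sketch_pool;

static int __init sketch_pool_init(void)
{
	sketch_cache = kmem_cache_create("sketch", 128, 0, 0, NULL); /* no ctor */
	if (!sketch_cache)
		return -ENOMEM;

	/* Guarantee at least 4 pre-allocated elements for low-memory situations. */
	sketch_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
				     sketch_cache);
	return sketch_pool ? 0 : -ENOMEM;
}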
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 7981a9d77d3f..ad81a1a7193f 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK 0x1F
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index ff44374a1a4e..c877cad61a13 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -395,4 +395,43 @@
#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+/* For imx6sx iomux gpr register field define */
+#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20)
+#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13)
+#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+
+#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
+#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
+
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 86a977bf4f79..9eef3a1f2291 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -598,7 +598,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
return (compound_page_dtor *)page[1].lru.next;
}
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
@@ -1730,7 +1730,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+ const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e307..6fc269ce701c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -135,7 +135,7 @@ void trim_init_extable(struct module *m);
#ifdef MODULE
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
- extern const struct type##_device_id __mod_##type##__##name##_device_table \
+extern const typeof(name) __mod_##type##__##name##_device_table \
__attribute__ ((unused, alias(__stringify(name))))
#else /* !MODULE */
#define MODULE_DEVICE_TABLE(type, name)
@@ -224,6 +224,12 @@ struct module_ref {
unsigned long decs;
} __attribute((aligned(2 * sizeof(unsigned long))));
+struct mod_kallsyms {
+ Elf_Sym *symtab;
+ unsigned int num_symtab;
+ char *strtab;
+};
+
struct module {
enum module_state state;
@@ -311,14 +317,9 @@ struct module {
#endif
#ifdef CONFIG_KALLSYMS
- /*
- * We keep the symbol and string tables for kallsyms.
- * The core_* fields below are temporary, loader-only (they
- * could really be discarded after module init).
- */
- Elf_Sym *symtab, *core_symtab;
- unsigned int num_symtab, core_num_syms;
- char *strtab, *core_strtab;
+ /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+ struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms core_kallsyms;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
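Readers of the now RCU-managed symbol tables are expected to snapshot the pointer under RCU (or hold module_mutex); a sketch of the reader side, assuming the RCU-sched flavour is the one used for module data:

#include <linux/module.h>
#include <linux/rcupdate.h>

static unsigned int sketch_num_symbols(struct module *mod)
{
	struct mod_kallsyms *kallsyms;
	unsigned int num;

	preempt_disable();
	kallsyms = rcu_dereference_sched(mod->kallsyms);
	num = kallsyms->num_symtab;
	preempt_enable();
	return num;
}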
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7eeb9bbfb816..94f268e0ff2c 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -82,4 +82,11 @@ int module_finalize(const Elf_Ehdr *hdr,
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
#endif
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 44f4746d033b..5b612c460b51 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,17 +10,12 @@ struct msi_msg {
u32 data; /* 16 bits of msi message data */
};
+extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
-void mask_msi_irq(struct irq_data *data);
-void unmask_msi_irq(struct irq_data *data);
-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void read_msi_msg(unsigned int irq, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
-void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
struct {
@@ -48,6 +43,59 @@ struct msi_desc {
struct msi_msg msg;
};
+/* Helpers to hide struct msi_desc implementation details */
+#define msi_desc_to_dev(desc) (&(desc)->dev->dev)
+#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define first_msi_entry(dev) \
+ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
+#define for_each_msi_entry(desc, dev) \
+ list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+
+#ifdef CONFIG_PCI_MSI
+#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
+#define for_each_pci_msi_entry(desc, pdev) \
+ for_each_msi_entry((desc), &(pdev)->dev)
+
+static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return desc->dev;
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+ return NULL;
+}
+#endif /* CONFIG_PCI_MSI */
+
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
+
+/* Conversion helpers. Should be removed after merging */
+static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+ __pci_write_msi_msg(entry, msg);
+}
+static inline void write_msi_msg(int irq, struct msi_msg *msg)
+{
+ pci_write_msi_msg(irq, msg);
+}
+static inline void mask_msi_irq(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+}
+static inline void unmask_msi_irq(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+}
+
/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
@@ -61,18 +109,141 @@ void arch_restore_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
+#define default_msi_mask_irq __msi_mask_irq
+#define default_msix_mask_irq __msix_mask_irq
-struct msi_chip {
+struct msi_controller {
struct module *owner;
struct device *dev;
struct device_node *of_node;
struct list_head list;
- int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
+ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
- void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
+ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
+#include <linux/irqhandler.h>
+#include <asm/msi.h>
+
+struct irq_domain;
+struct irq_chip;
+struct device_node;
+struct msi_domain_info;
+
+/**
+ * struct msi_domain_ops - MSI interrupt domain callbacks
+ * @get_hwirq: Retrieve the resulting hw irq number
+ * @msi_init: Domain specific init function for MSI interrupts
+ * @msi_free: Domain specific function to free an MSI interrupt
+ * @msi_check: Callback for verification of the domain/info/dev data
+ * @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_finish: Optional callback to finalize the allocation
+ * @set_desc: Set the msi descriptor for an interrupt
+ * @handle_error: Optional error handler if the allocation fails
+ *
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by
+ * msi_create_irq_domain() and related interfaces
+ *
+ * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
+ * are callbacks used by msi_irq_domain_alloc_irqs() and related
+ * interfaces which are based on msi_desc.
+ */
+struct msi_domain_ops {
+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
+ msi_alloc_info_t *arg);
+ int (*msi_init)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg);
+ void (*msi_free)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq);
+ int (*msi_check)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ struct device *dev);
+ int (*msi_prepare)(struct irq_domain *domain,
+ struct device *dev, int nvec,
+ msi_alloc_info_t *arg);
+ void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*set_desc)(msi_alloc_info_t *arg,
+ struct msi_desc *desc);
+ int (*handle_error)(struct irq_domain *domain,
+ struct msi_desc *desc, int error);
+};
+
+/**
+ * struct msi_domain_info - MSI interrupt domain data
+ * @flags: Flags to describe features and capabilities
+ * @ops: The callback data structure
+ * @chip: Optional: associated interrupt chip
+ * @chip_data: Optional: associated interrupt chip data
+ * @handler: Optional: associated interrupt flow handler
+ * @handler_data: Optional: associated interrupt flow handler data
+ * @handler_name: Optional: associated interrupt flow handler name
+ * @data: Optional: domain specific data
+ */
+struct msi_domain_info {
+ u32 flags;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+/* Flags for msi_domain_info */
+enum {
+ /*
+ * Init non-implemented ops callbacks with default MSI domain
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
+ /*
+ * Init non-implemented chip callbacks with default MSI chip
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
+ /* Build identity map between hwirq and irq */
+ MSI_FLAG_IDENTITY_MAP = (1 << 2),
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 4),
+};
+
+int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force);
+
+struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ int nvec);
+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+ int nvec, int type);
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+ struct msi_domain_info *info, struct irq_domain *parent);
+
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+ struct msi_desc *desc);
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info, struct device *dev);
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+
#endif /* LINUX_MSI_H */
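The declarations above make up the generic MSI irqdomain layer; the following is a minimal sketch of how a hypothetical irqchip driver might use them. The "foo" names, the hwirq derivation and the parent domain are illustrative only and not part of this patch.

static irq_hw_number_t foo_msi_get_hwirq(struct msi_domain_info *info,
					  msi_alloc_info_t *arg)
{
	/* Illustrative: derive the hw interrupt number from the alloc info. */
	return 0;
}

static struct irq_chip foo_msi_irq_chip = {
	.name			= "foo-MSI",
	.irq_set_affinity	= msi_domain_set_affinity,
};

static struct msi_domain_ops foo_msi_domain_ops = {
	.get_hwirq	= foo_msi_get_hwirq,
	/* Unset callbacks are filled in via MSI_FLAG_USE_DEF_DOM_OPS. */
};

static struct msi_domain_info foo_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX,
	.ops	= &foo_msi_domain_ops,
	.chip	= &foo_msi_irq_chip,
};

static struct irq_domain *foo_msi_init(struct device_node *node,
				       struct irq_domain *parent)
{
	/*
	 * Bus code later calls msi_domain_alloc_irqs()/msi_domain_free_irqs()
	 * on the returned domain to allocate and release device interrupts.
	 */
	return msi_create_irq_domain(node, &foo_msi_domain_info, parent);
}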
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index e4d451e4600b..49141aa5de3f 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -176,17 +176,17 @@ typedef enum {
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
-/*
* Autodetect nand buswidth with readid/onfi.
* This suppose the driver will configure the hardware in 8 bits mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
#define NAND_BUSWIDTH_AUTO 0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f36e65c5ce78..ff0f50dcea4a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -859,6 +859,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
+ *
+ * Enable or disable the VF's ability to query its RSS Redirection Table and
+ * Hash Key. This is needed since on some devices the VF shares this information
+ * with the PF, and querying it may introduce a theoretical security risk.
+ * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
* Called to setup 'tc' number of traffic classes in the net device. This
@@ -1071,6 +1076,9 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_rss_query_en)(
+ struct net_device *dev,
+ int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index f434fc08c325..1a6ba6d7ff8b 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -240,11 +240,22 @@ void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
+int xt_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
+
+unsigned int *xt_alloc_entry_offsets(unsigned int size);
+bool xt_find_jump_offset(const unsigned int *offsets,
+ unsigned int target, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat);
+
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
@@ -428,7 +439,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
@@ -438,6 +449,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index c72d1ad41ad4..2446e1e21bc7 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -538,9 +538,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- if (size > (__u64) OFFSET_MAX - 1)
- return OFFSET_MAX - 1;
- return (loff_t) size;
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a32ba0d7a98f..fd249ab2718f 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -216,7 +216,7 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-#define NFS_CAP_CHANGE_ATTR (1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
#define NFS_CAP_FILEID (1U << 6)
#define NFS_CAP_MODE (1U << 7)
#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d77a08d25223..d71682435f4a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1132,7 +1132,7 @@ struct nfs41_state_protection {
struct nfs4_op_map allow;
};
-#define NFS4_EXCHANGE_ID_LEN (48)
+#define NFS4_EXCHANGE_ID_LEN (127)
struct nfs41_exchange_id_args {
struct nfs_client *client;
nfs4_verifier *verifier;
diff --git a/include/linux/of.h b/include/linux/of.h
index 29f0adc5f3e4..45447c6f21f7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,6 +23,8 @@
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/list.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -55,8 +57,6 @@ struct device_node {
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
struct kobject kobj;
unsigned long _flags;
void *data;
@@ -74,6 +74,12 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_reconfig_data {
+ struct device_node *dn;
+ struct property *prop;
+ struct property *old_prop;
+};
+
/* initialize a node */
extern struct kobj_type of_node_ktype;
static inline void of_node_init(struct device_node *node)
@@ -105,18 +111,17 @@ static inline struct device_node *of_node_get(struct device_node *node)
static inline void of_node_put(struct device_node *node) { }
#endif /* !CONFIG_OF_DYNAMIC */
-#ifdef CONFIG_OF
-
/* Pointer for first entry in chain of all nodes. */
-extern struct device_node *of_allnodes;
+extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
+#ifdef CONFIG_OF
static inline bool of_have_populated_dt(void)
{
- return of_allnodes != NULL;
+ return of_root != NULL;
}
static inline bool of_node_is_root(const struct device_node *node)
@@ -160,6 +165,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
clear_bit(flag, &p->_flags);
}
+extern struct device_node *__of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
/*
@@ -215,8 +221,9 @@ static inline const char *of_node_full_name(const struct device_node *np)
return np ? np->full_name : "<no-node>";
}
-#define for_each_of_allnodes(dn) \
- for (dn = of_allnodes; dn; dn = dn->allnext)
+#define for_each_of_allnodes_from(from, dn) \
+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -228,7 +235,13 @@ extern struct device_node *of_find_matching_node_and_match(
const struct of_device_id *matches,
const struct of_device_id **match);
-extern struct device_node *of_find_node_by_path(const char *path);
+extern struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts);
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+ return of_find_node_opts_by_path(path, NULL);
+}
+
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
@@ -263,6 +276,10 @@ extern int of_property_read_u32_array(const struct device_node *np,
size_t sz);
extern int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value);
+extern int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz);
extern int of_property_read_string(struct device_node *np,
const char *propname,
@@ -275,7 +292,7 @@ extern int of_property_read_string_helper(struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_is_available(const struct device_node *device);
+extern bool of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -317,16 +334,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
-struct of_prop_reconfig {
- struct device_node *dn;
- struct property *prop;
- struct property *old_prop;
-};
-
-extern int of_reconfig_notifier_register(struct notifier_block *);
-extern int of_reconfig_notifier_unregister(struct notifier_block *);
-extern int of_reconfig_notify(unsigned long, void *);
-
extern int of_attach_node(struct device_node *);
extern int of_detach_node(struct device_node *);
@@ -385,6 +392,12 @@ static inline struct device_node *of_find_node_by_path(const char *path)
return NULL;
}
+static inline struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_parent(const struct device_node *node)
{
return NULL;
@@ -426,9 +439,9 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_is_available(const struct device_node *device)
+static inline bool of_device_is_available(const struct device_node *device)
{
- return 0;
+ return false;
}
static inline struct property *of_find_property(const struct device_node *np,
@@ -477,6 +490,13 @@ static inline int of_property_read_u32_array(const struct device_node *np,
return -ENOSYS;
}
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
static inline int of_property_read_string(struct device_node *np,
const char *propname,
const char **out_string)
@@ -581,7 +601,10 @@ static inline const char *of_prop_next_string(struct property *prop,
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
+static inline int of_node_to_nid(struct device_node *device)
+{
+ return NUMA_NO_NODE;
+}
#endif
static inline struct device_node *of_find_matching_node(
@@ -760,6 +783,13 @@ static inline int of_property_read_u32(const struct device_node *np,
return of_property_read_u32_array(np, propname, out_value, 1);
}
+static inline int of_property_read_s32(const struct device_node *np,
+ const char *propname,
+ s32 *out_value)
+{
+ return of_property_read_u32(np, propname, (u32*) out_value);
+}
+
#define of_property_for_each_u32(np, propname, prop, p, u) \
for (prop = of_find_property(np, propname, NULL), \
p = of_prop_next_u32(prop, NULL, &u); \
@@ -828,7 +858,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
-#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__attribute__((unused)) \
= { .compatible = compat, \
@@ -879,7 +909,19 @@ struct of_changeset {
struct list_head entries;
};
+enum of_reconfig_change {
+ OF_RECONFIG_NO_CHANGE = 0,
+ OF_RECONFIG_CHANGE_ADD,
+ OF_RECONFIG_CHANGE_REMOVE,
+};
+
#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg);
+
extern void of_changeset_init(struct of_changeset *ocs);
extern void of_changeset_destroy(struct of_changeset *ocs);
extern int of_changeset_apply(struct of_changeset *ocs);
@@ -917,9 +959,58 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
-#endif
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
/* CONFIG_OF_RESOLVE api */
extern int of_resolve_phandles(struct device_node *tree);
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
#endif /* _LINUX_OF_H */
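A brief usage sketch of the OF helpers added above; the node path and property names are invented for the example.

static int foo_parse_dt(void)
{
	struct device_node *np;
	const char *opts = NULL;
	u64 ranges[2];
	s32 offset;
	int ret;

	/*
	 * The ":config" suffix is returned via @opts rather than being
	 * treated as part of the node path.
	 */
	np = of_find_node_opts_by_path("/soc/foo@1000:config", &opts);
	if (!np)
		return -ENODEV;

	ret = of_property_read_u64_array(np, "foo,ranges", ranges, 2);
	if (!ret)
		ret = of_property_read_s32(np, "foo,offset", &offset);

	of_node_put(np);
	return ret;
}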
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 8cb14eb393d6..d88e81be6368 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
void __iomem *of_iomap(struct device_node *node, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name);
+ int index, const char *name);
#else
#include <linux/io.h>
@@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
}
static inline void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name)
+ int index, const char *name)
{
return IOMEM_ERR_PTR(-EINVAL);
}
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index ef370210ffb2..22801b10cef5 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -53,6 +53,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
+void of_dma_configure(struct device *dev, struct device_node *np);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -90,6 +91,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
+static inline void of_dma_configure(struct device *dev, struct device_node *np)
+{}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 51a560f34bca..ffbe4707d4aa 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,12 +1,20 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+
#ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
+extern void of_iommu_init(void);
+extern struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np);
+
#else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +24,23 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL;
}
+static inline void of_iommu_init(void) { }
+static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
+{
+ return NULL;
+}
+
#endif /* CONFIG_OF_IOMMU */
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
+struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+
+extern struct of_device_id __iommu_of_table;
+
+typedef int (*of_iommu_init_fn)(struct device_node *);
+
+#define IOMMU_OF_DECLARE(name, compat, fn) \
+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index bfec136a6d1e..563ad28684c6 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -69,6 +69,7 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 1fd207e7a847..29fd3fe1c035 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
+void of_pci_dma_configure(struct pci_dev *pci_dev);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -50,6 +51,8 @@ of_get_pci_domain_nr(struct device_node *node)
{
return -1;
}
+
+static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
#endif
#if defined(CONFIG_OF_ADDRESS)
@@ -59,13 +62,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_chip *chip);
-void of_pci_msi_chip_remove(struct msi_chip *chip);
-struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node);
+int of_pci_msi_chip_add(struct msi_controller *chip);
+void of_pci_msi_chip_remove(struct msi_controller *chip);
+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
#else
-static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { }
-static inline struct msi_chip *
+static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
+static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
+static inline struct msi_controller *
of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
#endif
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index c65a18a0cfdf..7e09244bb679 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size);
/* for building the device tree */
extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
-extern void (*of_pdt_build_more)(struct device_node *dp,
- struct device_node ***nextp);
+extern void (*of_pdt_build_more)(struct device_node *dp);
#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index c2b0627a2317..8a860f096c35 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root,
static inline void of_platform_depopulate(struct device *parent) { }
#endif
+#ifdef CONFIG_OF_DYNAMIC
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
+#endif
+
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 2baeee12f48e..e942558b3585 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -44,7 +44,7 @@ enum pageblock_bits {
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Huge page sizes are variable */
-extern int pageblock_order;
+extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2882c13c6391..cde0e8a24694 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -177,6 +177,10 @@ enum pci_dev_flags {
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
/* Do not use bus resets for device */
PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ /* Do not use PM reset even if device advertises NoSoftRst- */
+ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
+ /* Get VPD from function 0 VPD */
+ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
};
enum pci_irq_reroute_variant {
@@ -351,6 +355,9 @@ struct pci_dev {
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
+ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -452,7 +459,7 @@ struct pci_bus {
struct resource busn_res; /* bus numbers routed to this bus */
struct pci_ops *ops; /* configuration access functions */
- struct msi_chip *msi; /* MSI controller */
+ struct msi_controller *msi; /* MSI controller */
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
@@ -512,6 +519,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
return dev->bus->self;
}
+struct device *pci_get_host_bridge_device(struct pci_dev *dev);
+void pci_put_host_bridge_device(struct device *dev);
+
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -561,6 +571,7 @@ static inline int pcibios_err_to_errno(int err)
/* Low-level architecture-dependent routines */
struct pci_ops {
+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
@@ -858,6 +869,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
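The new @map_bus hook together with the pci_generic_config_*() helpers above lets a host controller driver that can memory-map its configuration space avoid hand-rolling accessors; a hedged sketch follows, in which the "foo" driver state and the ECAM-style offset computation are illustrative.

struct foo_pcie {			/* hypothetical driver state */
	void __iomem *cfg_base;
};

static void __iomem *foo_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				     int where)
{
	struct foo_pcie *pcie = bus->sysdata;

	/* Illustrative ECAM-style layout: bus:dev:fn select the window. */
	return pcie->cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops foo_pci_ops = {
	.map_bus	= foo_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};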
@@ -993,6 +1014,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
+void pci_ignore_hotplug(struct pci_dev *dev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1028,11 +1050,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-static inline void pci_ignore_hotplug(struct pci_dev *dev)
-{
- dev->ignore_hotplug = 1;
-}
-
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
{
@@ -1823,10 +1840,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
+struct irq_domain;
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1847,6 +1866,10 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index eb8b8ac6df3c..24f5470d3944 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -42,6 +42,7 @@ struct pipe_buffer {
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
+ * @user: the user who created this pipe
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -57,6 +58,7 @@ struct pipe_inode_info {
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
+ struct user_struct *user;
};
/*
@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned long pipe_user_pages_hard;
+extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index a6591c693ebb..e8d9f84ec931 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -38,6 +38,10 @@ struct samsung_i2s {
*/
struct s3c_audio_pdata {
int (*cfg_gpio)(struct platform_device *);
+ void *dma_playback;
+ void *dma_capture;
+ void *dma_play_sec;
+ void *dma_capture_mic;
union {
struct samsung_i2s i2s;
} type;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 383fd68aaee1..03f3ae96842e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -610,7 +610,7 @@ struct dev_pm_info {
extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
-extern int dev_pm_put_subsys_data(struct device *dev);
+extern void dev_pm_put_subsys_data(struct device *dev);
/*
* Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0330217abfad..cccaf4a29e9f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -21,7 +21,7 @@ struct dev_pm_opp;
struct device;
enum dev_pm_opp_event {
- OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
};
#if defined(CONFIG_PM_OPP)
@@ -30,7 +30,13 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -44,12 +50,21 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
+void dev_pm_opp_remove(struct device *dev, unsigned long freq);
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+ unsigned int count);
+void dev_pm_opp_put_supported_hw(struct device *dev);
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
+void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -61,11 +76,36 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ return false;
+}
+
static inline int dev_pm_opp_get_opp_count(struct device *dev)
{
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ return NULL;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
@@ -90,6 +130,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
return -EINVAL;
}
+static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+{
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
@@ -105,15 +149,72 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
{
return ERR_PTR(-EINVAL);
}
+
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
+
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
+int dev_pm_opp_of_add_table(struct device *dev);
+void dev_pm_opp_of_remove_table(struct device *dev);
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
#else
-static inline int of_init_opp_table(struct device *dev)
+static inline int dev_pm_opp_of_add_table(struct device *dev)
{
return -EINVAL;
}
+
+static inline void dev_pm_opp_of_remove_table(struct device *dev)
+{
+}
+
+static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
#endif
#endif /* __LINUX_OPP_H__ */
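A hedged sketch of how a cpufreq-style driver could combine the OPP helpers declared above; the supply name and target frequency are placeholders.

static int foo_opp_setup(struct device *dev, unsigned long target_freq)
{
	int ret;

	ret = dev_pm_opp_set_regulator(dev, "vdd-cpu");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		goto put_regulator;

	/* Selects a suitable OPP and adjusts clock and supply together. */
	ret = dev_pm_opp_set_rate(dev, target_freq);
	if (ret)
		goto remove_table;

	return 0;

remove_table:
	dev_pm_opp_of_remove_table(dev);
put_regulator:
	dev_pm_opp_put_regulator(dev);
	return ret;
}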
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81c5e2a..253c9b4198ef 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -19,8 +19,8 @@
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
diff --git a/include/linux/property.h b/include/linux/property.h
new file mode 100644
index 000000000000..9242fb0221ba
--- /dev/null
+++ b/include/linux/property.h
@@ -0,0 +1,73 @@
+/*
+ * property.h - Unified device property interface.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_PROPERTY_H_
+#define _LINUX_PROPERTY_H_
+
+#include <linux/types.h>
+
+struct device;
+
+enum dev_prop_type {
+ DEV_PROP_U8,
+ DEV_PROP_U16,
+ DEV_PROP_U32,
+ DEV_PROP_U64,
+ DEV_PROP_STRING,
+ DEV_PROP_MAX,
+};
+
+bool device_property_present(struct device *dev, const char *propname);
+int device_property_read_u8_array(struct device *dev, const char *propname,
+ u8 *val, size_t nval);
+int device_property_read_u16_array(struct device *dev, const char *propname,
+ u16 *val, size_t nval);
+int device_property_read_u32_array(struct device *dev, const char *propname,
+ u32 *val, size_t nval);
+int device_property_read_u64_array(struct device *dev, const char *propname,
+ u64 *val, size_t nval);
+int device_property_read_string_array(struct device *dev, const char *propname,
+ const char **val, size_t nval);
+int device_property_read_string(struct device *dev, const char *propname,
+ const char **val);
+
+static inline bool device_property_read_bool(struct device *dev,
+ const char *propname)
+{
+ return device_property_present(dev, propname);
+}
+
+static inline int device_property_read_u8(struct device *dev,
+ const char *propname, u8 *val)
+{
+ return device_property_read_u8_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u16(struct device *dev,
+ const char *propname, u16 *val)
+{
+ return device_property_read_u16_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u32(struct device *dev,
+ const char *propname, u32 *val)
+{
+ return device_property_read_u32_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u64(struct device *dev,
+ const char *propname, u64 *val)
+{
+ return device_property_read_u64_array(dev, propname, val, 1);
+}
+
+#endif /* _LINUX_PROPERTY_H_ */
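A minimal sketch of a probe path using the unified property interface declared above, which reads the same way regardless of where the properties come from; the property names are illustrative.

static int foo_read_props(struct device *dev)
{
	u32 clock_freq;
	const char *label;
	int ret;

	if (!device_property_present(dev, "clock-frequency"))
		return -EINVAL;

	ret = device_property_read_u32(dev, "clock-frequency", &clock_freq);
	if (ret)
		return ret;

	/* Optional string property; absence is not an error here. */
	if (device_property_read_string(dev, "label", &label))
		label = "foo";

	return 0;
}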
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index cc79eff4a1ad..608d90444b6f 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -56,7 +56,29 @@ extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
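An illustrative sketch of the distinction documented above: a filesystem-driven check (e.g. a /proc read) should pass FSCREDS, while an explicit tracing syscall should pass REALCREDS. The helper below is hypothetical.

static bool foo_may_inspect(struct task_struct *task, bool via_filesystem)
{
	unsigned int mode = via_filesystem ? PTRACE_MODE_READ_FSCREDS
					   : PTRACE_MODE_ATTACH_REALCREDS;

	return ptrace_may_access(task, mode);
}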
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 933ff6977c96..9a80663a1574 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -375,12 +375,29 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index;
+ iter->tags = 0;
+ return NULL;
+}
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
@@ -414,9 +431,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
- unsigned size = radix_tree_chunk_size(iter) - 1;
+ long size = radix_tree_chunk_size(iter);
- while (size--) {
+ while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
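A sketch of the RCU-side lookup loop the new radix_tree_iter_retry() helper is meant for; the tree and the per-entry processing are illustrative.

static void foo_scan(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			/* Raced with an update; retry this chunk. */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* ... process entry ... */
	}
	rcu_read_unlock();
}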
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 10b72bd4ec2e..74dbf3df737f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -635,21 +635,6 @@ static inline void rcu_preempt_sleep_check(void)
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
-/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f540b1496e2f..7f96b1b8f7bb 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -533,6 +533,16 @@ static inline int regulator_count_voltages(struct regulator *regulator)
}
#endif
+static inline int regulator_set_voltage_triplet(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
+ return 0;
+
+ return regulator_set_voltage(regulator, min_uV, max_uV);
+}
+
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
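The triplet helper above first requests a voltage in the [target, max] window and only falls back to [min, max] if that fails; an illustrative call, with placeholder values and an already-acquired regulator:

static int foo_set_supply(struct regulator *reg)
{
	/* Prefer 1.0 V; accept anything from 0.9 V up to 1.1 V. */
	return regulator_set_voltage_triplet(reg, 900000, 1000000, 1100000);
}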
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index fb298e9d6d3a..84056743780a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -108,7 +108,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags);
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags);
void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup_compare(const struct rhashtable *ht, void *key,
bool (*compare)(void *, void *), void *arg);
void rhashtable_destroy(const struct rhashtable *ht);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d9d7e7e56352..89195a456069 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -113,20 +113,6 @@ static inline struct anon_vma *page_anon_vma(struct page *page)
return page_rmapping(page);
}
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- up_write(&anon_vma->root->rwsem);
-}
-
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 05353a40a462..439c6ca684da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -757,6 +757,8 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
@@ -1642,6 +1644,9 @@ struct task_struct {
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
+#ifdef CONFIG_KASAN
+ unsigned int kasan_depth;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
diff --git a/include/linux/security.h b/include/linux/security.h
index ba96471c11ba..ea9eda4abdd5 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2471,7 +2471,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg4,
unsigned long arg5)
{
- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e20dc2ca2a26..9f24c65c9d34 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2556,6 +2556,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c265bec6a57d..5f97037a1759 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -104,6 +104,7 @@
(unsigned long)ZERO_SIZE_PTR)
#include <linux/kmemleak.h>
+#include <linux/kasan.h>
struct mem_cgroup;
/*
@@ -326,7 +327,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
gfp_t flags, size_t size)
{
- return kmem_cache_alloc(s, flags);
+ void *ret = kmem_cache_alloc(s, flags);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
static __always_inline void *
@@ -334,7 +338,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size)
{
- return kmem_cache_alloc_node(s, gfpflags, node);
+ void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
#endif /* CONFIG_TRACING */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d82abd40a3c0..e565d7c10c9c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -110,4 +110,23 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
}
#endif
+
+/**
+ * virt_to_obj - returns address of the beginning of object.
+ * @s: object's kmem_cache
+ * @slab_page: address of slab page
+ * @x: address within object memory range
+ *
+ * Returns address of the beginning of object
+ */
+static inline void *virt_to_obj(struct kmem_cache *s,
+ const void *slab_page,
+ const void *x)
+{
+ return (void *)x - ((x - slab_page) % s->size);
+}
+
+void object_err(struct kmem_cache *s, struct page *page,
+ u8 *object, char *reason);
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index b363a0f6dfe1..3fbae6853df3 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -131,8 +131,6 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
- struct rpc_xprt *xprt);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
const struct rpc_program *, u32);
void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 00aa9b4a8273..e3652ddc384f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -317,7 +317,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 037e9df2f610..fbb64253a27b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -43,6 +43,9 @@
/* Default weight of a bound cooling device */
#define THERMAL_WEIGHT_DEFAULT 0
+/* use a value below 0 K to indicate an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID -274000
+
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
((long)t-2732+5)/10 : ((long)t-2732-5)/10)
@@ -155,6 +158,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
+ * @trips_disabled; bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -169,6 +173,7 @@ struct thermal_attr {
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
+ * @need_update: if set to 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
@@ -189,6 +194,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
@@ -196,6 +202,7 @@ struct thermal_zone_device {
int emul_temperature;
int passive;
unsigned int forced_passive;
+ atomic_t need_update;
struct thermal_zone_device_ops *ops;
struct thermal_zone_params *tzp;
struct thermal_governor *governor;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c72851328ca9..e267dc488cdf 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
* See the file COPYING for more details.
*/
+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>
@@ -319,15 +321,19 @@ extern void syscall_unregfunc(void);
* "void *__data, proto" as the callback prototype.
*/
#define DECLARE_TRACE_NOARGS(name) \
- __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+ __DECLARE_TRACE(name, void, , \
+ cpu_online(raw_smp_processor_id()), \
+ void *__data, __data)
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto), \
PARAMS(__data, args))
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5171ef8f7b85..4858a3b79b7a 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -574,7 +574,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
count = min_t(int, count, ld->tty->receive_room);
- if (count)
+ if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
return count;
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20afdbc01..bb679b48f408 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+ unsigned long maxlength);
+
#endif /* _LINUX_UCS2_STRING_H_ */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index bdbd19fb1ff8..0f963c15d0cd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -367,14 +367,13 @@ struct usb_bus {
int devnum_next; /* Next open device number in
* round-robin allocation */
+ struct mutex devnum_next_mutex; /* devnum_next mutex */
struct usb_devmap devmap; /* device address allocation map */
struct usb_device *root_hub; /* Root hub */
struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
struct list_head bus_list; /* list of busses */
- struct mutex usb_address0_mutex; /* unaddressed device mutex */
-
int bandwidth_allocated; /* on this bus: how much of the time
* reserved for periodic (intr/iso)
* requests is used, on average?
@@ -1060,7 +1059,7 @@ struct usbdrv_wrap {
* for interfaces bound to this driver.
* @soft_unbind: if set to 1, the USB core will not kill URBs and disable
* endpoints before calling the driver's disconnect method.
- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
+ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
* to initiate lower power link state transitions when an idle timeout
* occurs. Device-initiated USB 3.0 link PM will still be allowed.
*
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99af5d54..1c88b177cb9c 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -178,11 +178,11 @@ struct ehci_regs {
* PORTSCx
*/
/* HOSTPC: offset 0x84 */
- u32 hostpc[1]; /* HOSTPC extension */
+ u32 hostpc[0]; /* HOSTPC extension */
#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
#define HOSTPC_PSPD (3<<25) /* Port speed detection */
- u32 reserved5[16];
+ u32 reserved5[17];
/* USBMODE_EX: offset 0xc8 */
u32 usbmode_ex; /* USB Device mode extension */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 2f48e1756cbd..a4ef2e7c243a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -169,6 +169,7 @@ struct usb_hcd {
* bandwidth_mutex should be dropped after a successful control message
* to the device, or resetting the bandwidth after a failed attempt.
*/
+ struct mutex *address0_mutex;
struct mutex *bandwidth_mutex;
struct usb_hcd *shared_hcd;
struct usb_hcd *primary_hcd;
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 9948c874e3f1..1d0043dc34e4 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
+/* device can't handle Link Power Management */
+#define USB_QUIRK_NO_LPM BIT(10)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
US_FLAG(MAX_SECTORS_240, 0x08000000) \
/* Sets max_sectors to 240 */ \
+ US_FLAG(NO_REPORT_LUNS, 0x10000000) \
+ /* Cannot handle REPORT_LUNS */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b87696fdf06a..0ec598381f97 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,8 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -75,7 +77,9 @@ extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller);
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller);
+
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
@@ -96,8 +100,12 @@ void vmalloc_sync_all(void);
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
- /* return actual size without guard page */
- return area->size - PAGE_SIZE;
+ if (!(area->flags & VM_NO_GUARD))
+ /* return actual size without guard page */
+ return area->size - PAGE_SIZE;
+ else
+ return area->size;
+
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4a7adb..e830c3dff61a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,8 +63,13 @@ struct unix_sock {
#define UNIX_GC_CANDIDATE 0
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
+ wait_queue_t peer_wake;
};
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(struct sock *sk)
+{
+ return (struct unix_sock *)sk;
+}
#define peer_wait peer_wq.wait
diff --git a/include/net/codel.h b/include/net/codel.h
index aeee28081245..7302a4df618c 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -158,11 +158,13 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
* ecn_mark: number of packets we ECN marked instead of dropping
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
+ u32 drop_len;
u32 ecn_mark;
};
@@ -297,6 +299,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
goto end;
}
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -319,6 +322,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
diff --git a/include/net/dst.h b/include/net/dst.h
index b6a790646729..74baade721d6 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -312,6 +312,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
}
}
+/**
+ * dst_hold_safe - Take a reference on a dst if possible
+ * @dst: pointer to dst entry
+ *
+ * This helper returns false if it could not safely
+ * take a reference on a dst.
+ */
+static inline bool dst_hold_safe(struct dst_entry *dst)
+{
+ if (dst->flags & DST_NOCACHE)
+ return atomic_inc_not_zero(&dst->__refcnt);
+ dst_hold(dst);
+ return true;
+}
+
+/**
+ * skb_dst_force_safe - makes sure skb dst is refcounted
+ * @skb: buffer
+ *
+ * If dst is not yet refcounted and not destroyed, grab a ref on it.
+ */
+static inline void skb_dst_force_safe(struct sk_buff *skb)
+{
+ if (skb_dst_is_noref(skb)) {
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (!dst_hold_safe(dst))
+ dst = NULL;
+
+ skb->_skb_refdst = (unsigned long)dst;
+ }
+}
+
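As a rough illustration of the intended call pattern (the wrapper function below is invented, not part of the patch), a path that lets an skb outlive the RCU section pins the dst first:

	/* Sketch: take a real reference before the skb is queued beyond the
	 * rcu_read_lock() section, e.g. onto a socket backlog.
	 */
	static void example_queue_skb(struct sock *sk, struct sk_buff *skb)
	{
		skb_dst_force_safe(skb);	/* drops a noref dst that is going away */
		__sk_add_backlog(sk, skb);	/* skb may now leave the RCU section */
	}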
/**
* __skb_tunnel_rx - prepare skb for rx reinsert
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b2828a06a5a6..a7d812d05fba 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -42,7 +42,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
- sk_release_kernel(sk);
+ if (sk)
+ sk_release_kernel(sk);
}
#endif
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 848e85cb5c61..24d5c099d3ac 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -98,7 +98,8 @@ struct inet_connection_sock {
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state;
+ __u8 icsk_ca_state:7,
+ icsk_ca_setsockopt:1;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 84b20835b736..0dc0a51da38f 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
struct ipv6hdr;
-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
+/* Note:

+ * IP_ECN_set_ce() has to tweak the IPv4 checksum when setting CE,
+ * so the header change and the checksum change cancel out and skb->csum
+ * is unaffected if/when CHECKSUM_COMPLETE is in use.
+ * In the IPv6 case no checksum compensates for the change in the IPv6
+ * header, so we have to update skb->csum here.
+ */
+static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
+ __be32 from, to;
+
if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
return 0;
- *(__be32*)iph |= htonl(INET_ECN_CE << 20);
+
+ from = *(__be32 *)iph;
+ to = from | htonl(INET_ECN_CE << 20);
+ *(__be32 *)iph = to;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(csum_sub(skb->csum, from), to);
return 1;
}
@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
case cpu_to_be16(ETH_P_IPV6):
if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb_tail_pointer(skb))
- return IP6_ECN_set_ce(ipv6_hdr(skb));
+ return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
break;
}
diff --git a/include/net/ip.h b/include/net/ip.h
index c0c26c3deeb5..d00ebdf14ca4 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -160,6 +160,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
}
/* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index a5593dab6af7..ef9557683fec 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -79,11 +79,12 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
err = ip6_local_out(skb);
if (net_xmit_eval(err) == 0) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5bc6edeb7143..a3f2f8005126 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -186,12 +186,13 @@ static inline void iptunnel_xmit_stats(int err,
struct pcpu_sw_netstats __percpu *stats)
{
if (err > 0) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += err;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
} else if (err < 0) {
err_stats->tx_errors++;
err_stats->tx_aborted_errors++;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929392b0..a5169a4e9ef7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -207,6 +207,7 @@ extern rwlock_t ip6_ra_lock;
*/
struct ipv6_txoptions {
+ atomic_t refcnt;
/* Length of this structure */
int tot_len;
@@ -219,7 +220,7 @@ struct ipv6_txoptions {
struct ipv6_opt_hdr *dst0opt;
struct ipv6_rt_hdr *srcrt; /* Routing Header */
struct ipv6_opt_hdr *dst1opt;
-
+ struct rcu_head rcu;
/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};
@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
struct rcu_head rcu;
};
+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+ struct ipv6_txoptions *opt;
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt && !atomic_inc_not_zero(&opt->refcnt))
+ opt = NULL;
+ rcu_read_unlock();
+ return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+ if (opt && atomic_dec_and_test(&opt->refcnt))
+ kfree_rcu(opt, rcu);
+}
+
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
@@ -490,6 +509,7 @@ struct ip6_create_arg {
u32 user;
const struct in6_addr *src;
const struct in6_addr *dst;
+ int iif;
u8 ecn;
};
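The refcounted ipv6_txoptions plus the txopt_get()/txopt_put() helpers above replace bare rcu_dereference(np->opt) users that could otherwise sleep while holding only the RCU reference. A hedged sketch of the expected pattern; the surrounding function is invented for illustration:

	/* Sketch: pin the options across a potentially sleeping send path. */
	static int example_use_txopt(struct sock *sk)
	{
		struct ipv6_pinfo *np = inet6_sk(sk);
		struct ipv6_txoptions *opt;

		opt = txopt_get(np);	/* takes a reference under RCU */
		/* ... build and transmit a packet using opt ... */
		txopt_put(opt);		/* last put frees via kfree_rcu() */
		return 0;
	}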
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index a830b01baba4..e8aa72eaded5 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
/* Send a single event to user space */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
/* We may need a function to send a stream of events to user space.
* More on that later... */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 2e67cdd19cdc..0d34ce2ff07b 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -35,13 +35,13 @@
*/
/* indicates that the Short Address changed */
-#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001
+#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001
/* indicates that the IEEE Address changed */
-#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002
+#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002
/* indicates that the PAN ID changed */
-#define IEEE802515_AFILT_PANID_CHANGED 0x00000004
+#define IEEE802154_AFILT_PANID_CHANGED 0x00000004
/* indicates that PAN Coordinator status changed */
-#define IEEE802515_AFILT_PANC_CHANGED 0x00000008
+#define IEEE802154_AFILT_PANC_CHANGED 0x00000008
struct ieee802154_hw_addr_filt {
__le16 pan_id; /* Each independent PAN selects a unique
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 84a53d780306..1a91f979160e 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,6 +28,8 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ struct nf_hook_ops *ops);
};
void nf_register_queue_handler(const struct nf_queue_handler *qh);
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 3573a81815ad..8ba379f9e467 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -31,6 +31,7 @@ struct netns_sctp {
struct list_head addr_waitq;
struct timer_list addr_wq_timer;
struct list_head auto_asconf_splist;
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
spinlock_t addr_wq_lock;
/* Lock that protects the local_addr_list writers */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 5ccfe161f359..f10b01467f07 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -393,7 +393,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -689,6 +690,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
sch->qstats.backlog = 0;
}
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc **pold)
+{
+ struct Qdisc *old;
+
+ sch_tree_lock(sch);
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ qdisc_reset(old);
+ }
+ sch_tree_unlock(sch);
+
+ return old;
+}
+
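qdisc_replace() packages the common graft sequence. A hedged sketch of a classful qdisc's graft callback using it; struct example_sched_data and its qdisc member are hypothetical stand-ins for a driver's private data:

	/* Sketch: swap in a new child qdisc in place of the old one. */
	static int example_graft(struct Qdisc *sch, unsigned long arg,
				 struct Qdisc *new, struct Qdisc **old)
	{
		struct example_sched_data *q = qdisc_priv(sch);	/* hypothetical priv */

		if (new == NULL)
			new = &noop_qdisc;

		/* Swaps the child under sch_tree_lock() and drains the old
		 * child's qlen/backlog from the ancestors via
		 * qdisc_tree_reduce_backlog(). */
		*old = qdisc_replace(sch, new, &q->qdisc);
		return 0;
	}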
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 856f01cb51dd..230775f5952a 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -571,11 +571,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_scope_id = 0;
- addr->v6.sin6_port = addr->v4.sin_port;
- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_addr.s6_addr32[0] = 0;
addr->v6.sin6_addr.s6_addr32[1] = 0;
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
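The reordering above matters because union sctp_addr overlays sockaddr_in on sockaddr_in6: sin_addr shares bytes with sin6_flowinfo, so the v4 address has to be read before the v6 fields are zeroed. A compile-time check one could place in any function body to illustrate the overlap (not part of the patch):

	/* Sketch: sin_addr and sin6_flowinfo occupy the same offset, so
	 * zeroing sin6_flowinfo first would destroy sin_addr.s_addr.
	 */
	BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_addr) !=
		     offsetof(struct sockaddr_in6, sin6_flowinfo));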
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4ff3f67be62c..2ad3a7bc8385 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -223,6 +223,10 @@ struct sctp_sock {
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
+
+	/* These must be the last fields, as they will be skipped on copies,
+ * like on accept and peeloff operations
+ */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index c8146ed9e66a..a40bc8c0af4b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -67,6 +67,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
struct cgroup;
@@ -378,6 +379,7 @@ struct sock {
sk_no_check_rx : 1,
sk_userlocks : 4,
sk_protocol : 8,
+#define SK_PROTOCOL_MAX U8_MAX
sk_type : 16;
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
@@ -714,6 +716,8 @@ enum sock_flags {
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
@@ -788,7 +792,7 @@ void sk_stream_write_space(struct sock *sk);
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
/* dont let skb dst not refcounted, we are going to leave rcu lock */
- skb_dst_force(skb);
+ skb_dst_force_safe(skb);
if (!sk->sk_backlog.tail)
sk->sk_backlog.head = skb;
@@ -818,6 +822,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
if (sk_rcvqueues_full(sk, limit))
return -ENOBUFS;
+ /*
+ * If the skb was allocated from pfmemalloc reserves, only
+ * allow SOCK_MEMALLOC sockets to use it as this socket is
+ * helping free memory
+ */
+ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+ return -ENOMEM;
+
__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
@@ -2267,6 +2279,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
+/* This helper checks if a socket is a full socket,
+ * i.e. _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
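sk_fullsock() is meant to guard accesses to fields that only exist on full sockets; timewait and request minisocks carry only the sock_common prefix. A hedged sketch of the typical check (the helper below is illustrative):

	/* Sketch: only touch full-socket fields after the check. */
	static void example_report_sock(const struct sock *sk)
	{
		if (sk_fullsock(sk))
			pr_debug("full socket in state %d\n", sk->sk_state);
		else
			pr_debug("request/timewait socket\n");
	}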
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index b0b645988bd8..50e78a74d0df 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -25,6 +25,7 @@ enum {
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
+ TCP_NEW_SYN_RECV,
TCP_MAX_STATES /* Leave at the end! */
};
@@ -44,7 +45,8 @@ enum {
TCPF_CLOSE_WAIT = (1 << 8),
TCPF_LAST_ACK = (1 << 9),
TCPF_LISTEN = (1 << 10),
- TCPF_CLOSING = (1 << 11)
+ TCPF_CLOSING = (1 << 11),
+ TCPF_NEW_SYN_RECV = (1 << 12),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 903461aa5644..2f3572f03f58 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -43,6 +43,11 @@ struct vxlan_sock {
#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
+/* Flags that are used in the receive path. These flags must match in
+ * order for a socket to be shareable
+ */
+#define VXLAN_F_RCV_FLAGS VXLAN_F_UDP_ZERO_CSUM6_RX
+
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data,
bool no_share, u32 flags);
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
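A rough sketch of how a legacy write()-as-ioctl handler would use the new guard; the handler body and names are illustrative, not from the patch:

	/* Sketch: reject write() calls made with borrowed credentials or
	 * from a kernel address space (e.g. splice into a suid-opened fd).
	 */
	static ssize_t example_ib_write(struct file *filp, const char __user *buf,
					size_t count, loff_t *pos)
	{
		if (!ib_safe_file_access(filp)) {
			pr_err_once("process %d (%s) tried unsafe ib write\n",
				    task_tgid_vnr(current), current->comm);
			return -EACCES;
		}
		/* ... parse and execute the command from buf ... */
		return count;
	}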
#endif /* _RDMA_IB_H */
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab7c380..96e3f56519e7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
#endif
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8bb00a27e219..e02b1b8d8ee4 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -535,6 +535,12 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* PCM library
*/
+/**
+ * snd_pcm_stream_linked - Check whether the substream is linked with others
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is being linked with others.
+ */
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
return substream->group != &substream->self_group;
@@ -545,6 +551,16 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+
+/**
+ * snd_pcm_stream_lock_irqsave - Lock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() but with the local
+ * IRQ disabled (only when nonatomic is false). In the nonatomic case, this
+ * is identical to snd_pcm_stream_lock().
+ */
#define snd_pcm_stream_lock_irqsave(substream, flags) \
do { \
typecheck(unsigned long, flags); \
@@ -553,9 +569,25 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
+/**
+ * snd_pcm_group_for_each_entry - iterate over the linked substreams
+ * @s: the iterator
+ * @substream: the substream
+ *
+ * Iterate over all the substreams linked to the given @substream.
+ * When @substream isn't linked with any others, this returns @substream
+ * itself once.
+ */
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
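A hedged sketch of a trigger callback iterating the linked group and marking already-handled substreams via snd_pcm_trigger_done(); the driver function is invented and the hardware programming is a placeholder:

	/* Sketch: a single-shot trigger that starts all linked substreams. */
	static int example_trigger(struct snd_pcm_substream *substream, int cmd)
	{
		struct snd_pcm_substream *s;

		if (cmd != SNDRV_PCM_TRIGGER_START)
			return 0;

		snd_pcm_group_for_each_entry(s, substream) {
			/* ... program the hardware channel for s ... */
			snd_pcm_trigger_done(s, substream); /* further triggers hit the master only */
		}
		return 0;
	}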
+/**
+ * snd_pcm_running - Check whether the substream is in a running state
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is in the state RUNNING, or in the
+ * state DRAINING for playback.
+ */
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
@@ -563,45 +595,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
+/**
+ * bytes_to_samples - Unit conversion of the size from bytes to samples
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->sample_bits;
}
+/**
+ * bytes_to_frames - Unit conversion of the size from bytes to frames
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->frame_bits;
}
+/**
+ * samples_to_bytes - Unit conversion of the size from samples to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in samples
+ */
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * runtime->sample_bits / 8;
}
+/**
+ * frames_to_bytes - Unit conversion of the size from frames to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in frames
+ */
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
return size * runtime->frame_bits / 8;
}
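These converters are typically used in a driver's pointer or copy callbacks. A minimal sketch; the hardware position read is a placeholder, not a real register access:

	/* Sketch: convert a hardware byte position into the frame offset
	 * that the .pointer callback must return.
	 */
	static snd_pcm_uframes_t example_pointer(struct snd_pcm_substream *substream)
	{
		struct snd_pcm_runtime *runtime = substream->runtime;
		size_t hw_bytes = 0;	/* placeholder: read the DMA position here */

		return bytes_to_frames(runtime, hw_bytes);
	}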
+/**
+ * frame_aligned - Check whether the byte size is aligned to frames
+ * @runtime: PCM runtime instance
+ * @bytes: size in bytes
+ */
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
return bytes % runtime->byte_align == 0;
}
+/**
+ * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->buffer_size);
}
+/**
+ * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->period_size);
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_playback_avail - Get the available (writable) space for playback
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -613,8 +681,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
return avail;
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_capture_avail - Get the available (readable) space for capture
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -624,11 +695,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
return avail;
}
+/**
+ * snd_pcm_playback_hw_avail - Get the queued space for playback
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_playback_avail(runtime);
}
+/**
+ * snd_pcm_capture_hw_avail - Get the free space for capture
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_capture_avail(runtime);
@@ -708,6 +787,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream)
return snd_pcm_capture_avail(runtime) == 0;
}
+/**
+ * snd_pcm_trigger_done - Mark the master substream
+ * @substream: the pcm substream instance
+ * @master: the linked master substream
+ *
+ * When multiple substreams of the same card are linked and the hardware
+ * supports the single-shot operation, the driver calls this in the loop
+ * in snd_pcm_group_for_each_entry() for marking the substream as "done".
+ * Then most of the trigger operations are performed only on the given
+ * master substream.
+ *
+ * The trigger_master mark is cleared at timestamp updates at the end
+ * of trigger operations.
+ */
static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream,
struct snd_pcm_substream *master)
{
@@ -883,6 +976,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
unsigned int rates_b);
+/**
+ * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
+ * @substream: PCM substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream,
struct snd_dma_buffer *bufp)
{
@@ -908,6 +1009,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
void snd_pcm_timer_init(struct snd_pcm_substream *substream);
void snd_pcm_timer_done(struct snd_pcm_substream *substream);
+/**
+ * snd_pcm_gettime - Fill the timespec depending on the timestamp mode
+ * @runtime: PCM runtime instance
+ * @tv: timespec to fill
+ */
static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
struct timespec *tv)
{
@@ -998,18 +1104,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
+/**
+ * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline void *
snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
+ * contiguous pages starting at the given offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ * @size: byte size to examine
+ */
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
unsigned int ofs, unsigned int size)
@@ -1017,13 +1140,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size);
}
-/* handle mmap counter - PCM mmap callback should handle this counter properly */
+/**
+ * snd_pcm_mmap_data_open - increase the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
atomic_inc(&substream->mmap_count);
}
+/**
+ * snd_pcm_mmap_data_close - decrease the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
@@ -1043,6 +1177,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_vmalloc NULL
+/**
+ * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
+ * @dma: DMA number
+ * @max: pointer to store the max size
+ */
static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
{
*max = dma < 4 ? 64 * 1024 : 128 * 1024;
@@ -1095,7 +1234,11 @@ struct snd_pcm_chmap {
void *private_data; /* optional: private data pointer */
};
-/* get the PCM substream assigned to the given chmap info */
+/**
+ * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
+ * @info: chmap information
+ * @idx: the substream number index
+ */
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
{
@@ -1122,7 +1265,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
unsigned long private_value,
struct snd_pcm_chmap **info_ret);
-/* Strong-typed conversion of pcm_format to bitwise */
+/**
+ * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
+ * @pcm_format: PCM format
+ */
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
return 1ULL << (__force int) pcm_format;
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 311dafe6cc4b..5bd2bfbf6dd1 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -165,6 +165,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+ int count);
/* main midi functions */
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 898be3a8db9a..6d8f8fba3341 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
#define WM8904_MIC_REGS 2
#define WM8904_GPIO_REGS 4
#define WM8904_DRC_REGS 4
-#define WM8904_EQ_REGS 25
+#define WM8904_EQ_REGS 24
/**
* DRC configurations are specified with a label and a set of register
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
new file mode 100644
index 000000000000..7bd03f867fca
--- /dev/null
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,904 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION "v4.1.0"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
+#define SECONDS_FOR_ASYNC_LOGOUT 10
+#define SECONDS_FOR_ASYNC_TEXT 10
+#define SECONDS_FOR_LOGOUT_COMP 15
+#define WHITE_SPACE " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS 16
+#define ISCSIT_EXTRA_TAGS 8
+#define ISCSIT_TCP_BACKLOG 256
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT 3
+#define NA_DATAOUT_TIMEOUT_MAX 60
+#define NA_DATAOUT_TIMEOUT_MIX 2
+#define NA_DATAOUT_TIMEOUT_RETRIES 5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT 15
+#define NA_NOPIN_TIMEOUT_MAX 60
+#define NA_NOPIN_TIMEOUT_MIN 3
+#define NA_NOPIN_RESPONSE_TIMEOUT 30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
+#define NA_RANDOM_R2T_OFFSETS 0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION 1
+#define TA_LOGIN_TIMEOUT 15
+#define TA_LOGIN_TIMEOUT_MAX 30
+#define TA_LOGIN_TIMEOUT_MIN 5
+#define TA_NETIF_TIMEOUT 2
+#define TA_NETIF_TIMEOUT_MAX 15
+#define TA_NETIF_TIMEOUT_MIN 2
+#define TA_GENERATE_NODE_ACLS 0
+#define TA_DEFAULT_CMDSN_DEPTH 64
+#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
+#define TA_CACHE_DYNAMIC_ACLS 0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT 1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_DEMO_MODE_DISCOVERY 1
+#define TA_DEFAULT_ERL 0
+#define TA_CACHE_CORE_NPS 0
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI 0
+#define TA_DEFAULT_FABRIC_PROT_TYPE 0
+
+#define ISCSI_IOV_DATA_BUFFER 5
+
+enum iscsit_transport_type {
+ ISCSI_TCP = 0,
+ ISCSI_SCTP_TCP = 1,
+ ISCSI_SCTP_UDP = 2,
+ ISCSI_IWARP_TCP = 3,
+ ISCSI_IWARP_SCTP = 4,
+ ISCSI_INFINIBAND = 5,
+};
+
+/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+ TARG_CONN_STATE_FREE = 0x1,
+ TARG_CONN_STATE_XPT_UP = 0x3,
+ TARG_CONN_STATE_IN_LOGIN = 0x4,
+ TARG_CONN_STATE_LOGGED_IN = 0x5,
+ TARG_CONN_STATE_IN_LOGOUT = 0x6,
+ TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
+ TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
+};
+
+/* RFC-3720 7.3.2 Session State Diagram for a Target */
+enum target_sess_state_table {
+ TARG_SESS_STATE_FREE = 0x1,
+ TARG_SESS_STATE_ACTIVE = 0x2,
+ TARG_SESS_STATE_LOGGED_IN = 0x3,
+ TARG_SESS_STATE_FAILED = 0x4,
+ TARG_SESS_STATE_IN_CONTINUE = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+ ISCSI_RX_DATA = 1,
+ ISCSI_TX_DATA = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+ DATAIN_COMPLETE_NORMAL = 1,
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+ DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+ DATAIN_WITHIN_COMMAND_RECOVERY = 1,
+ DATAIN_CONNECTION_RECOVERY = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+ TPG_STATE_FREE = 0,
+ TPG_STATE_ACTIVE = 1,
+ TPG_STATE_INACTIVE = 2,
+ TPG_STATE_COLD_RESET = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+ TIQN_STATE_ACTIVE = 1,
+ TIQN_STATE_SHUTDOWN = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+ ICF_GOT_LAST_DATAOUT = 0x00000001,
+ ICF_GOT_DATACK_SNACK = 0x00000002,
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
+ ICF_SENT_LAST_R2T = 0x00000008,
+ ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
+ ICF_CONTIG_MEMORY = 0x00000020,
+ ICF_ATTACHED_TO_RQUEUE = 0x00000040,
+ ICF_OOO_CMDSN = 0x00000080,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+ ISTATE_NO_STATE = 0,
+ ISTATE_NEW_CMD = 1,
+ ISTATE_DEFERRED_CMD = 2,
+ ISTATE_UNSOLICITED_DATA = 3,
+ ISTATE_RECEIVE_DATAOUT = 4,
+ ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+ ISTATE_RECEIVED_LAST_DATAOUT = 6,
+ ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
+ ISTATE_IN_CONNECTION_RECOVERY = 8,
+ ISTATE_RECEIVED_TASKMGT = 9,
+ ISTATE_SEND_ASYNCMSG = 10,
+ ISTATE_SENT_ASYNCMSG = 11,
+ ISTATE_SEND_DATAIN = 12,
+ ISTATE_SEND_LAST_DATAIN = 13,
+ ISTATE_SENT_LAST_DATAIN = 14,
+ ISTATE_SEND_LOGOUTRSP = 15,
+ ISTATE_SENT_LOGOUTRSP = 16,
+ ISTATE_SEND_NOPIN = 17,
+ ISTATE_SENT_NOPIN = 18,
+ ISTATE_SEND_REJECT = 19,
+ ISTATE_SENT_REJECT = 20,
+ ISTATE_SEND_R2T = 21,
+ ISTATE_SENT_R2T = 22,
+ ISTATE_SEND_R2T_RECOVERY = 23,
+ ISTATE_SENT_R2T_RECOVERY = 24,
+ ISTATE_SEND_LAST_R2T = 25,
+ ISTATE_SENT_LAST_R2T = 26,
+ ISTATE_SEND_LAST_R2T_RECOVERY = 27,
+ ISTATE_SENT_LAST_R2T_RECOVERY = 28,
+ ISTATE_SEND_STATUS = 29,
+ ISTATE_SEND_STATUS_BROKEN_PC = 30,
+ ISTATE_SENT_STATUS = 31,
+ ISTATE_SEND_STATUS_RECOVERY = 32,
+ ISTATE_SENT_STATUS_RECOVERY = 33,
+ ISTATE_SEND_TASKMGTRSP = 34,
+ ISTATE_SENT_TASKMGTRSP = 35,
+ ISTATE_SEND_TEXTRSP = 36,
+ ISTATE_SENT_TEXTRSP = 37,
+ ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+ ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+ ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
+ ISTATE_REMOVE = 41,
+ ISTATE_FREE = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+ CMDSN_ERROR_CANNOT_RECOVER = -1,
+ CMDSN_NORMAL_OPERATION = 0,
+ CMDSN_LOWER_THAN_EXP = 1,
+ CMDSN_HIGHER_THAN_EXP = 2,
+ CMDSN_MAXCMDSN_OVERRUN = 3,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+ IMMEDIATE_DATA_CANNOT_RECOVER = -1,
+ IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+ IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+ DATAOUT_CANNOT_RECOVER = -1,
+ DATAOUT_NORMAL = 0,
+ DATAOUT_SEND_R2T = 1,
+ DATAOUT_SEND_TO_TRANSPORT = 2,
+ DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+ NAF_USERID_SET = 0x01,
+ NAF_PASSWORD_SET = 0x02,
+ NAF_USERID_IN_SET = 0x04,
+ NAF_PASSWORD_IN_SET = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+ ISCSI_TF_RUNNING = 0x01,
+ ISCSI_TF_STOP = 0x02,
+ ISCSI_TF_EXPIRED = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+ ISCSI_NP_THREAD_ACTIVE = 1,
+ ISCSI_NP_THREAD_INACTIVE = 2,
+ ISCSI_NP_THREAD_RESET = 3,
+ ISCSI_NP_THREAD_SHUTDOWN = 4,
+ ISCSI_NP_THREAD_EXIT = 5,
+};
+
+struct iscsi_conn_ops {
+ u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
+ u8 DataDigest; /* [0,1] == [None,CRC32C] */
+ u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
+ u8 OFMarker; /* [0,1] == [No,Yes] */
+ u8 IFMarker; /* [0,1] == [No,Yes] */
+ u32 OFMarkInt; /* [1..65535] */
+ u32 IFMarkInt; /* [1..65535] */
+ /*
+ * iSER specific connection parameters
+ */
+ u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
+};
+
+struct iscsi_sess_ops {
+ char InitiatorName[224];
+ char InitiatorAlias[256];
+ char TargetName[224];
+ char TargetAlias[256];
+ char TargetAddress[256];
+ u16 TargetPortalGroupTag; /* [0..65535] */
+ u16 MaxConnections; /* [1..65535] */
+ u8 InitialR2T; /* [0,1] == [No,Yes] */
+ u8 ImmediateData; /* [0,1] == [No,Yes] */
+ u32 MaxBurstLength; /* [512..2**24-1] */
+ u32 FirstBurstLength; /* [512..2**24-1] */
+ u16 DefaultTime2Wait; /* [0..3600] */
+ u16 DefaultTime2Retain; /* [0..3600] */
+ u16 MaxOutstandingR2T; /* [1..65535] */
+ u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
+ u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
+ u8 ErrorRecoveryLevel; /* [0..2] */
+ u8 SessionType; /* [0,1] == [Normal,Discovery]*/
+ /*
+ * iSER specific session parameters
+ */
+ u8 RDMAExtensions; /* [0,1] == [No,Yes] */
+};
+
+struct iscsi_queue_req {
+ int state;
+ struct iscsi_cmd *cmd;
+ struct list_head qr_list;
+};
+
+struct iscsi_data_count {
+ int data_length;
+ int sync_and_steering;
+ enum data_count_type type;
+ u32 iov_count;
+ u32 ss_iov_count;
+ u32 ss_marker_count;
+ struct kvec *iov;
+};
+
+struct iscsi_param_list {
+ bool iser;
+ struct list_head param_list;
+ struct list_head extra_response_list;
+};
+
+struct iscsi_datain_req {
+ enum datain_req_comp_table dr_complete;
+ int generate_recovery_values;
+ enum datain_req_rec_table recovery;
+ u32 begrun;
+ u32 runlength;
+ u32 data_length;
+ u32 data_offset;
+ u32 data_sn;
+ u32 next_burst_len;
+ u32 read_data_done;
+ u32 seq_send_order;
+ struct list_head cmd_datain_node;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+ u16 cid;
+ u32 batch_count;
+ u32 cmdsn;
+ u32 exp_cmdsn;
+ struct iscsi_cmd *cmd;
+ struct list_head ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+ int seq_complete;
+ int recovery_r2t;
+ int sent_r2t;
+ u32 r2t_sn;
+ u32 offset;
+ u32 targ_xfer_tag;
+ u32 xfer_len;
+ struct list_head r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+ enum iscsi_timer_flags_table dataout_timer_flags;
+ /* DataOUT timeout retries */
+ u8 dataout_timeout_retries;
+ /* Within command recovery count */
+ u8 error_recovery_count;
+	/* iSCSI dependent state for out of order CmdSNs */
+ enum cmd_i_state_table deferred_i_state;
+ /* iSCSI dependent state */
+ enum cmd_i_state_table i_state;
+ /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+ u8 immediate_cmd;
+ /* Immediate data present */
+ u8 immediate_data;
+ /* iSCSI Opcode */
+ u8 iscsi_opcode;
+ /* iSCSI Response Code */
+ u8 iscsi_response;
+ /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_reason;
+ /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_response;
+ /* MaxCmdSN has been incremented */
+ u8 maxcmdsn_inc;
+ /* Immediate Unsolicited Dataout */
+ u8 unsolicited_data;
+ /* Reject reason code */
+ u8 reject_reason;
+ /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+ u16 logout_cid;
+ /* Command flags */
+ enum cmd_flags_table cmd_flags;
+ /* Initiator Task Tag assigned from Initiator */
+ itt_t init_task_tag;
+ /* Target Transfer Tag assigned from Target */
+ u32 targ_xfer_tag;
+ /* CmdSN assigned from Initiator */
+ u32 cmd_sn;
+ /* ExpStatSN assigned from Initiator */
+ u32 exp_stat_sn;
+ /* StatSN assigned to this ITT */
+ u32 stat_sn;
+ /* DataSN Counter */
+ u32 data_sn;
+ /* R2TSN Counter */
+ u32 r2t_sn;
+ /* Last DataSN acknowledged via DataAck SNACK */
+ u32 acked_data_sn;
+ /* Used for echoing NOPOUT ping data */
+ u32 buf_ptr_size;
+ /* Used to store DataDigest */
+ u32 data_crc;
+ /* Counter for MaxOutstandingR2T */
+ u32 outstanding_r2ts;
+ /* Next R2T Offset when DataSequenceInOrder=Yes */
+ u32 r2t_offset;
+ /* Iovec current and orig count for iscsi_cmd->iov_data */
+ u32 iov_data_count;
+ u32 orig_iov_data_count;
+ /* Number of miscellaneous iovecs used for IP stack calls */
+ u32 iov_misc_count;
+ /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_count;
+ /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ u32 pdu_send_order;
+ /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_start;
+ /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ u32 seq_send_order;
+ /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_count;
+ /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_no;
+ /* Lowest offset in current DataOUT sequence */
+ u32 seq_start_offset;
+ /* Highest offset in current DataOUT sequence */
+ u32 seq_end_offset;
+ /* Total size in bytes received so far of READ data */
+ u32 read_data_done;
+ /* Total size in bytes received so far of WRITE data */
+ u32 write_data_done;
+ /* Counter for FirstBurstLength key */
+ u32 first_burst_len;
+ /* Counter for MaxBurstLength key */
+ u32 next_burst_len;
+ /* Transfer size used for IP stack calls */
+ u32 tx_size;
+ /* Buffer used for various purposes */
+ void *buf_ptr;
+ /* Used by SendTargets=[iqn.,eui.] discovery */
+ void *text_in_ptr;
+ /* See include/linux/dma-mapping.h */
+ enum dma_data_direction data_direction;
+ /* iSCSI PDU Header + CRC */
+ unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+ /* Number of times struct iscsi_cmd is present in immediate queue */
+ atomic_t immed_queue_count;
+ atomic_t response_queue_count;
+ spinlock_t datain_lock;
+ spinlock_t dataout_timeout_lock;
+ /* spinlock for protecting struct iscsi_cmd->i_state */
+ spinlock_t istate_lock;
+ /* spinlock for adding within command recovery entries */
+ spinlock_t error_lock;
+ /* spinlock for adding R2Ts */
+ spinlock_t r2t_lock;
+ /* DataIN List */
+ struct list_head datain_list;
+ /* R2T List */
+ struct list_head cmd_r2t_list;
+ /* Timer for DataOUT */
+ struct timer_list dataout_timer;
+ /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+ struct kvec *iov_data;
+ /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS 5
+ struct kvec iov_misc[ISCSI_MISC_IOVECS];
+ /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_list;
+ /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_ptr;
+ /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_list;
+ /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_ptr;
+ /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+ struct iscsi_tmr_req *tmr_req;
+	/* Connection this command is allegiant to */
+ struct iscsi_conn *conn;
+ /* Pointer to connection recovery entry */
+ struct iscsi_conn_recovery *cr;
+ /* Session the command is part of, used for connection recovery */
+ struct iscsi_session *sess;
+ /* list_head for connection list */
+ struct list_head i_conn_node;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
+ unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+ u32 padding;
+ u8 pad_bytes[4];
+
+ struct scatterlist *first_data_sg;
+ u32 first_data_sg_off;
+ u32 kmapped_nents;
+ sense_reason_t sense_reason;
+} ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+ bool task_reassign:1;
+ u32 exp_data_sn;
+ struct iscsi_cmd *ref_cmd;
+ struct iscsi_conn_recovery *conn_recovery;
+ struct se_tmr_req *se_tmr_req;
+};
+
+struct iscsi_conn {
+ wait_queue_head_t queues_wq;
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+ u8 conn_state;
+ u8 conn_logout_reason;
+ u8 network_transport;
+ enum iscsi_timer_flags_table nopin_timer_flags;
+ enum iscsi_timer_flags_table nopin_response_timer_flags;
+ /* Used to know what thread encountered a transport failure */
+ u8 which_thread;
+ /* connection id assigned by the Initiator */
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
+ u16 local_port;
+ int net_size;
+ int login_family;
+ u32 auth_id;
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ itt_t login_itt;
+ u32 exp_statsn;
+ /* Per connection status sequence number */
+ u32 stat_sn;
+ /* IFMarkInt's Current Value */
+ u32 if_marker;
+ /* OFMarkInt's Current Value */
+ u32 of_marker;
+ /* Used for calculating OFMarker offset to next PDU */
+ u32 of_marker_offset;
+#define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ unsigned char local_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+ atomic_t conn_logout_remove;
+ atomic_t connection_exit;
+ atomic_t connection_recovery;
+ atomic_t connection_reinstatement;
+ atomic_t connection_wait_rcfr;
+ atomic_t sleep_on_conn_wait_comp;
+ atomic_t transport_failed;
+ struct completion conn_post_wait_comp;
+ struct completion conn_wait_comp;
+ struct completion conn_wait_rcfr_comp;
+ struct completion conn_waiting_on_uc_comp;
+ struct completion conn_logout_comp;
+ struct completion tx_half_close_comp;
+ struct completion rx_half_close_comp;
+ /* socket used by this connection */
+ struct socket *sock;
+ void (*orig_data_ready)(struct sock *);
+ void (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE 1
+#define LOGIN_FLAGS_CLOSED 2
+#define LOGIN_FLAGS_READY 4
+ unsigned long login_flags;
+ struct delayed_work login_work;
+ struct delayed_work login_cleanup_work;
+ struct iscsi_login *login;
+ struct timer_list nopin_timer;
+ struct timer_list nopin_response_timer;
+ struct timer_list transport_timer;
+ struct task_struct *login_kworker;
+ /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+ spinlock_t cmd_lock;
+ spinlock_t conn_usage_lock;
+ spinlock_t immed_queue_lock;
+ spinlock_t nopin_timer_lock;
+ spinlock_t response_queue_lock;
+ spinlock_t state_lock;
+ /* libcrypto RX and TX contexts for crc32c */
+ struct hash_desc conn_rx_hash;
+ struct hash_desc conn_tx_hash;
+ /* Used for scheduling TX and RX connection kthreads */
+ cpumask_var_t conn_cpumask;
+ unsigned int conn_rx_reset_cpumask:1;
+ unsigned int conn_tx_reset_cpumask:1;
+ /* list_head of struct iscsi_cmd for this connection */
+ struct list_head conn_cmd_list;
+ struct list_head immed_queue_list;
+ struct list_head response_queue_list;
+ struct iscsi_conn_ops *conn_ops;
+ struct iscsi_login *conn_login;
+ struct iscsit_transport *conn_transport;
+ struct iscsi_param_list *param_list;
+ /* Used for per connection auth state machine */
+ void *auth_protocol;
+ void *context;
+ struct iscsi_login_thread_s *login_thread;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ /* Pointer to parent session */
+ struct iscsi_session *sess;
+ int bitmap_id;
+ int rx_thread_active;
+ struct task_struct *rx_thread;
+ struct completion rx_login_comp;
+ int tx_thread_active;
+ struct task_struct *tx_thread;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+ u16 cid;
+ u32 cmd_count;
+ u32 maxrecvdatasegmentlength;
+ u32 maxxmitdatasegmentlength;
+ int ready_for_reallegiance;
+ struct list_head conn_recovery_cmd_list;
+ spinlock_t conn_recovery_cmd_lock;
+ struct timer_list time2retain_timer;
+ struct iscsi_session *sess;
+ struct list_head cr_list;
+} ____cacheline_aligned;
+
+struct iscsi_session {
+ u8 initiator_vendor;
+ u8 isid[6];
+ enum iscsi_timer_flags_table time2retain_timer_flags;
+ u8 version_active;
+ u16 cid_called;
+ u16 conn_recovery_count;
+ u16 tsih;
+ /* state session is currently in */
+ u32 session_state;
+ /* session wide counter: initiator assigned task tag */
+ itt_t init_task_tag;
+ /* session wide counter: target assigned task tag */
+ u32 targ_xfer_tag;
+ u32 cmdsn_window;
+
+ /* protects cmdsn values */
+ struct mutex cmdsn_mutex;
+ /* session wide counter: expected command sequence number */
+ u32 exp_cmd_sn;
+ /* session wide counter: maximum allowed command sequence number */
+ u32 max_cmd_sn;
+ struct list_head sess_ooo_cmdsn_list;
+
+ /* LIO specific session ID */
+ u32 sid;
+ char auth_type[8];
+ /* unique within the target */
+ int session_index;
+ /* Used for session reference counting */
+ int session_usage_count;
+ int session_waiting_on_uc;
+ atomic_long_t cmd_pdus;
+ atomic_long_t rsp_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+ atomic_long_t conn_digest_errors;
+ atomic_long_t conn_timeout_errors;
+ u64 creation_time;
+ /* Number of active connections */
+ atomic_t nconn;
+ atomic_t session_continuation;
+ atomic_t session_fall_back_to_erl0;
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+ atomic_t sleep_on_sess_wait_comp;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+ struct list_head cr_inactive_list;
+ spinlock_t conn_lock;
+ spinlock_t cr_a_lock;
+ spinlock_t cr_i_lock;
+ spinlock_t session_usage_lock;
+ spinlock_t ttt_lock;
+ struct completion async_msg_comp;
+ struct completion reinstatement_comp;
+ struct completion session_wait_comp;
+ struct completion session_waiting_on_uc_comp;
+ struct timer_list time2retain_timer;
+ struct iscsi_sess_ops *sess_ops;
+ struct se_session *se_sess;
+ struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+ u8 auth_complete;
+ u8 checked_for_existing;
+ u8 current_stage;
+ u8 leading_connection;
+ u8 first_request;
+ u8 version_min;
+ u8 version_max;
+ u8 login_complete;
+ u8 login_failed;
+ bool zero_tsih;
+ char isid[6];
+ u32 cmd_sn;
+ itt_t init_task_tag;
+ u32 initial_exp_statsn;
+ u32 rsp_length;
+ u16 cid;
+ u16 tsih;
+ char req[ISCSI_HDR_LEN];
+ char rsp[ISCSI_HDR_LEN];
+ char *req_buf;
+ char *rsp_buf;
+ struct iscsi_conn *conn;
+ struct iscsi_np *np;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+ u32 dataout_timeout;
+ u32 dataout_timeout_retries;
+ u32 default_erl;
+ u32 nopin_timeout;
+ u32 nopin_response_timeout;
+ u32 random_datain_pdu_offsets;
+ u32 random_datain_seq_offsets;
+ u32 random_r2t_offsets;
+ u32 tmr_cold_reset;
+ u32 tmr_warm_reset;
+ struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+ enum naf_flags_table naf_flags;
+ int authenticate_target;
+ /* Used for iscsit_global->discovery_auth,
+ * set to zero (auth disabled) by default */
+ int enforce_discovery_auth;
+#define MAX_USER_LEN 256
+#define MAX_PASS_LEN 256
+ char userid[MAX_USER_LEN];
+ char password[MAX_PASS_LEN];
+ char userid_mutual[MAX_USER_LEN];
+ char password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+ struct config_group iscsi_sess_stats_group;
+ struct config_group iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+ struct iscsi_node_attrib node_attrib;
+ struct iscsi_node_auth node_auth;
+ struct iscsi_node_stat_grps node_stat_grps;
+ struct se_node_acl se_node_acl;
+};
+
+struct iscsi_tpg_attrib {
+ u32 authentication;
+ u32 login_timeout;
+ u32 netif_timeout;
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 default_cmdsn_depth;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_discovery;
+ u32 default_erl;
+ u8 t10_pi;
+ u32 fabric_prot_type;
+ struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+ int np_network_transport;
+ int np_ip_proto;
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ bool enabled;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+ u16 np_port;
+ spinlock_t np_thread_lock;
+ struct completion np_restart_comp;
+ struct socket *np_socket;
+ struct __kernel_sockaddr_storage np_sockaddr;
+ struct task_struct *np_thread;
+ struct timer_list np_login_timer;
+ void *np_context;
+ struct iscsit_transport *np_transport;
+ struct list_head np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+ struct iscsi_np *tpg_np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np_parent;
+ struct list_head tpg_np_list;
+ struct list_head tpg_np_child_list;
+ struct list_head tpg_np_parent_list;
+ struct se_tpg_np se_tpg_np;
+ spinlock_t tpg_np_parent_lock;
+ struct completion tpg_np_comp;
+ struct kref tpg_np_kref;
+};
+
+struct iscsi_portal_group {
+ unsigned char tpg_chap_id;
+ /* TPG State */
+ enum tpg_state_table tpg_state;
+ /* Target Portal Group Tag */
+ u16 tpgt;
+ /* Id assigned to target sessions */
+ u16 ntsih;
+ /* Number of active sessions */
+ u32 nsessions;
+ /* Number of Network Portals available for this TPG */
+ u32 num_tpg_nps;
+ /* Per TPG LIO specific session ID. */
+ u32 sid;
+ /* Spinlock for adding/removing Network Portals */
+ spinlock_t tpg_np_lock;
+ spinlock_t tpg_state_lock;
+ struct se_portal_group tpg_se_tpg;
+ struct mutex tpg_access_lock;
+ struct semaphore np_login_sem;
+ struct iscsi_tpg_attrib tpg_attrib;
+ struct iscsi_node_auth tpg_demo_auth;
+ /* Pointer to default list of iSCSI parameters for TPG */
+ struct iscsi_param_list *param_list;
+ struct iscsi_tiqn *tpg_tiqn;
+ struct list_head tpg_gnp_list;
+ struct list_head tpg_list;
+} ____cacheline_aligned;
+
+struct iscsi_wwn_stat_grps {
+ struct config_group iscsi_stat_group;
+ struct config_group iscsi_instance_group;
+ struct config_group iscsi_sess_err_group;
+ struct config_group iscsi_tgt_attr_group;
+ struct config_group iscsi_login_stats_group;
+ struct config_group iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN 224
+ unsigned char tiqn[ISCSI_IQN_LEN];
+ enum tiqn_state_table tiqn_state;
+ int tiqn_access_count;
+ u32 tiqn_active_tpgs;
+ u32 tiqn_ntpgs;
+ u32 tiqn_num_tpg_nps;
+ u32 tiqn_nsessions;
+ struct list_head tiqn_list;
+ struct list_head tiqn_tpg_list;
+ spinlock_t tiqn_state_lock;
+ spinlock_t tiqn_tpg_lock;
+ struct se_wwn tiqn_wwn;
+ struct iscsi_wwn_stat_grps tiqn_stat_grps;
+ int tiqn_index;
+ struct iscsi_sess_err_stats sess_err_stats;
+ struct iscsi_login_stats login_stats;
+ struct iscsi_logout_stats logout_stats;
+} ____cacheline_aligned;
+
+struct iscsit_global {
+ /* In core shutdown */
+ u32 in_shutdown;
+ u32 active_ts;
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
+#define ISCSIT_BITMAP_BITS 262144
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
+ spinlock_t ts_bitmap_lock;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+};
+
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+ u32 ttt;
+
+ spin_lock_bh(&session->ttt_lock);
+ ttt = session->targ_xfer_tag++;
+ if (ttt == 0xFFFFFFFF)
+ ttt = session->targ_xfer_tag++;
+ spin_unlock_bh(&session->ttt_lock);
+
+ return ttt;
+}
+
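session_get_next_ttt() skips 0xFFFFFFFF because that value is the reserved "no target transfer tag" per RFC 3720. A hedged sketch of a caller filling an R2T; the function is invented, while the fields come from the structures above:

	/* Sketch: allocate a fresh TTT for an outgoing R2T. */
	static void example_fill_r2t(struct iscsi_cmd *cmd, struct iscsi_r2t *r2t)
	{
		r2t->targ_xfer_tag = session_get_next_ttt(cmd->sess);
		r2t->r2t_sn = cmd->r2t_sn++;
	}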
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c4b85a5889ba..ad9ce86428a6 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -520,7 +520,7 @@ struct se_cmd {
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
u32, enum dma_data_direction);
- sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -535,6 +535,8 @@ struct se_cmd {
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
+#define CMD_T_TAS (1 << 10)
+#define CMD_T_FABRIC_STOP (1 << 11)
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 22a4e98eec80..e21dbdb0fe3d 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -126,8 +126,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
-int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
-int target_put_sess_cmd(struct se_session *, struct se_cmd *);
+int target_get_sess_cmd(struct se_cmd *, bool);
+int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 50d0fb41a3bf..76d2edea5bd1 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1034,6 +1034,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
struct drm_radeon_info {
uint32_t request;
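RADEON_INFO_VA_UNMAP_WORKING lets userspace ask whether unmapping GPU virtual address
ranges works reliably on the running kernel. A hedged userspace sketch of the query
(assumes the libdrm-installed radeon_drm.h and an already-open radeon DRM fd; not part
of the patch):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/radeon_drm.h>

	static int va_unmap_working(int drm_fd)
	{
		uint32_t result = 0;
		struct drm_radeon_info info = {
			.request = RADEON_INFO_VA_UNMAP_WORKING,
			.value   = (uintptr_t)&result,	/* kernel writes the answer here */
		};

		if (ioctl(drm_fd, DRM_IOCTL_RADEON_INFO, &info))
			return 0;	/* older kernel: query not recognised */
		return result != 0;
	}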
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 0bdb77e16875..fe017d125839 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -436,6 +436,9 @@ enum {
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
__IFLA_VF_MAX,
};
@@ -480,6 +483,11 @@ struct ifla_vf_link_state {
__u32 link_state;
};
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
/* VF ports management section
*
* Nested layout of set/get msg is:
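The struct above is the payload of the new IFLA_VF_RSS_QUERY_EN attribute, carried in
the same per-VF nesting as the other IFLA_VF_* attributes. A hedged kernel-side sketch
of emitting it (variable names are illustrative, not from the patch):

	struct ifla_vf_rss_query_en vf_rss_query_en = {
		.vf      = vf_index,	/* which VF this describes */
		.setting = enabled,	/* non-zero: RSS table/key may be queried */
	};

	if (nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en), &vf_rss_query_en))
		goto nla_put_failure;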
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a1d0cc38ff2..7dd8d80edfb6 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X Table entry format */
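PCI_MSIX_FLAGS_BIRMASK survives only as a deprecated alias for PCI_MSIX_PBA_BIR; new
code splits the PBA dword with the PBA-prefixed names. A hedged sketch of the usual
decode (illustrative, not from the patch):

	u32 pba_dword;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_PBA, &pba_dword);
	bir = (u8)(pba_dword & PCI_MSIX_PBA_BIR);	/* BAR that holds the PBA */
	pba_dword &= PCI_MSIX_PBA_OFFSET;		/* byte offset inside that BAR */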
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index abe5f4bd4d82..019ba1e0799a 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -128,11 +128,12 @@ struct usbdevfs_hub_portinfo {
char port [127]; /* e.g. port 3 connects to device 27 */
};
-/* Device capability flags */
+/* System and bus capability flags */
#define USBDEVFS_CAP_ZERO_PACKET 0x01
#define USBDEVFS_CAP_BULK_CONTINUATION 0x02
#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04
#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
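Userspace should probe for USBDEVFS_CAP_REAP_AFTER_DISCONNECT before relying on being
able to reap outstanding URBs once the device has disconnected. A hedged userspace
sketch (not part of the patch):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/usbdevice_fs.h>

	static int can_reap_after_disconnect(int usbfs_fd)
	{
		uint32_t caps = 0;

		if (ioctl(usbfs_fd, USBDEVFS_GET_CAPABILITIES, &caps) < 0)
			return 0;	/* very old kernel: no capability ioctl */
		return !!(caps & USBDEVFS_CAP_REAP_AFTER_DISCONNECT);
	}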
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..63467ce7c3eb 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -175,70 +175,80 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, 0) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, 0) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, 0) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, 0) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
}
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960a25..f18490985fc8 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@ struct sched_watchdog {
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+/*
+ * The domain has asked Xen to perform a 'soft reset' on it. The expected
+ * behavior is to reset internal Xen state for the domain, returning it to the
+ * point where it was created, but leaving the domain's memory contents and
+ * vCPU contexts intact. This allows the domain to start over and set up all
+ * Xen-specific interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
#endif /* __XEN_PUBLIC_SCHED_H__ */
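A guest requests the new reason through the ordinary shutdown hypercall. A hedged
guest-kernel sketch (assumes the usual HYPERVISOR_sched_op() wrapper; not part of the
patch):

	struct sched_shutdown r = { .reason = SHUTDOWN_soft_reset };

	/* Xen keeps the domain's memory and vCPU contexts but resets the
	 * Xen-side state for the domain, as described in the header comment. */
	HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);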
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 7491ee5d8164..83338210ee04 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void)
}
#endif
+#ifdef CONFIG_PREEMPT
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+}
+
+#else
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, true);
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+}
+
+#endif /* CONFIG_PREEMPT */
+
#endif /* INCLUDE_XEN_OPS_H */
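These helpers are meant to bracket long-running hypercalls issued on behalf of
userspace (e.g. via privcmd), so that non-preemptible kernels can still schedule while
such a hypercall is in flight; with CONFIG_PREEMPT they compile away to nothing. A
hedged caller sketch (illustrative, not part of the patch):

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();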