author     Trond Myklebust <Trond.Myklebust@netapp.com>   2011-01-10 14:48:02 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2011-01-10 14:48:02 -0500
commit     68c404b18f6fba404b2753622d0459c68ee128ae (patch)
tree       c1ec0bb12f19d91071b461cc2831d9d3dd4c74f3 /include
parent     d035c36c58dd9183ad6aa7875dea89893faedb55 (diff)
parent     6650239a4b01077e80d5a4468562756d77afaa59 (diff)
Merge branch 'bugfixes' into nfs-for-2.6.38
Conflicts:
	fs/nfs/nfs2xdr.c
	fs/nfs/nfs3xdr.c
	fs/nfs/nfs4xdr.c
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h                    10
-rw-r--r--  include/linux/bootmem.h                    2
-rw-r--r--  include/linux/ceph/libceph.h               6
-rw-r--r--  include/linux/cnt32_to_63.h               20
-rw-r--r--  include/linux/dmaengine.h                 13
-rw-r--r--  include/linux/fanotify.h                  10
-rw-r--r--  include/linux/fsnotify.h                   3
-rw-r--r--  include/linux/fsnotify_backend.h           2
-rw-r--r--  include/linux/ioport.h                     2
-rw-r--r--  include/linux/kthread.h                   45
-rw-r--r--  include/linux/netlink.h                    2
-rw-r--r--  include/linux/perf_event.h                 1
-rw-r--r--  include/linux/pm_runtime.h                 3
-rw-r--r--  include/linux/sched.h                      2
-rw-r--r--  include/linux/ssb/ssb_driver_gige.h       17
-rw-r--r--  include/linux/sunrpc/xdr.h                 4
-rw-r--r--  include/linux/taskstats.h                  3
-rw-r--r--  include/linux/unaligned/packed_struct.h    6
-rw-r--r--  include/media/saa7146.h                    2
-rw-r--r--  include/media/v4l2-device.h                2
-rw-r--r--  include/media/wm8775.h                     3
-rw-r--r--  include/net/flow.h                         1
-rw-r--r--  include/net/ip6_route.h                   10
-rw-r--r--  include/net/mac80211.h                    28
-rw-r--r--  include/net/pkt_cls.h                      4
-rw-r--r--  include/net/sch_generic.h                  6
-rw-r--r--  include/net/sock.h                         3
27 files changed, 154 insertions, 56 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4..36ab42c9bb9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
- unsigned char no_cluster;
+ unsigned char cluster;
signed char discard_zeroes_data;
};
@@ -380,7 +380,6 @@ struct request_queue
#endif
};
-#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+ return q->limits.cluster;
+}
+
/*
* We regard a request as sync, if either a read or a sync write
*/
@@ -805,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
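
With QUEUE_FLAG_CLUSTER gone, callers are expected to read the setting through the new blk_queue_cluster() helper. A minimal illustrative sketch (hypothetical driver code, not part of this patch):

#include <linux/blkdev.h>

static void example_setup_segments(struct request_queue *q)
{
        /* segment merging across bio_vecs is now a queue limit, not a queue flag */
        if (blk_queue_cluster(q))
                blk_queue_max_segment_size(q, 64 * 1024);
        else
                blk_queue_max_segment_size(q, PAGE_SIZE);
}
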
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 266ab929123..499dfe982a0 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_align(x, align) \
+ __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages(x) \
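
The new alloc_bootmem_align() behaves like alloc_bootmem() but takes an explicit alignment instead of SMP_CACHE_BYTES. A hedged sketch of a hypothetical early-boot caller:

#include <linux/bootmem.h>

static void * __init example_alloc_table(void)
{
        /* 4 KiB allocation aligned to 4 KiB; memory is zeroed, panics on failure */
        return alloc_bootmem_align(4096, 4096);
}
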
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9e76d35670d..72c72bfccb8 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -227,8 +227,10 @@ extern int ceph_open_session(struct ceph_client *client);
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_get_direct_page_vector(const char __user *data,
- int num_pages);
-extern void ceph_put_page_vector(struct page **pages, int num_pages);
+ int num_pages,
+ bool write_page);
+extern void ceph_put_page_vector(struct page **pages, int num_pages,
+ bool dirty);
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_copy_user_to_page_vector(struct page **pages,
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 7605fdd1eb6..e3d8bf26e5e 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -61,13 +61,31 @@ union cnt32_to_63 {
*
* 2) this code must not be preempted for a duration longer than the
* 32-bit counter half period minus the longest period between two
- * calls to this code.
+ * calls to this code;
*
* Those requirements ensure proper update to the state bit in memory.
* This is usually not a problem in practice, but if it is then a kernel
* timer should be scheduled to manage for this code to be executed often
* enough.
*
+ * And finally:
+ *
+ * 3) the cnt_lo argument must be seen as a globally incrementing value,
+ * meaning that it should be a direct reference to the counter data which
+ * can be evaluated according to a specific ordering within the macro,
+ * and not the result of a previous evaluation stored in a variable.
+ *
+ * For example, this is wrong:
+ *
+ * u32 partial = get_hw_count();
+ * u64 full = cnt32_to_63(partial);
+ * return full;
+ *
+ * This is fine:
+ *
+ * u64 full = cnt32_to_63(get_hw_count());
+ * return full;
+ *
* Note that the top bit (bit 63) in the returned value should be considered
* as garbage. It is not cleared here because callers are likely to use a
* multiplier on the returned value which can get rid of the top bit
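
Read together with requirement 3 above: a correct caller passes the hardware read straight into the macro. A hypothetical user might look like this (illustrative sketch only; example_read_hw_counter() is made up):

#include <linux/cnt32_to_63.h>

static u32 example_read_hw_counter(void);   /* hypothetical 32-bit free-running counter */

static u64 example_extend_counter(void)
{
        /* the counter is read inside the macro, never cached in a local first */
        u64 cycles = cnt32_to_63(example_read_hw_counter());

        return cycles & ~(1ULL << 63);       /* bit 63 is garbage, mask it off */
}
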
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 9d8688b92d8..8cd00ad98d3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -824,6 +824,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
@@ -831,7 +833,14 @@ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descript
}
static inline void dma_issue_pending_all(void)
{
- do { } while (0);
+}
+static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
+{
+ return NULL;
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
}
#endif
@@ -842,8 +851,6 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-void dma_release_channel(struct dma_chan *chan);
/* --- Helper iov-locking functions --- */
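
Moving the __dma_request_channel()/dma_release_channel() declarations inside the #ifdef gives !CONFIG_DMA_ENGINE builds working no-op stubs, so a caller compiles either way. A hypothetical caller (sketch only):

#include <linux/dmaengine.h>

static struct dma_chan *example_grab_memcpy_channel(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        /* returns NULL when DMA engine support is compiled out */
        return dma_request_channel(mask, NULL, NULL);
}

static void example_drop_channel(struct dma_chan *chan)
{
        if (chan)
                dma_release_channel(chan);
}
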
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 0f0121467fc..6c6133f76e1 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -83,11 +83,13 @@
FAN_ALL_PERM_EVENTS |\
FAN_Q_OVERFLOW)
-#define FANOTIFY_METADATA_VERSION 2
+#define FANOTIFY_METADATA_VERSION 3
struct fanotify_event_metadata {
__u32 event_len;
- __u32 vers;
+ __u8 vers;
+ __u8 reserved;
+ __u16 metadata_len;
__aligned_u64 mask;
__s32 fd;
__s32 pid;
@@ -96,11 +98,13 @@ struct fanotify_event_metadata {
struct fanotify_response {
__s32 fd;
__u32 response;
-} __attribute__ ((packed));
+};
/* Legit userspace responses to a _PERM event */
#define FAN_ALLOW 0x01
#define FAN_DENY 0x02
+/* No fd set in event */
+#define FAN_NOFD -1
/* Helper functions to deal with fanotify_event_metadata buffers */
#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
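
Bumping the metadata version and adding FAN_NOFD affects readers of the event stream. A hypothetical userspace read loop that honours both changes (illustrative sketch, not from this patch):

#include <linux/fanotify.h>
#include <unistd.h>

static void example_drain_events(int fanotify_fd)
{
        char buf[4096];
        ssize_t len = read(fanotify_fd, buf, sizeof(buf));
        struct fanotify_event_metadata *md = (struct fanotify_event_metadata *)buf;

        while (FAN_EVENT_OK(md, len)) {
                if (md->vers != FANOTIFY_METADATA_VERSION)
                        break;                 /* built against a different ABI */
                if (md->fd != FAN_NOFD)
                        close(md->fd);         /* fd is only valid if one was queued */
                md = FAN_EVENT_NEXT(md, len);
        }
}
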
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 5c185fa2708..b10bcdeaef7 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -235,9 +235,6 @@ static inline void fsnotify_open(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- /* FMODE_NONOTIFY must never be set from user */
- file->f_mode &= ~FMODE_NONOTIFY;
-
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0a68f924f06..7380763595d 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -166,7 +166,7 @@ struct fsnotify_group {
struct mutex access_mutex;
struct list_head access_list;
wait_queue_head_t access_waitq;
- bool bypass_perm; /* protected by access_mutex */
+ atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index d377ea815d4..e9bb22cba76 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -112,7 +112,6 @@ struct resource_list {
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
-extern int resource_alloc_from_bottom;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
@@ -124,6 +123,7 @@ extern void reserve_region_with_split(struct resource *root,
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
+extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 685ea65eb80..ce0775aa64c 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -81,16 +81,41 @@ struct kthread_work {
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
-static inline void init_kthread_worker(struct kthread_worker *worker)
-{
- *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
-}
-
-static inline void init_kthread_work(struct kthread_work *work,
- kthread_work_func_t fn)
-{
- *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
-}
+/*
+ * kthread_worker.lock and kthread_work.done need their own lockdep class
+ * keys if they are defined on stack with lockdep enabled. Use the
+ * following macros when defining them on stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
+ ({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
+ struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+# define KTHREAD_WORK_INIT_ONSTACK(work, fn) \
+ ({ init_kthread_work((&work), fn); work; })
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \
+ struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+ const char *name, struct lock_class_key *key);
+
+#define init_kthread_worker(worker) \
+ do { \
+ static struct lock_class_key __key; \
+ __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+ } while (0)
+
+#define init_kthread_work(work, fn) \
+ do { \
+ memset((work), 0, sizeof(struct kthread_work)); \
+ INIT_LIST_HEAD(&(work)->node); \
+ (work)->func = (fn); \
+ init_waitqueue_head(&(work)->done); \
+ } while (0)
int kthread_worker_fn(void *worker_ptr);
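
The _ONSTACK variants matter when a worker lives on a caller's stack, since each instance then needs its own lockdep class. A hypothetical synchronous user (sketch only, error handling omitted):

#include <linux/kthread.h>

static void example_work_fn(struct kthread_work *work)
{
        /* the actual unit of work would go here */
}

static void example_run_one_item(void)
{
        DEFINE_KTHREAD_WORKER_ONSTACK(worker);
        DEFINE_KTHREAD_WORK_ONSTACK(work, example_work_fn);
        struct task_struct *task = kthread_run(kthread_worker_fn, &worker,
                                               "example-worker");

        queue_kthread_work(&worker, &work);
        flush_kthread_work(&work);
        kthread_stop(task);
}
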
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 123566912d7..e2b9e63afa6 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -70,7 +70,7 @@ struct nlmsghdr {
Check NLM_F_EXCL
*/
-#define NLMSG_ALIGNTO 4
+#define NLMSG_ALIGNTO 4U
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index de2c41758e2..4f1279e105e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -887,6 +887,7 @@ struct perf_cpu_context {
int exclusive;
struct list_head rotation_list;
int jiffies_interval;
+ struct pmu *active_pmu;
};
struct perf_output_handle {
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 3ec2358f869..d19f1cca7f7 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -77,7 +77,8 @@ static inline void device_set_run_wake(struct device *dev, bool enable)
static inline bool pm_runtime_suspended(struct device *dev)
{
- return dev->power.runtime_status == RPM_SUSPENDED;
+ return dev->power.runtime_status == RPM_SUSPENDED
+ && !dev->power.disable_depth;
}
static inline void pm_runtime_mark_last_busy(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c79e921a68..223874538b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
-extern void calc_global_load(void);
+extern void calc_global_load(unsigned long ticks);
extern unsigned long get_parent_ip(unsigned long addr);
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 942e3873690..eba52a10053 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -96,16 +96,21 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
return 0;
}
-extern char * nvram_get(const char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
/* Get the device MAC address */
static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
-#ifdef CONFIG_BCM47XX
- char *res = nvram_get("et0macaddr");
- if (res)
- memcpy(macaddr, res, 6);
-#endif
+ char buf[20];
+ if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0)
+ return;
+ nvram_parse_macaddr(buf, macaddr);
}
+#else
+static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+{
+}
+#endif
extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
struct pci_dev *pdev);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 9a21e8102c4..fc84b7a19ca 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -201,6 +201,8 @@ struct xdr_stream {
__be32 *end; /* end of available buffer space */
struct kvec *iov; /* pointer to the current kvec */
+ struct kvec scratch; /* Scratch buffer */
+ struct page **page_ptr; /* pointer to the current page */
};
/*
@@ -214,7 +216,7 @@ extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
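
xdr_set_scratch_buffer() replaces xdr_inline_peek(): the decoder hands the stream a bounce buffer so xdr_inline_decode() can return a contiguous pointer even when an item straddles a page boundary. A hypothetical decoder (illustrative sketch; the 16-byte bound is an assumption):

#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>

static int example_decode_three_words(struct xdr_stream *xdr, u32 *out)
{
        char scratch[16];            /* hypothetical worst-case item size */
        __be32 *p;

        xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
        p = xdr_inline_decode(xdr, 3 * sizeof(u32));
        if (p == NULL)
                return -EIO;
        out[0] = be32_to_cpup(p++);
        out[1] = be32_to_cpup(p++);
        out[2] = be32_to_cpup(p);
        return 0;
}
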
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index 341dddb5509..2466e550a41 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -33,7 +33,7 @@
*/
-#define TASKSTATS_VERSION 7
+#define TASKSTATS_VERSION 8
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -188,6 +188,7 @@ enum {
TASKSTATS_TYPE_STATS, /* taskstats structure */
TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */
TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */
+ TASKSTATS_TYPE_NULL, /* contains nothing */
__TASKSTATS_TYPE_MAX,
};
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index 2498bb9fe00..c9a6abd972a 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@
#include <linux/kernel.h>
-struct __una_u16 { u16 x __attribute__((packed)); };
-struct __una_u32 { u32 x __attribute__((packed)); };
-struct __una_u64 { u64 x __attribute__((packed)); };
+struct __una_u16 { u16 x; } __attribute__((packed));
+struct __una_u32 { u32 x; } __attribute__((packed));
+struct __una_u64 { u64 x; } __attribute__((packed));
static inline u16 __get_unaligned_cpu16(const void *p)
{
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index 7a9f76ecbbb..ac7ce00f39c 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -161,7 +161,7 @@ extern struct list_head saa7146_devices;
extern struct mutex saa7146_devices_lock;
int saa7146_register_extension(struct saa7146_extension*);
int saa7146_unregister_extension(struct saa7146_extension*);
-struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
+struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 6648036b728..b16f307d471 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -51,6 +51,8 @@ struct v4l2_device {
unsigned int notification, void *arg);
/* The control handler. May be NULL. */
struct v4l2_ctrl_handler *ctrl_handler;
+ /* BKL replacement mutex. Temporary solution only. */
+ struct mutex ioctl_lock;
};
/* Initialize v4l2_dev and make dev->driver_data point to v4l2_dev.
diff --git a/include/media/wm8775.h b/include/media/wm8775.h
index a1c4d417dfa..60739c5a23a 100644
--- a/include/media/wm8775.h
+++ b/include/media/wm8775.h
@@ -32,7 +32,4 @@
#define WM8775_AIN3 4
#define WM8775_AIN4 8
-/* subdev group ID */
-#define WM8775_GID (1 << 0)
-
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 0ac3fb5e097..bb08692a20b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -49,7 +49,6 @@ struct flowi {
__u8 proto;
__u8 flags;
#define FLOWI_FLAG_ANYSRC 0x01
-#define FLOWI_FLAG_MATCH_ANY_IIF 0x02
union {
struct {
__be16 sport;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 278312c95f9..2ab926860cd 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -164,5 +164,15 @@ static inline int ipv6_unicast_destination(struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+
+static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+
+ return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
#endif
#endif
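
ip6_skb_dst_mtu() factors out the PMTU-probe special case so callers can compare a packet against the destination MTU before fragmenting. A hypothetical output path (sketch only, mirroring the usual pattern):

#include <linux/skbuff.h>
#include <net/ip6_route.h>

static int example_ip6_finish_output(struct sk_buff *skb,
                                     int (*output)(struct sk_buff *))
{
        if (skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip6_fragment(skb, output);
        return output(skb);
}
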
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9fdf982d128..365359b2417 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2024,8 +2024,8 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
*
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls
- * to this function and ieee80211_tx_status_irqsafe() may not be mixed
- * for a single hardware.
+ * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
+ * may not be mixed for a single hardware.
*
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
@@ -2034,13 +2034,33 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_ni - transmit status callback (in process context)
+ *
+ * Like ieee80211_tx_status() but can be called in process context.
+ *
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_irqsafe() may not be mixed
+ * for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ local_bh_disable();
+ ieee80211_tx_status(hw, skb);
+ local_bh_enable();
+}
+
+/**
* ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
*
* Like ieee80211_tx_status() but can be called in IRQ context
* (internally defers to a tasklet.)
*
- * Calls to this function and ieee80211_tx_status() may not be mixed for a
- * single hardware.
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_ni() may not be mixed for a single hardware.
*
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
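
ieee80211_tx_status_ni() fills the gap for drivers that complete frames from process context, for example a threaded IRQ handler. A hypothetical driver path (illustrative sketch; example_fetch_completed_frame() is made up):

#include <linux/interrupt.h>
#include <net/mac80211.h>

static struct sk_buff *example_fetch_completed_frame(struct ieee80211_hw *hw);

static irqreturn_t example_tx_done_thread(int irq, void *dev_id)
{
        struct ieee80211_hw *hw = dev_id;
        struct sk_buff *skb;

        while ((skb = example_fetch_completed_frame(hw)) != NULL)
                ieee80211_tx_status_ni(hw, skb);   /* safe here: process context */
        return IRQ_HANDLED;
}
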
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index dd3031aed9d..9fcc680ab6b 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -323,7 +323,9 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
static inline int tcf_valid_offset(const struct sk_buff *skb,
const unsigned char *ptr, const int len)
{
- return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head);
+ return likely((ptr + len) <= skb_tail_pointer(skb) &&
+ ptr >= skb->head &&
+ (ptr <= (ptr + len)));
}
#ifdef CONFIG_NET_CLS_IND
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ea1f8a83160..79f34e2b752 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -610,11 +610,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
{
struct sk_buff *n;
- if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
- !skb_shared(skb))
- n = skb_get(skb);
- else
- n = skb_clone(skb, gfp_mask);
+ n = skb_clone(skb, gfp_mask);
if (n) {
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
diff --git a/include/net/sock.h b/include/net/sock.h
index 659d968d95c..7d3f7ce239b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -754,6 +754,7 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
@@ -852,6 +853,8 @@ static inline void __sk_prot_rehash(struct sock *sk)
sk->sk_prot->hash(sk);
}
+void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
+
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
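
The new clear_sk hook, together with sk_prot_clear_portaddr_nulls(), lets a protocol wipe a recycled sock while leaving its port/address hash node untouched. A hypothetical proto wiring it up (sketch only; names and field values are placeholders):

#include <linux/module.h>
#include <net/sock.h>

static struct proto example_proto = {
        .name     = "EXAMPLE",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct sock),
        .clear_sk = sk_prot_clear_portaddr_nulls,   /* new hook from this patch */
};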