From 2affffa41d18a48d2b1a793de6c3dfb612ee7797 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 5 Apr 2017 09:24:38 +1000 Subject: sparc64: NG4 memset 32 bits overflow

Early in boot, Linux patches memset and memcpy to branch to platform-optimized versions of these routines. The NG4 (Niagara 4) versions are currently used on all platforms starting from T4. Recently, M7-optimized routines were added into UEK4, but not into mainline yet, so even with the M7-optimized routines the NG4 versions are still going to be used on T4, T5, M5, and M6 processors.

While investigating how to improve the initialization time of dentry_hashtable, which is 8G long on an M6 ldom with 7T of main memory, I noticed that memset() does not reset all of the memory in this array. After studying the code, I realized that the NG4memset() branches use the %icc condition codes instead of %xcc for their comparisons, so if the length value does not fit into 32 bits - which is the case for an 8G array - these routines fail to work properly.

The fix is to replace all %icc with %xcc in these routines. (An alternative is to use %ncc, but that is misleading, as the code already contains sparcv9-only instructions and cannot be compiled on 32-bit.)

It is important to fix this bug because even an older T4-4 can have 2T of memory, and the kernel contains data structures proportional to memory size that can be larger than 4G. The failure of memset() is silent and the resulting corruption is hard to detect.

Link: http://lkml.kernel.org/r/1488432825-92126-2-git-send-email-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin Reviewed-by: Babu Moger Cc: David Miller Cc: Al Viro Signed-off-by: Andrew Morton --- arch/sparc/lib/NG4memset.S | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S index 7c0c81f18837..c07834f3a6fc 100644 --- a/arch/sparc/lib/NG4memset.S +++ b/arch/sparc/lib/NG4memset.S @@ -13,14 +13,14 @@ .globl NG4memset NG4memset: andcc %o1, 0xff, %o4 - be,pt %icc, 1f + be,pt %xcc, 1f mov %o2, %o1 sllx %o4, 8, %g1 or %g1, %o4, %o2 sllx %o2, 16, %g1 or %g1, %o2, %o2 sllx %o2, 32, %g1 - ba,pt %icc, 1f + ba,pt %xcc, 1f or %g1, %o2, %o4 .size NG4memset,.-NG4memset @@ -29,7 +29,7 @@ NG4memset: NG4bzero: clr %o4 1: cmp %o1, 16 - ble %icc, .Ltiny + ble %xcc, .Ltiny mov %o0, %o3 sub %g0, %o0, %g1 and %g1, 0x7, %g1 @@ -37,7 +37,7 @@ NG4bzero: sub %o1, %g1, %o1 1: stb %o4, [%o0 + 0x00] subcc %g1, 1, %g1 - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 1, %o0 .Laligned8: cmp %o1, 64 + (64 - 8) @@ -48,7 +48,7 @@ NG4bzero: sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 8, %g1 - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 0x8, %o0 .Laligned64: andn %o1, 64 - 1, %g1 @@ -58,30 +58,30 @@ NG4bzero: 1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P subcc %g1, 0x40, %g1 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 0x40, %o0 .Lpostloop: cmp %o1, 8 - bl,pn %icc, .Ltiny + bl,pn %xcc, .Ltiny membar #StoreStore|#StoreLoad .Lmedium: andn %o1, 0x7, %g1 sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 0x8, %g1 - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 0x08, %o0 andcc %o1, 0x4, %g1 - be,pt %icc, .Ltiny + be,pt %xcc, .Ltiny sub %o1, %g1, %o1 stw %o4, [%o0 + 0x00] add %o0, 0x4, %o0 .Ltiny: cmp %o1, 0 - be,pn %icc, .Lexit + be,pn %xcc, .Lexit 1: subcc %o1, 1, %o1 stb %o4, [%o0 + 0x00] - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 1, %o0 .Lexit: retl @@ -99,8 +99,8 @@ NG4bzero: stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P stxa
%o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P - bne,pt %icc, 1b + bne,pt %xcc, 1b add %o0, 0x30, %o0 - ba,a,pt %icc, .Lpostloop + ba,a,pt %xcc, .Lpostloop nop .size NG4bzero,.-NG4bzero -- cgit v1.2.3 From 62cffd1d9954259e09f8ae0b9866fbbf01023949 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 5 Apr 2017 09:24:38 +1000 Subject: mm: zero hash tables in allocator

Add a new flag, HASH_ZERO, which when provided guarantees that the hash table returned by alloc_large_system_hash() is zeroed. In most cases that is what the caller needs. Use the page-level allocator's __GFP_ZERO flag to zero the memory. It uses memset(), which is an efficient way to zero memory and is optimized for most platforms.

Link: http://lkml.kernel.org/r/1488432825-92126-3-git-send-email-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin Reviewed-by: Babu Moger Cc: David Miller Cc: Al Viro Signed-off-by: Andrew Morton --- include/linux/bootmem.h | 1 + mm/page_alloc.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 962164d36506..e223d91b6439 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -358,6 +358,7 @@ extern void *alloc_large_system_hash(const char *tablename, #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ #define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min * shift passed via *_hash_shift */ +#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ /* Only NUMA needs hash distribution. 64bit NUMA architectures have * sufficient vmalloc space. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5a494c8b8ea0..043e59551849 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7195,6 +7195,7 @@ void *__init alloc_large_system_hash(const char *tablename, unsigned long long max = high_limit; unsigned long log2qty, size; void *table = NULL; + gfp_t gfp_flags; /* allow the kernel cmdline to have a say */ if (!numentries) { @@ -7239,12 +7240,17 @@ void *__init alloc_large_system_hash(const char *tablename, log2qty = ilog2(numentries); + /* + * memblock allocator returns zeroed memory already, so HASH_ZERO is + * currently not used when HASH_EARLY is specified. + */ + gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; do { size = bucketsize << log2qty; if (flags & HASH_EARLY) table = memblock_virt_alloc_nopanic(size, 0); else if (hashdist) - table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); + table = __vmalloc(size, gfp_flags, PAGE_KERNEL); else { /* * If bucketsize is not a power-of-two, we may free * alloc_pages_exact() automatically does */ if (get_order(size) < MAX_ORDER) { - table = alloc_pages_exact(size, GFP_ATOMIC); - kmemleak_alloc(table, size, 1, GFP_ATOMIC); + table = alloc_pages_exact(size, gfp_flags); + kmemleak_alloc(table, size, 1, gfp_flags); } } } while (!table && size > PAGE_SIZE && --log2qty); -- cgit v1.2.3 From e10cd2c9bc13dd63a0617c58be833352cbf4cb49 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 5 Apr 2017 09:24:38 +1000 Subject: mm: update callers to use HASH_ZERO flag

Update the dcache, inode, pid, mountpoint, and mount hash tables to use HASH_ZERO, and remove the explicit initialization after the allocations. In places where HASH_EARLY was used, such as __pv_init_lock_hash(), a zeroed hash table was already assumed because memblock zeroes the memory.
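Each caller follows the same conversion shape; a minimal sketch of the pattern (the example_* names and "Example-cache" are illustrative only, the real hunks are in the diff below):

	/* before: allocate the table, then clear every bucket by hand */
	example_hashtable =
		alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					example_entries, 14, 0,
					&example_shift, &example_mask, 0, 0);
	for (loop = 0; loop < (1U << example_shift); loop++)
		INIT_HLIST_HEAD(&example_hashtable[loop]);

	/* after: HASH_ZERO hands back zeroed buckets, so the loop goes away
	 * (an empty hlist head is simply a NULL first pointer) */
	example_hashtable =
		alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					example_entries, 14, HASH_ZERO,
					&example_shift, &example_mask, 0, 0);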
CPU: SPARC M6, Memory: 7T Before fix: Dentry cache hash table entries: 1073741824 Inode-cache hash table entries: 536870912 Mount-cache hash table entries: 16777216 Mountpoint-cache hash table entries: 16777216 ftrace: allocating 20414 entries in 40 pages Total time: 11.798s After fix: Dentry cache hash table entries: 1073741824 Inode-cache hash table entries: 536870912 Mount-cache hash table entries: 16777216 Mountpoint-cache hash table entries: 16777216 ftrace: allocating 20414 entries in 40 pages Total time: 3.198s CPU: Intel Xeon E5-2630, Memory: 2.2T: Before fix: Dentry cache hash table entries: 536870912 Inode-cache hash table entries: 268435456 Mount-cache hash table entries: 8388608 Mountpoint-cache hash table entries: 8388608 CPU: Physical Processor ID: 0 Total time: 3.245s After fix: Dentry cache hash table entries: 536870912 Inode-cache hash table entries: 268435456 Mount-cache hash table entries: 8388608 Mountpoint-cache hash table entries: 8388608 CPU: Physical Processor ID: 0 Total time: 3.244s Link: http://lkml.kernel.org/r/1488432825-92126-4-git-send-email-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin Reviewed-by: Babu Moger Cc: David Miller Cc: Al Viro Signed-off-by: Andrew Morton --- fs/dcache.c | 18 ++++-------------- fs/inode.c | 14 ++------------ fs/namespace.c | 10 ++-------- kernel/locking/qspinlock_paravirt.h | 3 ++- kernel/pid.c | 7 ++----- 5 files changed, 12 insertions(+), 40 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 95d71eda8142..363502faa328 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3548,8 +3548,6 @@ __setup("dhash_entries=", set_dhash_entries); static void __init dcache_init_early(void) { - unsigned int loop; - /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ @@ -3561,24 +3559,19 @@ static void __init dcache_init_early(void) sizeof(struct hlist_bl_head), dhash_entries, 13, - HASH_EARLY, + HASH_EARLY | HASH_ZERO, &d_hash_shift, &d_hash_mask, 0, 0); - - for (loop = 0; loop < (1U << d_hash_shift); loop++) - INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } static void __init dcache_init(void) { - unsigned int loop; - - /* + /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature - * of the dcache. + * of the dcache. */ dentry_cache = KMEM_CACHE(dentry, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT); @@ -3592,14 +3585,11 @@ static void __init dcache_init(void) sizeof(struct hlist_bl_head), dhash_entries, 13, - 0, + HASH_ZERO, &d_hash_shift, &d_hash_mask, 0, 0); - - for (loop = 0; loop < (1U << d_hash_shift); loop++) - INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } /* SLAB cache for __getname() consumers */ diff --git a/fs/inode.c b/fs/inode.c index 88110fd0b282..1b15a7cc78ce 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1916,8 +1916,6 @@ __setup("ihash_entries=", set_ihash_entries); */ void __init inode_init_early(void) { - unsigned int loop; - /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. 
*/ @@ -1929,20 +1927,15 @@ void __init inode_init_early(void) sizeof(struct hlist_head), ihash_entries, 14, - HASH_EARLY, + HASH_EARLY | HASH_ZERO, &i_hash_shift, &i_hash_mask, 0, 0); - - for (loop = 0; loop < (1U << i_hash_shift); loop++) - INIT_HLIST_HEAD(&inode_hashtable[loop]); } void __init inode_init(void) { - unsigned int loop; - /* inode slab cache */ inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode), @@ -1960,14 +1953,11 @@ void __init inode_init(void) sizeof(struct hlist_head), ihash_entries, 14, - 0, + HASH_ZERO, &i_hash_shift, &i_hash_mask, 0, 0); - - for (loop = 0; loop < (1U << i_hash_shift); loop++) - INIT_HLIST_HEAD(&inode_hashtable[loop]); } void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) diff --git a/fs/namespace.c b/fs/namespace.c index cc1375eff88c..c8d4324857bd 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3241,7 +3241,6 @@ static void __init init_mount_tree(void) void __init mnt_init(void) { - unsigned u; int err; mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), @@ -3250,22 +3249,17 @@ void __init mnt_init(void) mount_hashtable = alloc_large_system_hash("Mount-cache", sizeof(struct hlist_head), mhash_entries, 19, - 0, + HASH_ZERO, &m_hash_shift, &m_hash_mask, 0, 0); mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", sizeof(struct hlist_head), mphash_entries, 19, - 0, + HASH_ZERO, &mp_hash_shift, &mp_hash_mask, 0, 0); if (!mount_hashtable || !mountpoint_hashtable) panic("Failed to allocate mount hash table\n"); - for (u = 0; u <= m_hash_mask; u++) - INIT_HLIST_HEAD(&mount_hashtable[u]); - for (u = 0; u <= mp_hash_mask; u++) - INIT_HLIST_HEAD(&mountpoint_hashtable[u]); - kernfs_init(); err = sysfs_init(); diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index e6b2f7ad3e51..4ccfcaae5b89 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -193,7 +193,8 @@ void __init __pv_init_lock_hash(void) */ pv_lock_hash = alloc_large_system_hash("PV qspinlock", sizeof(struct pv_hash_entry), - pv_hash_size, 0, HASH_EARLY, + pv_hash_size, 0, + HASH_EARLY | HASH_ZERO, &pv_lock_hash_bits, NULL, pv_hash_size, pv_hash_size); } diff --git a/kernel/pid.c b/kernel/pid.c index 0143ac0ddceb..a1f8459c70ac 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -573,16 +573,13 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) */ void __init pidhash_init(void) { - unsigned int i, pidhash_size; + unsigned int pidhash_size; pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, - HASH_EARLY | HASH_SMALL, + HASH_EARLY | HASH_SMALL | HASH_ZERO, &pidhash_shift, NULL, 0, 4096); pidhash_size = 1U << pidhash_shift; - - for (i = 0; i < pidhash_size; i++) - INIT_HLIST_HEAD(&pid_hash[i]); } void __init pidmap_init(void) -- cgit v1.2.3 From 1b982fb8ccb8bfcab5237cc822d8fe8579e8c843 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 5 Apr 2017 09:24:39 +1000 Subject: mm: adaptive hash table scaling Allow hash tables to scale with memory but at slower pace, when HASH_ADAPT is provided every time memory quadruples the sizes of hash tables will only double instead of quadrupling as well. This algorithm starts working only when memory size reaches a certain point, currently set to 64G. 
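The adjustment itself is the small loop added to alloc_large_system_hash() in the mm/page_alloc.c hunk below, reproduced here with comments for clarity:

	#define ADAPT_SCALE_BASE	(64ul << 30)	/* start adapting at 64G */
	#define ADAPT_SCALE_SHIFT	2		/* memory x4 => scale + 1 */
	#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)

	if (flags & HASH_ADAPT) {
		unsigned long adapt;

		/* each extra scale step halves the table relative to the
		 * old, linear scaling */
		for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
		     adapt <<= ADAPT_SCALE_SHIFT)
			scale++;
	}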
This is example of dentry hash table size, before and after four various memory configurations: MEMORY SCALE HASH_SIZE old new old new 8G 13 13 8M 8M 16G 13 13 16M 16M 32G 13 13 32M 32M 64G 13 13 64M 64M 128G 13 14 128M 64M 256G 13 14 256M 128M 512G 13 15 512M 128M 1024G 13 15 1024M 256M 2048G 13 16 2048M 256M 4096G 13 16 4096M 512M 8192G 13 17 8192M 512M 16384G 13 17 16384M 1024M 32768G 13 18 32768M 1024M 65536G 13 18 65536M 2048M Link: http://lkml.kernel.org/r/1488432825-92126-5-git-send-email-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin Cc: David Miller Cc: Al Viro Cc: Babu Moger Signed-off-by: Andrew Morton --- fs/dcache.c | 2 +- fs/inode.c | 2 +- include/linux/bootmem.h | 1 + mm/page_alloc.c | 19 +++++++++++++++++++ 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 363502faa328..808ea99062c2 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3585,7 +3585,7 @@ static void __init dcache_init(void) sizeof(struct hlist_bl_head), dhash_entries, 13, - HASH_ZERO, + HASH_ZERO | HASH_ADAPT, &d_hash_shift, &d_hash_mask, 0, diff --git a/fs/inode.c b/fs/inode.c index 1b15a7cc78ce..32c8ee454ef0 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1953,7 +1953,7 @@ void __init inode_init(void) sizeof(struct hlist_head), ihash_entries, 14, - HASH_ZERO, + HASH_ZERO | HASH_ADAPT, &i_hash_shift, &i_hash_mask, 0, diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index e223d91b6439..dbaf312b3317 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -359,6 +359,7 @@ extern void *alloc_large_system_hash(const char *tablename, #define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min * shift passed via *_hash_shift */ #define HASH_ZERO 0x00000004 /* Zero allocated hash table */ +#define HASH_ADAPT 0x00000008 /* Adaptive scale for large memory */ /* Only NUMA needs hash distribution. 64bit NUMA architectures have * sufficient vmalloc space. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 043e59551849..28cfeb7d553d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7176,6 +7176,17 @@ static unsigned long __init arch_reserved_kernel_pages(void) } #endif +/* + * Adaptive scale is meant to reduce sizes of hash tables on large memory + * machines. As memory size is increased the scale is also increased but at + * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory + * quadruples the scale is increased by one, which means the size of hash table + * only doubles, instead of quadrupling as well. + */ +#define ADAPT_SCALE_BASE (64ul << 30) +#define ADAPT_SCALE_SHIFT 2 +#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) + /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 @@ -7207,6 +7218,14 @@ void *__init alloc_large_system_hash(const char *tablename, if (PAGE_SHIFT < 20) numentries = round_up(numentries, (1<<20)/PAGE_SIZE); + if (flags & HASH_ADAPT) { + unsigned long adapt; + + for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; + adapt <<= ADAPT_SCALE_SHIFT) + scale++; + } + /* limit to 1 bucket per 2^scale bytes of low memory */ if (scale > PAGE_SHIFT) numentries >>= (scale - PAGE_SHIFT); -- cgit v1.2.3 From d32b2ce9a5552f49142fb0b7ece256b04f233aa1 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:39 +1000 Subject: mm: introduce kv[mz]alloc helpers Patch series "kvmalloc", v5. There are many open coded kmalloc with vmalloc fallback instances in the tree. 
Most of them are not careful enough or simply do not care about the underlying semantics of the kmalloc/page allocator, which means that a) some vmalloc fallbacks are basically unreachable because the kmalloc part will keep retrying until it succeeds, and b) the page allocator can invoke really disruptive steps like the OOM killer to make forward progress, which does not sound appropriate when a vmalloc fallback is available.

As can be seen, implementing kvmalloc requires quite intimate knowledge of the page allocator and the memory reclaim internals, which strongly suggests that a helper should be implemented in the memory subsystem proper. Most callers I could find have been converted to use the helper instead. This is patch 6. There are some more relying on __GFP_REPEAT in the networking stack which I have converted as well, and Eric Dumazet was not opposed [2] to converting them.

[1] http://lkml.kernel.org/r/20170130094940.13546-1-mhocko@kernel.org
[2] http://lkml.kernel.org/r/1485273626.16328.301.camel@edumazet-glaptop3.roam.corp.google.com

This patch (of 9):

Using kmalloc with a vmalloc fallback for larger allocations is a common pattern in the kernel code. Yet we do not have any common helper for it, so users have invented their own helpers, some of them quite creatively. Let's just add kv[mz]alloc and make sure it is implemented properly. The implementation makes sure not to apply large memory pressure for > PAGE_SIZE requests (__GFP_NORETRY) and also not to warn about allocation failures. This also rules out the OOM killer, as vmalloc is a more appropriate fallback than a disruptive, user-visible action.

This patch also changes some existing users and removes helpers which are specific to them. In some cases this is not possible (e.g. ext4_kvmalloc, libcfs_kvzalloc) because those seem to be broken and require a GFP_NO{FS,IO} context, which is not vmalloc compatible in general (note that the page table allocation is GFP_KERNEL). Those need to be fixed separately.

While we are at it, document the unsupported gfp masks for __vmalloc{_node}, because there seems to be a lot of confusion out there. kvmalloc_node will warn about flags that are incompatible with GFP_KERNEL (i.e. not a superset) to catch new abusers. Existing ones would have to die slowly.
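The heart of the new helper, condensed from the mm/util.c hunk further down with comments added:

	void *kvmalloc_node(size_t size, gfp_t flags, int node)
	{
		gfp_t kmalloc_flags = flags;
		void *ret;

		/* vmalloc uses GFP_KERNEL for its internal allocations (e.g.
		 * page tables), so the given flags must be a superset of it */
		WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

		/* larger requests: no OOM killer and no failure warnings,
		 * because we have a fallback */
		if (size > PAGE_SIZE)
			kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN;

		ret = kmalloc_node(size, kmalloc_flags, node);

		/* falling back to vmalloc for sub-page requests makes no sense */
		if (ret || size <= PAGE_SIZE)
			return ret;

		return __vmalloc_node_flags(size, node, flags | __GFP_HIGHMEM);
	}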
Link: http://lkml.kernel.org/r/20170306103032.2540-2-mhocko@kernel.org Signed-off-by: Michal Hocko Reviewed-by: Andreas Dilger [ext4 part] Acked-by: Vlastimil Babka Cc: John Hubbard Cc: David Miller Signed-off-by: Andrew Morton --- arch/x86/kvm/lapic.c | 4 ++-- arch/x86/kvm/page_track.c | 4 ++-- arch/x86/kvm/x86.c | 4 ++-- drivers/md/dm-stats.c | 7 +----- fs/ext4/mballoc.c | 2 +- fs/ext4/super.c | 4 ++-- fs/f2fs/f2fs.h | 20 ----------------- fs/f2fs/file.c | 4 ++-- fs/f2fs/node.c | 4 ++-- fs/f2fs/segment.c | 14 ++++++------ fs/seq_file.c | 16 +------------- include/linux/kvm_host.h | 2 -- include/linux/mm.h | 14 ++++++++++++ include/linux/vmalloc.h | 1 + ipc/util.c | 7 +----- mm/nommu.c | 5 +++++ mm/util.c | 45 +++++++++++++++++++++++++++++++++++++++ mm/vmalloc.c | 9 +++++++- security/apparmor/apparmorfs.c | 2 +- security/apparmor/include/lib.h | 11 ---------- security/apparmor/lib.c | 30 -------------------------- security/apparmor/match.c | 2 +- security/apparmor/policy_unpack.c | 2 +- virt/kvm/kvm_main.c | 18 +++------------- 24 files changed, 102 insertions(+), 129 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bad6a25067bc..d2a892fc92bf 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -177,8 +177,8 @@ static void recalculate_apic_map(struct kvm *kvm) if (kvm_apic_present(vcpu)) max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); - new = kvm_kvzalloc(sizeof(struct kvm_apic_map) + - sizeof(struct kvm_lapic *) * ((u64)max_id + 1)); + new = kvzalloc(sizeof(struct kvm_apic_map) + + sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL); if (!new) goto out; diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 60168cdd0546..ea67dc876316 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -40,8 +40,8 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, int i; for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - slot->arch.gfn_track[i] = kvm_kvzalloc(npages * - sizeof(*slot->arch.gfn_track[i])); + slot->arch.gfn_track[i] = kvzalloc(npages * + sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL); if (!slot->arch.gfn_track[i]) goto track_free; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ccbd45ecd41a..ee22226e3807 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8199,13 +8199,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, slot->base_gfn, level) + 1; slot->arch.rmap[i] = - kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); + kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; - linfo = kvm_kvzalloc(lpages * sizeof(*linfo)); + linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL); if (!linfo) goto out_free; diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 0250e7e521ab..6028d8247f58 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -146,12 +146,7 @@ static void *dm_kvzalloc(size_t alloc_size, int node) if (!claim_shared_memory(alloc_size)) return NULL; - if (alloc_size <= KMALLOC_MAX_SIZE) { - p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node); - if (p) - return p; - } - p = vzalloc_node(alloc_size, node); + p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node); if (p) return p; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 354dc1a894c2..b60698c104fd 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2393,7 +2393,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, 
ext4_group_t ngroups) return 0; size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); - new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); + new_groupinfo = kvzalloc(size, GFP_KERNEL); if (!new_groupinfo) { ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); return -ENOMEM; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a9448db1cf7e..73cae0cc34ca 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2132,7 +2132,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) return 0; size = roundup_pow_of_two(size * sizeof(struct flex_groups)); - new_groups = ext4_kvzalloc(size, GFP_KERNEL); + new_groups = kvzalloc(size, GFP_KERNEL); if (!new_groups) { ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", size / (int) sizeof(struct flex_groups)); @@ -3866,7 +3866,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } } - sbi->s_group_desc = ext4_kvmalloc(db_count * + sbi->s_group_desc = kvmalloc(db_count * sizeof(struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index fd39db681226..e6b2ee21fd96 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2014,26 +2014,6 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, return kmalloc(size, flags); } -static inline void *f2fs_kvmalloc(size_t size, gfp_t flags) -{ - void *ret; - - ret = kmalloc(size, flags | __GFP_NOWARN); - if (!ret) - ret = __vmalloc(size, flags, PAGE_KERNEL); - return ret; -} - -static inline void *f2fs_kvzalloc(size_t size, gfp_t flags) -{ - void *ret; - - ret = kzalloc(size, flags | __GFP_NOWARN); - if (!ret) - ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); - return ret; -} - #define get_inode_mode(i) \ ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f3be240ef129..45a216018a25 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1011,11 +1011,11 @@ static int __exchange_data_block(struct inode *src_inode, while (len) { olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len); - src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL); + src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL); if (!src_blkaddr) return -ENOMEM; - do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL); + do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL); if (!do_replace) { kvfree(src_blkaddr); return -ENOMEM; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 6e87178d34a2..84a9ea4c3e2d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2641,12 +2641,12 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); - nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks * + nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks * NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; - nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8, + nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8, GFP_KERNEL); if (!nm_i->nat_block_bitmap) return -ENOMEM; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index bff3f3bc7827..010324c8e6c6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2606,13 +2606,13 @@ static int build_sit_info(struct f2fs_sb_info *sbi) SM_I(sbi)->sit_info = sit_i; - sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) * + sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry), GFP_KERNEL); if (!sit_i->sentries) return -ENOMEM; bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); - sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); + sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL); if (!sit_i->dirty_sentries_bitmap) return -ENOMEM; @@ -2645,7 +2645,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi) return -ENOMEM; if (sbi->segs_per_sec > 1) { - sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) * + sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) * sizeof(struct sec_entry), GFP_KERNEL); if (!sit_i->sec_entries) return -ENOMEM; @@ -2696,12 +2696,12 @@ static int build_free_segmap(struct f2fs_sb_info *sbi) SM_I(sbi)->free_info = free_i; bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); - free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL); + free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL); if (!free_i->free_segmap) return -ENOMEM; sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); - free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL); + free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL); if (!free_i->free_secmap) return -ENOMEM; @@ -2869,7 +2869,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi) struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); - dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); + dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->victim_secmap) return -ENOMEM; return 0; @@ -2891,7 +2891,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); for (i = 0; i < NR_DIRTY_TYPE; i++) { - dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); + dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->dirty_segmap[i]) return -ENOMEM; } diff --git a/fs/seq_file.c b/fs/seq_file.c index 
ca69fb99e41a..dc7c2be963ed 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -25,21 +25,7 @@ static void seq_set_overflow(struct seq_file *m) static void *seq_buf_alloc(unsigned long size) { - void *buf; - gfp_t gfp = GFP_KERNEL; - - /* - * For high order allocations, use __GFP_NORETRY to avoid oom-killing - - * it's better to fall back to vmalloc() than to kill things. For small - * allocations, just use GFP_KERNEL which will oom kill, thus no need - * for vmalloc fallback. - */ - if (size > PAGE_SIZE) - gfp |= __GFP_NORETRY | __GFP_NOWARN; - buf = kmalloc(size, gfp); - if (!buf && size > PAGE_SIZE) - buf = vmalloc(size); - return buf; + return kvmalloc(size, GFP_KERNEL); } /** diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d0250744507a..5d9b2a08e553 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -767,8 +767,6 @@ void kvm_arch_check_processor_compat(void *rtn); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); -void *kvm_kvzalloc(unsigned long size); - #ifndef __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { diff --git a/include/linux/mm.h b/include/linux/mm.h index cbdbe0be3127..5d36be8f4620 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -518,6 +518,20 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif +extern void *kvmalloc_node(size_t size, gfp_t flags, int node); +static inline void *kvmalloc(size_t size, gfp_t flags) +{ + return kvmalloc_node(size, flags, NUMA_NO_NODE); +} +static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) +{ + return kvmalloc_node(size, flags | __GFP_ZERO, node); +} +static inline void *kvzalloc(size_t size, gfp_t flags) +{ + return kvmalloc(size, flags | __GFP_ZERO); +} + extern void kvfree(const void *addr); static inline atomic_t *compound_mapcount_ptr(struct page *page) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index d68edffbf142..46991ad3ddd5 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -80,6 +80,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller); +extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/ipc/util.c b/ipc/util.c index 3459a16a9df9..caec7b1bfaa3 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -403,12 +403,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) */ void *ipc_alloc(int size) { - void *out; - if (size > PAGE_SIZE) - out = vmalloc(size); - else - out = kmalloc(size, GFP_KERNEL); - return out; + return kvmalloc(size, GFP_KERNEL); } /** diff --git a/mm/nommu.c b/mm/nommu.c index 2d131b97a851..a80411d258fc 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -237,6 +237,11 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) } EXPORT_SYMBOL(__vmalloc); +void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags) +{ + return __vmalloc(size, flags, PAGE_KERNEL); +} + void *vmalloc_user(unsigned long size) { void *ret; diff --git a/mm/util.c b/mm/util.c index 656dc5e37a87..10a14a0ac3c2 100644 --- a/mm/util.c +++ b/mm/util.c @@ -329,6 +329,51 @@ unsigned long vm_mmap(struct file *file, unsigned long addr, } EXPORT_SYMBOL(vm_mmap); +/** + * kvmalloc_node - attempt to allocate physically contiguous memory, but upon + * failure, 
fall back to non-contiguous (vmalloc) allocation. + * @size: size of the request. + * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. + * @node: numa node to allocate from + * + * Uses kmalloc to get the memory but if the allocation fails then falls back + * to the vmalloc allocator. Use kvfree for freeing the memory. + * + * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported + * + * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people. + */ +void *kvmalloc_node(size_t size, gfp_t flags, int node) +{ + gfp_t kmalloc_flags = flags; + void *ret; + + /* + * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) + * so the given set of flags has to be compatible. + */ + WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); + + /* + * Make sure that larger requests are not too disruptive - no OOM + * killer and no allocation failure warnings as we have a fallback + */ + if (size > PAGE_SIZE) + kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN; + + ret = kmalloc_node(size, kmalloc_flags, node); + + /* + * It doesn't really make sense to fallback to vmalloc for sub page + * requests + */ + if (ret || size <= PAGE_SIZE) + return ret; + + return __vmalloc_node_flags(size, node, flags | __GFP_HIGHMEM); +} +EXPORT_SYMBOL(kvmalloc_node); + void kvfree(const void *addr) { if (is_vmalloc_addr(addr)) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b52aeed3f58e..33603239560e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1786,6 +1786,13 @@ fail: * Allocate enough pages to cover @size from the page level * allocator with @gfp_mask flags. Map them into contiguous * kernel virtual space, using a pagetable protection of @prot. + * + * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT + * and __GFP_NOFAIL are not supported + * + * Any use of gfp flags outside of GFP_KERNEL should be consulted + * with mm people. 
+ * */ static void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, @@ -1802,7 +1809,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) } EXPORT_SYMBOL(__vmalloc); -static inline void *__vmalloc_node_flags(unsigned long size, +void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags) { return __vmalloc_node(size, 1, flags, PAGE_KERNEL, diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 41073f70eb41..be0b49897a67 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -98,7 +98,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf, return ERR_PTR(-ESPIPE); /* freed by caller to simple_write_to_buffer */ - data = kvmalloc(sizeof(*data) + alloc_size); + data = kvmalloc(sizeof(*data) + alloc_size, GFP_KERNEL); if (data == NULL) return ERR_PTR(-ENOMEM); kref_init(&data->count); diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h index 65ff492a9807..75733baa6702 100644 --- a/security/apparmor/include/lib.h +++ b/security/apparmor/include/lib.h @@ -64,17 +64,6 @@ char *aa_split_fqname(char *args, char **ns_name); const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, size_t *ns_len); void aa_info_message(const char *str); -void *__aa_kvmalloc(size_t size, gfp_t flags); - -static inline void *kvmalloc(size_t size) -{ - return __aa_kvmalloc(size, 0); -} - -static inline void *kvzalloc(size_t size) -{ - return __aa_kvmalloc(size, __GFP_ZERO); -} /** * aa_strneq - compare null terminated @str to a non null terminated substring diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 66475bda6f72..1a13494bc7c7 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -128,36 +128,6 @@ void aa_info_message(const char *str) printk(KERN_INFO "AppArmor: %s\n", str); } -/** - * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc - * @size: how many bytes of memory are required - * @flags: the type of memory to allocate (see kmalloc). - * - * Return: allocated buffer or NULL if failed - * - * It is possible that policy being loaded from the user is larger than - * what can be allocated by kmalloc, in those cases fall back to vmalloc. 
- */ -void *__aa_kvmalloc(size_t size, gfp_t flags) -{ - void *buffer = NULL; - - if (size == 0) - return NULL; - - /* do not attempt kmalloc if we need more than 16 pages at once */ - if (size <= (16*PAGE_SIZE)) - buffer = kmalloc(size, flags | GFP_KERNEL | __GFP_NORETRY | - __GFP_NOWARN); - if (!buffer) { - if (flags & __GFP_ZERO) - buffer = vzalloc(size); - else - buffer = vmalloc(size); - } - return buffer; -} - /** * aa_policy_init - initialize a policy structure * @policy: policy to initialize (NOT NULL) diff --git a/security/apparmor/match.c b/security/apparmor/match.c index eb0efef746f5..960c913381e2 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -88,7 +88,7 @@ static struct table_header *unpack_table(char *blob, size_t bsize) if (bsize < tsize) goto out; - table = kvzalloc(tsize); + table = kvzalloc(tsize, GFP_KERNEL); if (table) { table->td_id = th.td_id; table->td_flags = th.td_flags; diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 2e37c9c26bbd..f3422a91353c 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -487,7 +487,7 @@ fail: static void *kvmemdup(const void *src, size_t len) { - void *p = kvmalloc(len); + void *p = kvmalloc(len, GFP_KERNEL); if (p) memcpy(p, src, len); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 88257b311cb5..aca22d36be9c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -504,7 +504,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void) int i; struct kvm_memslots *slots; - slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); + slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) return NULL; @@ -689,18 +689,6 @@ out_err_no_disable: return ERR_PTR(r); } -/* - * Avoid using vmalloc for a small buffer. - * Should not be used when the size is statically known. 
- */ -void *kvm_kvzalloc(unsigned long size) -{ - if (size > PAGE_SIZE) - return vzalloc(size); - else - return kzalloc(size, GFP_KERNEL); -} - static void kvm_destroy_devices(struct kvm *kvm) { struct kvm_device *dev, *tmp; @@ -782,7 +770,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) { unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); - memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); + memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL); if (!memslot->dirty_bitmap) return -ENOMEM; @@ -1008,7 +996,7 @@ int __kvm_set_memory_region(struct kvm *kvm, goto out_free; } - slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); + slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); -- cgit v1.2.3 From 4a58e3f0edcf0cb19e5781f2477ef1ca809e50bd Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 5 Apr 2017 09:24:39 +1000 Subject: mm: introduce kv[mz]alloc helpers - f2fs fix up f2fs fixup Link: http://lkml.kernel.org/r/20170320163735.332e64b7@canb.auug.org.au Signed-off-by: Stephen Rothwell Cc: Michal Hocko Cc: Jaegeuk Kim Signed-off-by: Andrew Morton --- fs/f2fs/node.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 84a9ea4c3e2d..9886c2ff788f 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2651,7 +2651,7 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) if (!nm_i->nat_block_bitmap) return -ENOMEM; - nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * + nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks * sizeof(unsigned short), GFP_KERNEL); if (!nm_i->free_nid_count) return -ENOMEM; -- cgit v1.2.3 From ce1080cb115e52141803d006aea148d66c357696 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:40 +1000 Subject: mm: support __GFP_REPEAT in kvmalloc_node for >32kB vhost code uses __GFP_REPEAT when allocating vhost_virtqueue resp. vhost_vsock because it would really like to prefer kmalloc to the vmalloc fallback - see 23cc5a991c7a ("vhost-net: extend device allocation to vmalloc") for more context. Michael Tsirkin has also noted: "__GFP_REPEAT overhead is during allocation time. Using vmalloc means all accesses are slowed down. Allocation is not on data path, accesses are." The similar applies to other vhost_kvzalloc users. Let's teach kvmalloc_node to handle __GFP_REPEAT properly. There are two things to be careful about. First we should prevent from the OOM killer and so have to involve __GFP_NORETRY by default and secondly override __GFP_REPEAT for !costly order requests as the __GFP_REPEAT is ignored for !costly orders. Supporting __GFP_REPEAT like semantic for !costly request is possible it would require changes in the page allocator. This is out of scope of this patch. This patch shouldn't introduce any functional change. Link: http://lkml.kernel.org/r/20170306103032.2540-3-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Acked-by: Michael S. 
Tsirkin Cc: David Miller Signed-off-by: Andrew Morton --- drivers/vhost/net.c | 9 +++------ drivers/vhost/vhost.c | 15 +++------------ drivers/vhost/vsock.c | 9 +++------ mm/util.c | 18 +++++++++++++++--- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9b519897cc17..f61f852d6cfd 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -817,12 +817,9 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct vhost_virtqueue **vqs; int i; - n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - if (!n) { - n = vmalloc(sizeof *n); - if (!n) - return -ENOMEM; - } + n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT); + if (!n) + return -ENOMEM; vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); if (!vqs) { kvfree(n); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f0ba362d4c10..042030e5a035 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -534,18 +534,9 @@ err_mm: } EXPORT_SYMBOL_GPL(vhost_dev_set_owner); -static void *vhost_kvzalloc(unsigned long size) -{ - void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - - if (!n) - n = vzalloc(size); - return n; -} - struct vhost_umem *vhost_dev_reset_owner_prepare(void) { - return vhost_kvzalloc(sizeof(struct vhost_umem)); + return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL); } EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); @@ -1276,7 +1267,7 @@ EXPORT_SYMBOL_GPL(vhost_vq_access_ok); static struct vhost_umem *vhost_umem_alloc(void) { - struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem)); + struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL); if (!umem) return NULL; @@ -1302,7 +1293,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EOPNOTSUPP; if (mem.nregions > max_mem_regions) return -E2BIG; - newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); + newmem = kvzalloc(size + mem.nregions * sizeof(*m->regions), GFP_KERNEL); if (!newmem) return -ENOMEM; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 44eed8eb0725..d3bd0e707b3c 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -500,12 +500,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) /* This struct is large and allocation could fail, fall back to vmalloc * if there is no other way. */ - vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - if (!vsock) { - vsock = vmalloc(sizeof(*vsock)); - if (!vsock) - return -ENOMEM; - } + vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT); + if (!vsock) + return -ENOMEM; vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); if (!vqs) { diff --git a/mm/util.c b/mm/util.c index 10a14a0ac3c2..f4e590b2c0da 100644 --- a/mm/util.c +++ b/mm/util.c @@ -339,7 +339,9 @@ EXPORT_SYMBOL(vm_mmap); * Uses kmalloc to get the memory but if the allocation fails then falls back * to the vmalloc allocator. Use kvfree for freeing the memory. * - * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported + * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT + * is supported only for large (>32kB) allocations, and it should be used only if + * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks. * * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people. 
*/ @@ -358,8 +360,18 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) * Make sure that larger requests are not too disruptive - no OOM * killer and no allocation failure warnings as we have a fallback */ - if (size > PAGE_SIZE) - kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN; + if (size > PAGE_SIZE) { + kmalloc_flags |= __GFP_NOWARN; + + /* + * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly + * requests because there is no other way to tell the allocator + * that we want to fail rather than retry endlessly. + */ + if (!(kmalloc_flags & __GFP_REPEAT) || + (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + kmalloc_flags |= __GFP_NORETRY; + } ret = kmalloc_node(size, kmalloc_flags, node); -- cgit v1.2.3 From 197efea4defbd0fb43a19b65dfaf8b74e501188a Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:40 +1000 Subject: lib/rhashtable.c: simplify a strange allocation pattern alloc_bucket_locks allocation pattern is quite unusual. We are preferring vmalloc when CONFIG_NUMA is enabled. The rationale is that vmalloc will respect the memory policy of the current process and so the backing memory will get distributed over multiple nodes if the requester is configured properly. At least that is the intention, in reality rhastable is shrunk and expanded from a kernel worker so no mempolicy can be assumed. Let's just simplify the code and use kvmalloc helper, which is a transparent way to use kmalloc with vmalloc fallback, if the caller is allowed to block and use the flag otherwise. Link: http://lkml.kernel.org/r/20170306103032.2540-4-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Tom Herbert Cc: Eric Dumazet Signed-off-by: Andrew Morton --- lib/rhashtable.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index f8635fd57442..2c2c8afcde15 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -86,16 +86,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, size = min(size, 1U << tbl->nest); if (sizeof(spinlock_t) != 0) { - tbl->locks = NULL; -#ifdef CONFIG_NUMA - if (size * sizeof(spinlock_t) > PAGE_SIZE && - gfp == GFP_KERNEL) - tbl->locks = vmalloc(size * sizeof(spinlock_t)); -#endif - if (gfp != GFP_KERNEL) - gfp |= __GFP_NOWARN | __GFP_NORETRY; - - if (!tbl->locks) + if (gfpflags_allow_blocking(gfp)) + tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp); + else tbl->locks = kmalloc_array(size, sizeof(spinlock_t), gfp); if (!tbl->locks) -- cgit v1.2.3 From 80157d1a61eaad4c41dc7d2726c4dfd0712acab8 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:40 +1000 Subject: net/ipv6/ila/ila_xlat.c: simplify a strange allocation pattern alloc_ila_locks seemed to c&p from alloc_bucket_locks allocation pattern which is quite unusual. The default allocation size is 320 * sizeof(spinlock_t) which is sub page unless lockdep is enabled when the performance benefit is really questionable and not worth the subtle code IMHO. Also note that the context when we call ila_init_net (modprobe or a task creating a net namespace) has to be properly configured. Let's just simplify the code and use kvmalloc helper which is a transparent way to use kmalloc with vmalloc fallback. 
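The resulting pattern, sketched from the alloc_ila_locks() hunk that follows:

	/* before: open-coded, NUMA-only vmalloc special case */
	#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			ilan->locks = vmalloc(size * sizeof(spinlock_t));
		else
	#endif
			ilan->locks = kmalloc_array(size, sizeof(spinlock_t),
						    GFP_KERNEL);

	/* after: one call, kmalloc with a transparent vmalloc fallback */
	ilan->locks = kvmalloc(size * sizeof(spinlock_t), GFP_KERNEL);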
Link: http://lkml.kernel.org/r/20170306103032.2540-5-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Tom Herbert Cc: Eric Dumazet Cc: David Miller Signed-off-by: Andrew Morton --- net/ipv6/ila/ila_xlat.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index af8f52ee7180..2fd5ca151dcf 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -41,13 +41,7 @@ static int alloc_ila_locks(struct ila_net *ilan) size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU); if (sizeof(spinlock_t) != 0) { -#ifdef CONFIG_NUMA - if (size * sizeof(spinlock_t) > PAGE_SIZE) - ilan->locks = vmalloc(size * sizeof(spinlock_t)); - else -#endif - ilan->locks = kmalloc_array(size, sizeof(spinlock_t), - GFP_KERNEL); + ilan->locks = kvmalloc(size * sizeof(spinlock_t), GFP_KERNEL); if (!ilan->locks) return -ENOMEM; for (i = 0; i < size; i++) -- cgit v1.2.3 From 6ccd7f4a71d179c957c5a0e14cfb80fdeaed5a86 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:41 +1000 Subject: fs/xattr.c: zero out memory copied to userspace in getxattr getxattr uses vmalloc to allocate memory if kzalloc fails. This is filled by vfs_getxattr and then copied to the userspace. vmalloc, however, doesn't zero out the memory so if the specific implementation of the xattr handler is sloppy we can theoretically expose a kernel memory. There is no real sign this is really the case but let's make sure this will not happen and use vzalloc instead. Fixes: 779302e67835 ("fs/xattr.c:getxattr(): improve handling of allocation failures") Link: http://lkml.kernel.org/r/20170306103327.2766-1-mhocko@kernel.org Acked-by: Kees Cook Reported-by: Vlastimil Babka Signed-off-by: Michal Hocko Cc: [3.6+] Signed-off-by: Andrew Morton --- fs/xattr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xattr.c b/fs/xattr.c index 7e3317cf4045..94f49a082dd2 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, size = XATTR_SIZE_MAX; kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!kvalue) { - kvalue = vmalloc(size); + kvalue = vzalloc(size); if (!kvalue) return -ENOMEM; } -- cgit v1.2.3 From ac1a583dc9757fecc42863c18e51039c892dd3e4 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:41 +1000 Subject: treewide: use kv[mz]alloc* rather than opencoded variants There are many code paths opencoding kvmalloc. Let's use the helper instead. The main difference to kvmalloc is that those users are usually not considering all the aspects of the memory allocator. E.g. allocation requests <= 32kB (with 4kB pages) are basically never failing and invoke OOM killer to satisfy the allocation. This sounds too disruptive for something that has a reasonable fallback - the vmalloc. On the other hand those requests might fallback to vmalloc even when the memory allocator would succeed after several more reclaim/compaction attempts previously. There is no guarantee something like that happens though. This patch converts many of those places to kv[mz]alloc* helpers because they are more conservative. 
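A representative conversion, taken from the arch/s390/kvm hunk below; the same shape repeats across the tree:

	/* before: open-coded kmalloc attempt with a hand-rolled vmalloc fallback */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);

	/* after: one helper call with the conservative retry semantics built in */
	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);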
Link: http://lkml.kernel.org/r/20170306103327.2766-2-mhocko@kernel.org Signed-off-by: Michal Hocko Reviewed-by: Boris Ostrovsky # Xen bits Acked-by: Kees Cook Acked-by: Vlastimil Babka Acked-by: Andreas Dilger # Lustre Acked-by: Christian Borntraeger # KVM/s390 Acked-by: Dan Williams # nvdim Acked-by: David Sterba # btrfs Acked-by: Ilya Dryomov # Ceph Acked-by: Tariq Toukan # mlx4 Acked-by: Leon Romanovsky # mlx5 Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Herbert Xu Cc: Anton Vorontsov Cc: Colin Cross Cc: Tony Luck Cc: "Rafael J. Wysocki" Cc: Ben Skeggs Cc: Kent Overstreet Cc: Santosh Raspatur Cc: Hariprasad S Cc: Yishai Hadas Cc: Oleg Drokin Cc: "Yan, Zheng" Cc: Alexander Viro Cc: Alexei Starovoitov Cc: Eric Dumazet Cc: David Miller Signed-off-by: Andrew Morton --- arch/s390/kvm/kvm-s390.c | 10 ++----- crypto/lzo.c | 4 +-- drivers/acpi/apei/erst.c | 8 ++---- drivers/char/agp/generic.c | 8 +----- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +-- drivers/md/bcache/util.h | 12 ++------ drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h | 3 -- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 29 +++---------------- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 8 +----- drivers/net/ethernet/chelsio/cxgb3/l2t.h | 1 - drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 12 ++++---- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 -- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 10 +++---- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 8 +++--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 31 ++++---------------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 14 ++++----- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/sched.c | 12 ++++---- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 9 ++---- drivers/net/ethernet/mellanox/mlx4/mr.c | 9 ++---- drivers/nvdimm/dimm_devs.c | 5 +--- .../staging/lustre/lnet/libcfs/linux/linux-mem.c | 11 +------- drivers/xen/evtchn.c | 14 +-------- fs/btrfs/ctree.c | 9 ++---- fs/btrfs/ioctl.c | 9 ++---- fs/btrfs/send.c | 27 ++++++------------ fs/ceph/file.c | 9 ++---- fs/select.c | 5 +--- fs/xattr.c | 27 ++++++------------ include/linux/mlx5/driver.h | 7 +---- include/linux/mm.h | 8 ++++++ lib/iov_iter.c | 5 +--- mm/frame_vector.c | 5 +--- net/ipv4/inet_hashtables.c | 6 +--- net/ipv4/tcp_metrics.c | 5 +--- net/mpls/af_mpls.c | 5 +--- net/netfilter/x_tables.c | 21 +++----------- net/netfilter/xt_recent.c | 5 +--- net/sched/sch_choke.c | 5 +--- net/sched/sch_fq_codel.c | 26 ++++------------- net/sched/sch_hhf.c | 33 ++++++---------------- net/sched/sch_netem.c | 6 +--- net/sched/sch_sfq.c | 6 +--- security/keys/keyctl.c | 22 ++++----------- 44 files changed, 128 insertions(+), 350 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f83f18b77f3d..be8b46cceea0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1183,10 +1183,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) return -EINVAL; - keys = kmalloc_array(args->count, sizeof(uint8_t), - GFP_KERNEL | __GFP_NOWARN); - if (!keys) - keys = vmalloc(sizeof(uint8_t) * args->count); + keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); if (!keys) return -ENOMEM; @@ -1228,10 +1225,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) return -EINVAL; - keys = kmalloc_array(args->count, sizeof(uint8_t), - GFP_KERNEL | __GFP_NOWARN); - if (!keys) 
- keys = vmalloc(sizeof(uint8_t) * args->count); + keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); if (!keys) return -ENOMEM; diff --git a/crypto/lzo.c b/crypto/lzo.c index 168df784da84..218567d717d6 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -32,9 +32,7 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm) { void *ctx; - ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); - if (!ctx) - ctx = vmalloc(LZO1X_MEM_COMPRESS); + ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 7207e5fc9d3d..2c462beee551 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -513,7 +513,7 @@ retry: if (i < erst_record_id_cache.len) goto retry; if (erst_record_id_cache.len >= erst_record_id_cache.size) { - int new_size, alloc_size; + int new_size; u64 *new_entries; new_size = erst_record_id_cache.size * 2; @@ -524,11 +524,7 @@ retry: pr_warn(FW_WARN "too many record IDs!\n"); return 0; } - alloc_size = new_size * sizeof(entries[0]); - if (alloc_size < PAGE_SIZE) - new_entries = kmalloc(alloc_size, GFP_KERNEL); - else - new_entries = vmalloc(alloc_size); + new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); if (!new_entries) return -ENOMEM; memcpy(new_entries, entries, diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index f002fa5d1887..bdf418cac8ef 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -88,13 +88,7 @@ static int agp_get_key(void) void agp_alloc_page_array(size_t size, struct agp_memory *mem) { - mem->pages = NULL; - - if (size <= 2*PAGE_SIZE) - mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (mem->pages == NULL) { - mem->pages = vmalloc(size); - } + mem->pages = kvmalloc(size, GFP_KERNEL); } EXPORT_SYMBOL(agp_alloc_page_array); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ca5397beb357..2170534101ca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -568,9 +568,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) size *= nmemb; - mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!mem) - mem = vmalloc(size); + mem = kvmalloc(size, GFP_KERNEL); if (!mem) return ERR_PTR(-ENOMEM); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 5d13930f0f22..cb8d2ccbb6c6 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -43,11 +43,7 @@ struct closure; (heap)->used = 0; \ (heap)->size = (_size); \ _bytes = (heap)->size * sizeof(*(heap)->data); \ - (heap)->data = NULL; \ - if (_bytes < KMALLOC_MAX_SIZE) \ - (heap)->data = kmalloc(_bytes, (gfp)); \ - if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \ - (heap)->data = vmalloc(_bytes); \ + (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ (heap)->data; \ }) @@ -136,12 +132,8 @@ do { \ \ (fifo)->mask = _allocated_size - 1; \ (fifo)->front = (fifo)->back = 0; \ - (fifo)->data = NULL; \ \ - if (_bytes < KMALLOC_MAX_SIZE) \ - (fifo)->data = kmalloc(_bytes, (gfp)); \ - if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \ - (fifo)->data = vmalloc(_bytes); \ + (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ (fifo)->data; \ }) diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h index 920d918ed193..f04e81f33795 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h @@ -41,9 +41,6 @@ #define 
VALIDATE_TID 1 -void *cxgb_alloc_mem(unsigned long size); -void cxgb_free_mem(void *addr); - /* * Map an ATID or STID to their entries in the corresponding TID tables. */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 76684dcb874c..fa81445e334c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1151,27 +1151,6 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, l2t_release(tdev, e); } -/* - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. - * The allocated memory is cleared. - */ -void *cxgb_alloc_mem(unsigned long size) -{ - void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - - if (!p) - p = vzalloc(size); - return p; -} - -/* - * Free memory allocated through t3_alloc_mem(). - */ -void cxgb_free_mem(void *addr) -{ - kvfree(addr); -} - /* * Allocate and initialize the TID tables. Returns 0 on success. */ @@ -1182,7 +1161,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids, unsigned long size = ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); - t->tid_tab = cxgb_alloc_mem(size); + t->tid_tab = kvzalloc(size, GFP_KERNEL); if (!t->tid_tab) return -ENOMEM; @@ -1218,7 +1197,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids, static void free_tid_maps(struct tid_info *t) { - cxgb_free_mem(t->tid_tab); + kvfree(t->tid_tab); } static inline void add_adapter(struct adapter *adap) @@ -1293,7 +1272,7 @@ int cxgb3_offload_activate(struct adapter *adapter) return 0; out_free_l2t: - t3_free_l2t(l2td); + kvfree(l2td); out_free: kfree(t); return err; @@ -1302,7 +1281,7 @@ out_free: static void clean_l2_data(struct rcu_head *head) { struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); - t3_free_l2t(d); + kvfree(d); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 52063587e1e9..26264125865f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -444,7 +444,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) struct l2t_data *d; int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); - d = cxgb_alloc_mem(size); + d = kvzalloc(size, GFP_KERNEL); if (!d) return NULL; @@ -462,9 +462,3 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) } return d; } - -void t3_free_l2t(struct l2t_data *d) -{ - cxgb_free_mem(d); -} - diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index 8cffcdfd5678..c2fd323c4078 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -115,7 +115,6 @@ int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, struct l2t_entry *e); void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); -void t3_free_l2t(struct l2t_data *d); int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 7ad43af6bde1..3103ef9b561d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -290,8 +290,8 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, if (clipt_size < CLIPT_MIN_HASH_BUCKETS) return NULL; - ctbl = t4_alloc_mem(sizeof(*ctbl) + - 
clipt_size*sizeof(struct list_head)); + ctbl = kvzalloc(sizeof(*ctbl) + + clipt_size*sizeof(struct list_head), GFP_KERNEL); if (!ctbl) return NULL; @@ -305,9 +305,9 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, for (i = 0; i < ctbl->clipt_size; ++i) INIT_LIST_HEAD(&ctbl->hash_list[i]); - cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry)); + cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL); if (!cl_list) { - t4_free_mem(ctbl); + kvfree(ctbl); return NULL; } ctbl->cl_list = (void *)cl_list; @@ -326,8 +326,8 @@ void t4_cleanup_clip_tbl(struct adapter *adap) if (ctbl) { if (ctbl->cl_list) - t4_free_mem(ctbl->cl_list); - t4_free_mem(ctbl); + kvfree(ctbl->cl_list); + kvfree(ctbl); } } EXPORT_SYMBOL(t4_cleanup_clip_tbl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 163543b1ea0b..1d2be2dd19dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1184,8 +1184,6 @@ extern const char cxgb4_driver_version[]; void t4_os_portmod_changed(const struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); -void *t4_alloc_mem(size_t size); - void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); @@ -1557,7 +1555,6 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize); void t4_sge_decode_idma_state(struct adapter *adapter, int state); -void t4_free_mem(void *addr); void t4_idma_monitor_init(struct adapter *adapter, struct sge_idma_monitor_state *idma); void t4_idma_monitor(struct adapter *adapter, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index f6e739da7bb7..1fa34b009891 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2634,7 +2634,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, if (count > avail - pos) count = avail - pos; - data = t4_alloc_mem(count); + data = kvzalloc(count, GFP_KERNEL); if (!data) return -ENOMEM; @@ -2642,12 +2642,12 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); spin_unlock(&adap->win0_lock); if (ret) { - t4_free_mem(data); + kvfree(data); return ret; } ret = copy_to_user(buf, data, count); - t4_free_mem(data); + kvfree(data); if (ret) return -EFAULT; @@ -2753,7 +2753,7 @@ static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf, adap->sge.egr_sz, adap->sge.blocked_fl); len += sprintf(buf + len, "\n"); size = simple_read_from_buffer(ubuf, count, ppos, buf, len); - t4_free_mem(buf); + kvfree(buf); return size; } @@ -2773,7 +2773,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, return err; bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); - t4_free_mem(t); + kvfree(t); return count; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 02f80febeb91..0ba7866c8259 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -969,7 +969,7 @@ static int get_eeprom(struct 
net_device *dev, struct ethtool_eeprom *e, { int i, err = 0; struct adapter *adapter = netdev2adap(dev); - u8 *buf = t4_alloc_mem(EEPROMSIZE); + u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -980,7 +980,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, if (!err) memcpy(data, buf + e->offset, e->len); - t4_free_mem(buf); + kvfree(buf); return err; } @@ -1009,7 +1009,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { /* RMW possibly needed for first or last words. */ - buf = t4_alloc_mem(aligned_len); + buf = kvzalloc(aligned_len, GFP_KERNEL); if (!buf) return -ENOMEM; err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); @@ -1037,7 +1037,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, err = t4_seeprom_wp(adapter, true); out: if (buf != data) - t4_free_mem(buf); + kvfree(buf); return err; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index afb0967d2ce6..b717c6d6b83a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -880,27 +880,6 @@ freeout: return err; } -/* - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. - * The allocated memory is cleared. - */ -void *t4_alloc_mem(size_t size) -{ - void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - - if (!p) - p = vzalloc(size); - return p; -} - -/* - * Free memory allocated through alloc_mem(). - */ -void t4_free_mem(void *addr) -{ - kvfree(addr); -} - static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -1299,7 +1278,7 @@ static int tid_init(struct tid_info *t) max_ftids * sizeof(*t->ftid_tab) + ftid_bmap_size * sizeof(long); - t->tid_tab = t4_alloc_mem(size); + t->tid_tab = kvzalloc(size, GFP_KERNEL); if (!t->tid_tab) return -ENOMEM; @@ -3441,7 +3420,7 @@ static int adap_init0(struct adapter *adap) /* allocate memory to read the header of the firmware on the * card */ - card_fw = t4_alloc_mem(sizeof(*card_fw)); + card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); /* Get FW from from /lib/firmware/ */ ret = request_firmware(&fw, fw_info->fw_mod_name, @@ -3461,7 +3440,7 @@ static int adap_init0(struct adapter *adap) /* Cleaning up */ release_firmware(fw); - t4_free_mem(card_fw); + kvfree(card_fw); if (ret < 0) goto bye; @@ -4457,9 +4436,9 @@ static void free_some_resources(struct adapter *adapter) { unsigned int i; - t4_free_mem(adapter->l2t); + kvfree(adapter->l2t); t4_cleanup_sched(adapter); - t4_free_mem(adapter->tids.tid_tab); + kvfree(adapter->tids.tid_tab); cxgb4_cleanup_tc_u32(adapter); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index a1b19422b339..ef06ce8247ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -432,9 +432,9 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap) for (i = 0; i < t->size; i++) { struct cxgb4_link *link = &t->table[i]; - t4_free_mem(link->tid_map); + kvfree(link->tid_map); } - t4_free_mem(adap->tc_u32); + kvfree(adap->tc_u32); } struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) @@ -446,8 +446,8 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) if (!max_tids) 
return NULL; - t = t4_alloc_mem(sizeof(*t) + - (max_tids * sizeof(struct cxgb4_link))); + t = kvzalloc(sizeof(*t) + + (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL); if (!t) return NULL; @@ -458,7 +458,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) unsigned int bmap_size; bmap_size = BITS_TO_LONGS(max_tids); - link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); + link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL); if (!link->tid_map) goto out_no_mem; bitmap_zero(link->tid_map, max_tids); @@ -471,11 +471,11 @@ out_no_mem: struct cxgb4_link *link = &t->table[i]; if (link->tid_map) - t4_free_mem(link->tid_map); + kvfree(link->tid_map); } if (t) - t4_free_mem(t); + kvfree(t); return NULL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 7c8c5b9a3c22..6f3692db29af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) if (l2t_size < L2T_MIN_HASH_BUCKETS) return NULL; - d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); + d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL); if (!d) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index c9026352a842..02acff741f11 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -177,7 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) } list_del(&qe->list); - t4_free_mem(qe); + kvfree(qe); if (atomic_dec_and_test(&e->refcnt)) { e->state = SCHED_STATE_UNUSED; memset(&e->info, 0, sizeof(e->info)); @@ -201,7 +201,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) if (p->queue < 0 || p->queue >= pi->nqsets) return -ERANGE; - qe = t4_alloc_mem(sizeof(struct sched_queue_entry)); + qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL); if (!qe) return -ENOMEM; @@ -211,7 +211,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) /* Unbind queue from any existing class */ err = t4_sched_queue_unbind(pi, p); if (err) { - t4_free_mem(qe); + kvfree(qe); goto out; } @@ -224,7 +224,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) spin_lock(&e->lock); err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); if (err) { - t4_free_mem(qe); + kvfree(qe); spin_unlock(&e->lock); goto out; } @@ -512,7 +512,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size) struct sched_table *s; unsigned int i; - s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class)); + s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL); if (!s) return NULL; @@ -548,6 +548,6 @@ void t4_cleanup_sched(struct adapter *adap) t4_sched_class_free(pi, e); write_unlock(&s->rw_lock); } - t4_free_mem(s); + kvfree(s); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index e0c5ffb3e3a6..353840520d53 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -70,13 +70,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; tmp = size * sizeof(struct mlx4_en_tx_info); - ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); + 
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node); if (!ring->tx_info) { - ring->tx_info = vmalloc(tmp); - if (!ring->tx_info) { - err = -ENOMEM; - goto err_ring; - } + err = -ENOMEM; + goto err_ring; } en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index db65f72879e9..ce852ca22a96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -115,12 +115,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); - if (!buddy->bits[i]) { - buddy->bits[i] = vzalloc(s * sizeof(long)); - if (!buddy->bits[i]) - goto err_out_free; - } + buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); + if (!buddy->bits[i]) + goto err_out_free; } set_bit(0, buddy->bits[buddy->max_order]); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 0eedc49e0d47..3bd332b167d9 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -102,10 +102,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) return -ENXIO; } - ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL); - if (!ndd->data) - ndd->data = vmalloc(ndd->nsarea.config_size); - + ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL); if (!ndd->data) return -ENOMEM; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c index a6a76a681ea9..8f638267e704 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c @@ -45,15 +45,6 @@ EXPORT_SYMBOL(libcfs_kvzalloc); void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, gfp_t flags) { - void *ret; - - ret = kzalloc_node(size, flags | __GFP_NOWARN, - cfs_cpt_spread_node(cptab, cpt)); - if (!ret) { - WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH))); - ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); - } - - return ret; + return kvzalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt)); } EXPORT_SYMBOL(libcfs_kvzalloc_cpt); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 6890897a6f30..10f1ef582659 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -87,18 +87,6 @@ struct user_evtchn { bool enabled; }; -static evtchn_port_t *evtchn_alloc_ring(unsigned int size) -{ - evtchn_port_t *ring; - size_t s = size * sizeof(*ring); - - ring = kmalloc(s, GFP_KERNEL); - if (!ring) - ring = vmalloc(s); - - return ring; -} - static void evtchn_free_ring(evtchn_port_t *ring) { kvfree(ring); @@ -334,7 +322,7 @@ static int evtchn_resize_ring(struct per_user_data *u) else new_size = 2 * u->ring_size; - new_ring = evtchn_alloc_ring(new_size); + new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL); if (!new_ring) return -ENOMEM; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 165e7ec12af7..a3a75f1de002 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -5392,13 +5392,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root, goto out; } - tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); + tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); if (!tmp_buf) { - tmp_buf = vmalloc(fs_info->nodesize); - if (!tmp_buf) { - ret = -ENOMEM; - goto out; - } + ret = -ENOMEM; + goto out; } left_path->search_commit_root = 1; diff 
--git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 378837e2fb3c..04862ae503a4 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3544,12 +3544,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode, u64 last_dest_end = destoff; ret = -ENOMEM; - buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); - if (!buf) { - buf = vmalloc(fs_info->nodesize); - if (!buf) - return ret; - } + buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); + if (!buf) + return ret; path = btrfs_alloc_path(); if (!path) { diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index a60d5bfb8a49..3f645cd67b54 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -6360,22 +6360,16 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) sctx->clone_roots_cnt = arg->clone_sources_count; sctx->send_max_size = BTRFS_SEND_BUF_SIZE; - sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN); + sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL); if (!sctx->send_buf) { - sctx->send_buf = vmalloc(sctx->send_max_size); - if (!sctx->send_buf) { - ret = -ENOMEM; - goto out; - } + ret = -ENOMEM; + goto out; } - sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN); + sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL); if (!sctx->read_buf) { - sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); - if (!sctx->read_buf) { - ret = -ENOMEM; - goto out; - } + ret = -ENOMEM; + goto out; } sctx->pending_dir_moves = RB_ROOT; @@ -6396,13 +6390,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources); if (arg->clone_sources_count) { - clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN); + clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL); if (!clone_sources_tmp) { - clone_sources_tmp = vmalloc(alloc_size); - if (!clone_sources_tmp) { - ret = -ENOMEM; - goto out; - } + ret = -ENOMEM; + goto out; } ret = copy_from_user(clone_sources_tmp, arg->clone_sources, diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 26cc95421cca..18c045e2ead6 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -74,12 +74,9 @@ dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, align = (unsigned long)(it->iov->iov_base + it->iov_offset) & (PAGE_SIZE - 1); npages = calc_pages_for(align, nbytes); - pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); - if (!pages) { - pages = vmalloc(sizeof(*pages) * npages); - if (!pages) - return ERR_PTR(-ENOMEM); - } + pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); for (idx = 0; idx < npages; ) { size_t start; diff --git a/fs/select.c b/fs/select.c index 9287d3a96e35..b448274a9790 100644 --- a/fs/select.c +++ b/fs/select.c @@ -586,10 +586,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, goto out_nofds; alloc_size = 6 * size; - bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN); - if (!bits && alloc_size > PAGE_SIZE) - bits = vmalloc(alloc_size); - + bits = kvmalloc(alloc_size, GFP_KERNEL); if (!bits) goto out_nofds; } diff --git a/fs/xattr.c b/fs/xattr.c index 94f49a082dd2..464c94bf65f9 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -431,12 +431,9 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value, if (size) { if (size > XATTR_SIZE_MAX) return -E2BIG; - kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!kvalue) { - kvalue = vmalloc(size); - if (!kvalue) - return -ENOMEM; - } + kvalue = kvmalloc(size, GFP_KERNEL); + if (!kvalue) + return -ENOMEM; if 
(copy_from_user(kvalue, value, size)) { error = -EFAULT; goto out; @@ -528,12 +525,9 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, if (size) { if (size > XATTR_SIZE_MAX) size = XATTR_SIZE_MAX; - kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!kvalue) { - kvalue = vzalloc(size); - if (!kvalue) - return -ENOMEM; - } + kvalue = kvzalloc(size, GFP_KERNEL); + if (!kvalue) + return -ENOMEM; } error = vfs_getxattr(d, kname, kvalue, size); @@ -611,12 +605,9 @@ listxattr(struct dentry *d, char __user *list, size_t size) if (size) { if (size > XATTR_LIST_MAX) size = XATTR_LIST_MAX; - klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL); - if (!klist) { - klist = vmalloc(size); - if (!klist) - return -ENOMEM; - } + klist = kvmalloc(size, GFP_KERNEL); + if (!klist) + return -ENOMEM; } error = vfs_listxattr(d, klist, size); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f50864626230..6cd000f30921 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -891,12 +891,7 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev) static inline void *mlx5_vzalloc(unsigned long size) { - void *rtn; - - rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!rtn) - rtn = vzalloc(size); - return rtn; + return kvzalloc(size, GFP_KERNEL); } static inline u32 mlx5_base_mkey(const u32 key) diff --git a/include/linux/mm.h b/include/linux/mm.h index 5d36be8f4620..c82e8dbc81ea 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -532,6 +532,14 @@ static inline void *kvzalloc(size_t size, gfp_t flags) return kvmalloc(size, flags | __GFP_ZERO); } +static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (size != 0 && n > SIZE_MAX / size) + return NULL; + + return kvmalloc(n * size, flags); +} + extern void kvfree(const void *addr); static inline atomic_t *compound_mapcount_ptr(struct page *page) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e68604ae3ced..4b2b3f5b3380 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -965,10 +965,7 @@ EXPORT_SYMBOL(iov_iter_get_pages); static struct page **get_pages_array(size_t n) { - struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL); - if (!p) - p = vmalloc(n * sizeof(struct page *)); - return p; + return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); } static ssize_t pipe_get_pages_alloc(struct iov_iter *i, diff --git a/mm/frame_vector.c b/mm/frame_vector.c index db77dcb38afd..72ebec18629c 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c @@ -200,10 +200,7 @@ struct frame_vector *frame_vector_create(unsigned int nr_frames) * Avoid higher order allocations, use vmalloc instead. It should * be rare anyway. 
*/ - if (size <= PAGE_SIZE) - vec = kmalloc(size, GFP_KERNEL); - else - vec = vmalloc(size); + vec = kvmalloc(size, GFP_KERNEL); if (!vec) return NULL; vec->nr_allocated = nr_frames; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 8bea74298173..e9a59d2d91d4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -678,11 +678,7 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) /* no more locks than number of hash buckets */ nblocks = min(nblocks, hashinfo->ehash_mask + 1); - hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, - GFP_KERNEL | __GFP_NOWARN); - if (!hashinfo->ehash_locks) - hashinfo->ehash_locks = vmalloc(nblocks * locksz); - + hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); if (!hashinfo->ehash_locks) return -ENOMEM; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 9d0d4f39e42b..653bbd67e3a3 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -1011,10 +1011,7 @@ static int __net_init tcp_net_metrics_init(struct net *net) tcp_metrics_hash_log = order_base_2(slots); size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; - tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!tcp_metrics_hash) - tcp_metrics_hash = vzalloc(size); - + tcp_metrics_hash = kvzalloc(size, GFP_KERNEL); if (!tcp_metrics_hash) return -ENOMEM; diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 5928d22ba9c8..81de33da89ed 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -2001,10 +2001,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) unsigned index; if (size) { - labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!labels) - labels = vzalloc(size); - + labels = kvzalloc(size, GFP_KERNEL); if (!labels) goto nolabels; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 14857afc9937..d529989f5791 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -763,17 +763,8 @@ EXPORT_SYMBOL(xt_check_entry_offsets); */ unsigned int *xt_alloc_entry_offsets(unsigned int size) { - unsigned int *off; + return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO); - off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); - - if (off) - return off; - - if (size < (SIZE_MAX / sizeof(unsigned int))) - off = vmalloc(size * sizeof(unsigned int)); - - return off; } EXPORT_SYMBOL(xt_alloc_entry_offsets); @@ -1114,7 +1105,7 @@ static int xt_jumpstack_alloc(struct xt_table_info *i) size = sizeof(void **) * nr_cpu_ids; if (size > PAGE_SIZE) - i->jumpstack = vzalloc(size); + i->jumpstack = kvzalloc(size, GFP_KERNEL); else i->jumpstack = kzalloc(size, GFP_KERNEL); if (i->jumpstack == NULL) @@ -1136,12 +1127,8 @@ static int xt_jumpstack_alloc(struct xt_table_info *i) */ size = sizeof(void *) * i->stacksize * 2u; for_each_possible_cpu(cpu) { - if (size > PAGE_SIZE) - i->jumpstack[cpu] = vmalloc_node(size, - cpu_to_node(cpu)); - else - i->jumpstack[cpu] = kmalloc_node(size, - GFP_KERNEL, cpu_to_node(cpu)); + i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, + cpu_to_node(cpu)); if (i->jumpstack[cpu] == NULL) /* * Freeing will be done later on by the callers. 
The diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 1d89a4eaf841..d6aa8f63ed2e 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -388,10 +388,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par, } sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size; - if (sz <= PAGE_SIZE) - t = kzalloc(sz, GFP_KERNEL); - else - t = vzalloc(sz); + t = kvzalloc(sz, GFP_KERNEL); if (t == NULL) { ret = -ENOMEM; goto out; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 593183a5b5b5..f11f8732b0d5 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -376,10 +376,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) if (mask != q->tab_mask) { struct sk_buff **ntab; - ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), - GFP_KERNEL | __GFP_NOWARN); - if (!ntab) - ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *)); + ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO); if (!ntab) return -ENOMEM; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 097bbe9857a5..efb27e4dc808 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -445,27 +445,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) return 0; } -static void *fq_codel_zalloc(size_t sz) -{ - void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); - - if (!ptr) - ptr = vzalloc(sz); - return ptr; -} - -static void fq_codel_free(void *addr) -{ - kvfree(addr); -} - static void fq_codel_destroy(struct Qdisc *sch) { struct fq_codel_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); - fq_codel_free(q->backlogs); - fq_codel_free(q->flows); + kvfree(q->backlogs); + kvfree(q->flows); } static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) @@ -492,13 +478,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) } if (!q->flows) { - q->flows = fq_codel_zalloc(q->flows_cnt * - sizeof(struct fq_codel_flow)); + q->flows = kvzalloc(q->flows_cnt * + sizeof(struct fq_codel_flow), GFP_KERNEL); if (!q->flows) return -ENOMEM; - q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); + q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL); if (!q->backlogs) { - fq_codel_free(q->flows); + kvfree(q->flows); return -ENOMEM; } for (i = 0; i < q->flows_cnt; i++) { diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 2fae8b5f1b80..8cf3d9eac553 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -467,29 +467,14 @@ static void hhf_reset(struct Qdisc *sch) rtnl_kfree_skbs(skb, skb); } -static void *hhf_zalloc(size_t sz) -{ - void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); - - if (!ptr) - ptr = vzalloc(sz); - - return ptr; -} - -static void hhf_free(void *addr) -{ - kvfree(addr); -} - static void hhf_destroy(struct Qdisc *sch) { int i; struct hhf_sched_data *q = qdisc_priv(sch); for (i = 0; i < HHF_ARRAYS_CNT; i++) { - hhf_free(q->hhf_arrays[i]); - hhf_free(q->hhf_valid_bits[i]); + kvfree(q->hhf_arrays[i]); + kvfree(q->hhf_valid_bits[i]); } for (i = 0; i < HH_FLOWS_CNT; i++) { @@ -503,7 +488,7 @@ static void hhf_destroy(struct Qdisc *sch) kfree(flow); } } - hhf_free(q->hh_flows); + kvfree(q->hh_flows); } static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = { @@ -609,8 +594,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) if (!q->hh_flows) { /* Initialize heavy-hitter flow table. 
*/ - q->hh_flows = hhf_zalloc(HH_FLOWS_CNT * - sizeof(struct list_head)); + q->hh_flows = kvzalloc(HH_FLOWS_CNT * + sizeof(struct list_head), GFP_KERNEL); if (!q->hh_flows) return -ENOMEM; for (i = 0; i < HH_FLOWS_CNT; i++) @@ -624,8 +609,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) /* Initialize heavy-hitter filter arrays. */ for (i = 0; i < HHF_ARRAYS_CNT; i++) { - q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * - sizeof(u32)); + q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN * + sizeof(u32), GFP_KERNEL); if (!q->hhf_arrays[i]) { /* Note: hhf_destroy() will be called * by our caller. @@ -637,8 +622,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) /* Initialize valid bits of heavy-hitter filter arrays. */ for (i = 0; i < HHF_ARRAYS_CNT; i++) { - q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / - BITS_PER_BYTE); + q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN / + BITS_PER_BYTE, GFP_KERNEL); if (!q->hhf_valid_bits[i]) { /* Note: hhf_destroy() will be called * by our caller. diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 94b4928ad413..e4ca9d65dc4f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -702,15 +702,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) spinlock_t *root_lock; struct disttable *d; int i; - size_t s; if (n > NETEM_DIST_MAX) return -EINVAL; - s = sizeof(struct disttable) + n * sizeof(s16); - d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); - if (!d) - d = vmalloc(s); + d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); if (!d) return -ENOMEM; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index b00e02c139de..332d94be6e1c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -685,11 +685,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) static void *sfq_alloc(size_t sz) { - void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); - - if (!ptr) - ptr = vmalloc(sz); - return ptr; + return kvmalloc(sz, GFP_KERNEL); } static void sfq_free(void *addr) diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 10fcea154c0f..e4402ae56681 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -101,14 +101,9 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, if (_payload) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); - if (!payload) { - if (plen <= PAGE_SIZE) - goto error2; - payload = vmalloc(plen); - if (!payload) - goto error2; - } + payload = kvmalloc(plen, GFP_KERNEL); + if (!payload) + goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) @@ -1066,14 +1061,9 @@ long keyctl_instantiate_key_common(key_serial_t id, if (from) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL); - if (!payload) { - if (plen <= PAGE_SIZE) - goto error; - payload = vmalloc(plen); - if (!payload) - goto error; - } + payload = kvmalloc(plen, GFP_KERNEL); + if (!payload) + goto error; ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) -- cgit v1.2.3 From e678c0215f76c8f24cbeae168ca53e1aa135fe6d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:41 +1000 Subject: net: use kvmalloc with __GFP_REPEAT rather than open coded variant fq_alloc_node, alloc_netdev_mqs and netif_alloc* open code kmalloc with vmalloc fallback. Use the kvmalloc variant instead. 
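For reference, a minimal sketch of what the helper does (illustrative only -- the real
implementation lives in mm/util.c and handles a few more corner cases such as the GFP
compatibility check and __GFP_REPEAT handling; kvmalloc_node_sketch is a made-up name):

	#include <linux/gfp.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *kvmalloc_node_sketch(size_t size, gfp_t flags, int node)
	{
		gfp_t kmalloc_flags = flags | __GFP_NOWARN;
		void *p;

		/* Don't let the slab allocator retry hard for large requests. */
		if (size > PAGE_SIZE)
			kmalloc_flags |= __GFP_NORETRY;

		p = kmalloc_node(size, kmalloc_flags, node);
		if (p || size <= PAGE_SIZE)
			return p;

		/* Physically contiguous memory is not available; map it virtually. */
		return vmalloc_node(size, node);
	}

Callers pair this with kvfree(), which checks is_vmalloc_addr() and dispatches to
vfree() or kfree() accordingly.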
Keep the __GFP_REPEAT flag based on explanation from Eric: "At the time, tests on the hardware I had in my labs showed that vmalloc() could deliver pages spread all over the memory and that was a small penalty (once memory is fragmented enough, not at boot time)" The way how the code is constructed means, however, that we prefer to go and hit the OOM killer before we fall back to the vmalloc for requests <=32kB (with 4kB pages) in the current code. This is rather disruptive for something that can be achived with the fallback. On the other hand __GFP_REPEAT doesn't have any useful semantic for these requests. So the effect of this patch is that requests which fit into 32kB will fall back to vmalloc easier now. Link: http://lkml.kernel.org/r/20170306103327.2766-3-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Eric Dumazet Cc: David Miller Signed-off-by: Andrew Morton --- net/core/dev.c | 24 +++++++++--------------- net/sched/sch_fq.c | 12 +----------- 2 files changed, 10 insertions(+), 26 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 7efb4178ffef..875a1e747fcc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7119,12 +7119,10 @@ static int netif_alloc_rx_queues(struct net_device *dev) BUG_ON(count < 1); - rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - if (!rx) { - rx = vzalloc(sz); - if (!rx) - return -ENOMEM; - } + rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT); + if (!rx) + return -ENOMEM; + dev->_rx = rx; for (i = 0; i < count; i++) @@ -7161,12 +7159,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev) if (count < 1 || count > 0xffff) return -EINVAL; - tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - if (!tx) { - tx = vzalloc(sz); - if (!tx) - return -ENOMEM; - } + tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT); + if (!tx) + return -ENOMEM; + dev->_tx = tx; netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); @@ -7700,9 +7696,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, /* ensure 32-byte alignment of whole construct */ alloc_size += NETDEV_ALIGN - 1; - p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); - if (!p) - p = vzalloc(alloc_size); + p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT); if (!p) return NULL; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a4f738ac7728..594f77d89f6c 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -624,16 +624,6 @@ static void fq_rehash(struct fq_sched_data *q, q->stat_gc_flows += fcnt; } -static void *fq_alloc_node(size_t sz, int node) -{ - void *ptr; - - ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node); - if (!ptr) - ptr = vmalloc_node(sz, node); - return ptr; -} - static void fq_free(void *addr) { kvfree(addr); @@ -650,7 +640,7 @@ static int fq_resize(struct Qdisc *sch, u32 log) return 0; /* If XPS was setup, we can allocate memory on right NUMA node */ - array = fq_alloc_node(sizeof(struct rb_root) << log, + array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT, netdev_queue_numa_node_read(sch->dev_queue)); if (!array) return -ENOMEM; -- cgit v1.2.3 From 645cf540fb494f6772b91bd170fd17080266baa2 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:42 +1000 Subject: drivers/md/dm-ioctl.c: use kvmalloc rather than opencoded variant copy_params uses kmalloc with vmalloc fallback. We already have a helper for that - kvmalloc. 
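A minimal sketch of the resulting pattern in copy_params() (illustrative; the
kvmalloc_noio() wrapper name is made up, the next paragraph explains why the
memalloc_noio scope is needed, and the header holding memalloc_noio_save() has moved
between kernel versions):

	#include <linux/mm.h>
	#include <linux/sched/mm.h>	/* or <linux/sched.h> on older kernels */

	static void *kvmalloc_noio(size_t size)
	{
		unsigned int noio_flag;
		void *p;

		/*
		 * Inside the scope the page allocator masks off __GFP_IO and
		 * __GFP_FS, so a plain GFP_KERNEL kvmalloc() -- including its
		 * vmalloc fallback -- behaves as a GFP_NOIO allocation.
		 */
		noio_flag = memalloc_noio_save();
		p = kvmalloc(size, GFP_KERNEL);
		memalloc_noio_restore(noio_flag);

		return p;
	}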
This caller requires GFP_NOIO semantic so it hasn't been converted with many others by previous patches. All we need to achieve this semantic is to use the scope memalloc_noio_{save,restore} around kvmalloc. Link: http://lkml.kernel.org/r/20170306103327.2766-4-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Mikulas Patocka Cc: Mike Snitzer Signed-off-by: Andrew Morton --- drivers/md/dm-ioctl.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4da6fc6b1ffd..4951bf99dfb1 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1699,6 +1699,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern struct dm_ioctl *dmi; int secure_data; const size_t minimum_data_size = offsetof(struct dm_ioctl, data); + unsigned noio_flag; if (copy_from_user(param_kernel, user, minimum_data_size)) return -EFAULT; @@ -1721,15 +1722,9 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern * Use kmalloc() rather than vmalloc() when we can. */ dmi = NULL; - if (param_kernel->data_size <= KMALLOC_MAX_SIZE) - dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - - if (!dmi) { - unsigned noio_flag; - noio_flag = memalloc_noio_save(); - dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL); - memalloc_noio_restore(noio_flag); - } + noio_flag = memalloc_noio_save(); + dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); + memalloc_noio_restore(noio_flag); if (!dmi) { if (secure_data && clear_user(user, param_kernel->data_size)) -- cgit v1.2.3 From e81b3400cddb75a06e5cc179955b63a93e0cf4db Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:42 +1000 Subject: drivers/md/bcache/super.c: use kvmalloc bcache_device_init uses kmalloc for small requests and vmalloc for those which are larger than 64 pages. This alone is a strange criterion. Moreover kmalloc can fallback to vmalloc on the failure. Let's simply use kvmalloc instead as it knows how to handle the fallback properly Link: http://lkml.kernel.org/r/20170306103327.2766-5-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Kent Overstreet Signed-off-by: Andrew Morton --- drivers/md/bcache/super.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 85e3f21c2514..e57353e39168 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -767,16 +767,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, } n = d->nr_stripes * sizeof(atomic_t); - d->stripe_sectors_dirty = n < PAGE_SIZE << 6 - ? kzalloc(n, GFP_KERNEL) - : vzalloc(n); + d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); if (!d->stripe_sectors_dirty) return -ENOMEM; n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); - d->full_dirty_stripes = n < PAGE_SIZE << 6 - ? kzalloc(n, GFP_KERNEL) - : vzalloc(n); + d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); if (!d->full_dirty_stripes) return -ENOMEM; -- cgit v1.2.3 From 70a4d76d466420c7344130826ea107bcbbd71be7 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Apr 2017 09:24:42 +1000 Subject: mm, vmalloc: use __GFP_HIGHMEM implicitly __vmalloc* allows users to provide gfp flags for the underlying allocation. 
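As a condensed illustration of the change in calling convention, assuming a caller
like the btrfs free-space-tree bitmap allocation converted below (the real
alloc_bitmap() tries kzalloc() first):

	#include <linux/gfp.h>
	#include <linux/vmalloc.h>

	static void *alloc_bitmap_sketch(unsigned long bitmap_size)
	{
		/* Before: GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO had to be spelled out. */
		/* After: __vmalloc_area_node() ORs __GFP_HIGHMEM into the page allocation mask. */
		return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
	}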
This API is quite popular $ git grep "=[[:space:]]__vmalloc\|return[[:space:]]*__vmalloc" | wc -l 77 The only problem is that many people are not aware that they really want to give __GFP_HIGHMEM along with other flags because there is really no reason to consume precious lowmemory on CONFIG_HIGHMEM systems for pages which are mapped to the kernel vmalloc space. About half of users don't use this flag, though. This signals that we make the API unnecessarily too complex. This patch simply uses __GFP_HIGHMEM implicitly when allocating pages to be mapped to the vmalloc space. Current users which add __GFP_HIGHMEM are simplified and drop the flag. Link: http://lkml.kernel.org/r/20170307141020.29107-1-mhocko@kernel.org Signed-off-by: Michal Hocko Reviewed-by: Matthew Wilcox Cc: Al Viro Cc: Vlastimil Babka Cc: David Rientjes Cc: Cristopher Lameter Signed-off-by: Andrew Morton --- arch/parisc/kernel/module.c | 2 +- arch/x86/kernel/module.c | 2 +- drivers/block/drbd/drbd_bitmap.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 4 ++-- drivers/md/dm-bufio.c | 2 +- fs/btrfs/free-space-tree.c | 3 +-- fs/file.c | 2 +- fs/xfs/kmem.c | 2 +- include/drm/drm_mem_util.h | 9 +++------ kernel/bpf/core.c | 9 +++------ kernel/bpf/syscall.c | 3 +-- kernel/fork.c | 2 +- kernel/groups.c | 2 +- kernel/module.c | 2 +- mm/kasan/kasan.c | 2 +- mm/nommu.c | 3 +-- mm/util.c | 2 +- mm/vmalloc.c | 14 +++++++------- net/ceph/ceph_common.c | 2 +- net/netfilter/x_tables.c | 3 +-- 20 files changed, 31 insertions(+), 41 deletions(-) diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index c66c943d9322..f1a76935a314 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -218,7 +218,7 @@ void *module_alloc(unsigned long size) * easier than trying to map the text, data, init_text and * init_data correctly */ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL | __GFP_HIGHMEM, + GFP_KERNEL, PAGE_KERNEL_RWX, 0, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 477ae806c2fa..f67bd3205df7 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -85,7 +85,7 @@ void *module_alloc(unsigned long size) p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR + get_module_load_offset(), - MODULES_END, GFP_KERNEL | __GFP_HIGHMEM, + MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (p && (kasan_module_alloc(p, size) < 0)) { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index dece26f119d4..a804a4107fbc 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -409,7 +409,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); if (!new_pages) { new_pages = __vmalloc(bytes, - GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, + GFP_NOIO | __GFP_ZERO, PAGE_KERNEL); if (!new_pages) return NULL; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index d019b5e311cc..2d955d7d7b6d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -161,8 +161,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) file_size += sizeof(*iter.hdr) * n_obj; /* Allocate the file in vmalloc memory, it's likely to be big */ - iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM | - __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); + iter.start = 
__vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, + PAGE_KERNEL); if (!iter.start) { dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); return; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 280b56c229d6..9407803b79df 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -406,7 +406,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, if (gfp_mask & __GFP_NORETRY) noio_flag = memalloc_noio_save(); - ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); if (gfp_mask & __GFP_NORETRY) memalloc_noio_restore(noio_flag); diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index dd7fb22a955a..fc0bd8406758 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -167,8 +167,7 @@ static u8 *alloc_bitmap(u32 bitmap_size) if (mem) return mem; - return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO, - PAGE_KERNEL); + return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL); } int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, diff --git a/fs/file.c b/fs/file.c index ad6f094f2eff..1c2972e3a405 100644 --- a/fs/file.c +++ b/fs/file.c @@ -42,7 +42,7 @@ static void *alloc_fdmem(size_t size) if (data != NULL) return data; } - return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL); + return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL); } static void __free_fdtable(struct fdtable *fdt) diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index 780fc8986dab..393b6849aeb3 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -67,7 +67,7 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags) nofs_flag = memalloc_nofs_save(); lflags = kmem_flags_convert(flags); - ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + ptr = __vmalloc(size, lflags | __GFP_ZERO, PAGE_KERNEL); if (flags & KM_NOFS) memalloc_nofs_restore(nofs_flag); diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h index 70d4e221a3ad..d0f6cf2e5324 100644 --- a/include/drm/drm_mem_util.h +++ b/include/drm/drm_mem_util.h @@ -37,8 +37,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) if (size * nmemb <= PAGE_SIZE) return kcalloc(nmemb, size, GFP_KERNEL); - return __vmalloc(size * nmemb, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + return vzalloc(size * nmemb); } /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. 
*/ @@ -50,8 +49,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) if (size * nmemb <= PAGE_SIZE) return kmalloc(nmemb * size, GFP_KERNEL); - return __vmalloc(size * nmemb, - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); + return vmalloc(size * nmemb); } static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) @@ -69,8 +67,7 @@ static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) return ptr; } - return __vmalloc(size * nmemb, - gfp | __GFP_HIGHMEM, PAGE_KERNEL); + return __vmalloc(size * nmemb, gfp, PAGE_KERNEL); } static __inline void drm_free_large(void *ptr) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f45827e205d3..33b7861b517c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -76,8 +76,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) { - gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | - gfp_extra_flags; + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; struct bpf_prog_aux *aux; struct bpf_prog *fp; @@ -107,8 +106,7 @@ EXPORT_SYMBOL_GPL(bpf_prog_alloc); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags) { - gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | - gfp_extra_flags; + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; struct bpf_prog *fp; u32 pages, delta; int ret; @@ -659,8 +657,7 @@ out: static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, gfp_t gfp_extra_flags) { - gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | - gfp_extra_flags; + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; struct bpf_prog *fp; fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ab0cf4c43690..3dd05b9f7fc3 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -68,8 +68,7 @@ void *bpf_map_area_alloc(size_t size) return area; } - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags, - PAGE_KERNEL); + return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL); } void bpf_map_area_free(void *area) diff --git a/kernel/fork.c b/kernel/fork.c index d39e785d83d0..b908bc759671 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -203,7 +203,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE, VMALLOC_START, VMALLOC_END, - THREADINFO_GFP | __GFP_HIGHMEM, + THREADINFO_GFP, PAGE_KERNEL, 0, node, __builtin_return_address(0)); diff --git a/kernel/groups.c b/kernel/groups.c index 8dd7a61b7115..d09727692a2a 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -18,7 +18,7 @@ struct group_info *groups_alloc(int gidsetsize) len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize; gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY); if (!gi) - gi = __vmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_HIGHMEM, PAGE_KERNEL); + gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL); if (!gi) return NULL; diff --git a/kernel/module.c b/kernel/module.c index 63321952c71c..c8d0047d8c7d 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2864,7 +2864,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len, /* Suck in entire file: we'll want most of it. 
*/ info->hdr = __vmalloc(info->len, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL); + GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL); if (!info->hdr) return -ENOMEM; diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index e4c2975fcae8..c81549d5c833 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -691,7 +691,7 @@ int kasan_module_alloc(void *addr, size_t size) ret = __vmalloc_node_range(shadow_size, 1, shadow_start, shadow_start + shadow_size, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, __builtin_return_address(0)); diff --git a/mm/nommu.c b/mm/nommu.c index a80411d258fc..fc184f597d59 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -246,8 +246,7 @@ void *vmalloc_user(unsigned long size) { void *ret; - ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, - PAGE_KERNEL); + ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); if (ret) { struct vm_area_struct *vma; diff --git a/mm/util.c b/mm/util.c index f4e590b2c0da..718154debc87 100644 --- a/mm/util.c +++ b/mm/util.c @@ -382,7 +382,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) if (ret || size <= PAGE_SIZE) return ret; - return __vmalloc_node_flags(size, node, flags | __GFP_HIGHMEM); + return __vmalloc_node_flags(size, node, flags); } EXPORT_SYMBOL(kvmalloc_node); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 33603239560e..8ef8ea1a68f6 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1658,7 +1658,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, struct page **pages; unsigned int nr_pages, array_size, i; const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; - const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; + const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN; nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; array_size = (nr_pages * sizeof(struct page *)); @@ -1828,7 +1828,7 @@ void *__vmalloc_node_flags(unsigned long size, void *vmalloc(unsigned long size) { return __vmalloc_node_flags(size, NUMA_NO_NODE, - GFP_KERNEL | __GFP_HIGHMEM); + GFP_KERNEL); } EXPORT_SYMBOL(vmalloc); @@ -1845,7 +1845,7 @@ EXPORT_SYMBOL(vmalloc); void *vzalloc(unsigned long size) { return __vmalloc_node_flags(size, NUMA_NO_NODE, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vzalloc); @@ -1862,7 +1862,7 @@ void *vmalloc_user(unsigned long size) void *ret; ret = __vmalloc_node(size, SHMLBA, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); if (ret) { @@ -1886,7 +1886,7 @@ EXPORT_SYMBOL(vmalloc_user); */ void *vmalloc_node(unsigned long size, int node) { - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, + return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_node); @@ -1906,7 +1906,7 @@ EXPORT_SYMBOL(vmalloc_node); void *vzalloc_node(unsigned long size, int node) { return __vmalloc_node_flags(size, node, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vzalloc_node); @@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(vzalloc_node); void *vmalloc_exec(unsigned long size) { - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, + return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 108533859a53..4eb773ccce11 100644 --- a/net/ceph/ceph_common.c 
+++ b/net/ceph/ceph_common.c @@ -187,7 +187,7 @@ void *ceph_kvmalloc(size_t size, gfp_t flags) return ptr; } - return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL); + return __vmalloc(size, flags, PAGE_KERNEL); } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index d529989f5791..e58ecff638b3 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -998,8 +998,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!info) { - info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | - __GFP_NORETRY | __GFP_HIGHMEM, + info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); if (!info) return NULL; -- cgit v1.2.3 From d6145ed2c24f403dce069ed941ddd0c47f624383 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 5 Apr 2017 09:24:43 +1000 Subject: scripts/spelling.txt: add "memory" pattern and fix typos Fix typos and add the following to the scripts/spelling.txt: momery||memory Link: http://lkml.kernel.org/r/20170317011131.6881-1-sboyd@codeaurora.org Signed-off-by: Stephen Boyd Signed-off-by: Andrew Morton --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 2 +- drivers/leds/leds-lp5521.c | 2 +- drivers/leds/leds-lp5523.c | 2 +- drivers/leds/leds-lp5562.c | 2 +- scripts/spelling.txt | 1 + 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 5c48dbf43c1a..0d922fbf1a38 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1726,7 +1726,7 @@ int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); - /* DMA momery regsiter */ + /* DMA memory regsiter */ if (mr->type == MR_TYPE_DMA) return 0; diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 549b315ca8fe..f53c8cda1bde 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -281,7 +281,7 @@ static void lp5521_firmware_loaded(struct lp55xx_chip *chip) } /* - * Program momery sequence + * Program memory sequence * 1) set engine mode to "LOAD" * 2) write firmware data into program memory */ diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index c5b30f06218a..e9ba8cd32d66 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -387,7 +387,7 @@ static void lp5523_firmware_loaded(struct lp55xx_chip *chip) } /* - * Program momery sequence + * Program memory sequence * 1) set engine mode to "LOAD" * 2) write firmware data into program memory */ diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index b75333803a63..90892585bcb5 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -270,7 +270,7 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip) } /* - * Program momery sequence + * Program memory sequence * 1) set engine mode to "LOAD" * 2) write firmware data into program memory */ diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 0545f5a8cabe..93a0dfd49930 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -672,6 +672,7 @@ miximum||maximum mmnemonic||mnemonic mnay||many modulues||modules +momery||memory monochorome||monochrome monochromo||monochrome monocrome||monochrome -- cgit v1.2.3 From 873fd9cd37da50ef49b0091db88f96ff331c152c Mon Sep 17 
00:00:00 2001 From: Stephen Boyd Date: Wed, 5 Apr 2017 09:24:43 +1000 Subject: scripts/spelling.txt: Add regsiter -> register spelling mistake This typo is quite common. Fix it and add it to the spelling file so that checkpatch catches it earlier. Link: http://lkml.kernel.org/r/20170317011131.6881-2-sboyd@codeaurora.org Signed-off-by: Stephen Boyd Signed-off-by: Andrew Morton --- arch/arc/kernel/unwind.c | 2 +- arch/arm/kernel/kgdb.c | 2 +- arch/arm/mach-ixp4xx/common-pci.c | 4 ++-- arch/m68k/ifpsp060/src/ilsp.S | 2 +- arch/m68k/ifpsp060/src/isp.S | 2 +- arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c | 2 +- arch/mips/include/asm/octeon/cvmx-helper-rgmii.h | 2 +- arch/parisc/kernel/entry.S | 2 +- arch/powerpc/mm/icswx.c | 2 +- drivers/acpi/cppc_acpi.c | 2 +- drivers/clk/qcom/common.c | 2 +- drivers/cpufreq/sti-cpufreq.c | 4 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 2 +- drivers/infiniband/hw/hns/hns_roce_mr.c | 2 +- drivers/net/can/rcar/rcar_canfd.c | 2 +- drivers/net/ethernet/amd/amd8111e.h | 4 ++-- drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 2 +- drivers/net/ethernet/intel/igb/e1000_phy.c | 2 +- drivers/scsi/isci/registers.h | 4 ++-- drivers/scsi/mpt3sas/mpt3sas_base.h | 2 +- include/linux/bcma/bcma_driver_pci.h | 2 +- include/linux/ftrace.h | 2 +- include/uapi/linux/ipmi.h | 2 +- scripts/spelling.txt | 1 + sound/soc/soc-core.c | 2 +- 25 files changed, 29 insertions(+), 28 deletions(-) diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index b6e4f7a7419b..333daab7def0 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -845,7 +845,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, * state->dataAlign; break; case DW_CFA_def_cfa_register: - unw_debug("cfa_def_cfa_regsiter: "); + unw_debug("cfa_def_cfa_register: "); state->cfa.reg = get_uleb128(&ptr.p8, end); break; /*todo case DW_CFA_def_cfa_expression: */ diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 9232caee7060..1bb4c40a3135 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -269,7 +269,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) /* * Register our undef instruction hooks with ARM undef core. - * We regsiter a hook specifically looking for the KGB break inst + * We register a hook specifically looking for the KGB break inst * and we handle the normal undef case within the do_undefinstr * handler. */ diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 4977296f0c78..bcf3df59f71b 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -43,14 +43,14 @@ int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data); /* - * Base address for PCI regsiter region + * Base address for PCI register region */ unsigned long ixp4xx_pci_reg_base = 0; /* * PCI cfg an I/O routines are done by programming a * command/byte enable register, and then read/writing - * the data from a data regsiter. We need to ensure + * the data from a data register. We need to ensure * these transactions are atomic or we will end up * with corrupt data on the bus or in a driver. 
*/ diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S index 970abaf3303e..dd5b2c357e95 100644 --- a/arch/m68k/ifpsp060/src/ilsp.S +++ b/arch/m68k/ifpsp060/src/ilsp.S @@ -776,7 +776,7 @@ muls64_zero: # ALGORITHM *********************************************************** # # In the interest of simplicity, all operands are converted to # # longword size whether the operation is byte, word, or long. The # -# bounds are sign extended accordingly. If Rn is a data regsiter, Rn is # +# bounds are sign extended accordingly. If Rn is a data register, Rn is # # also sign extended. If Rn is an address register, it need not be sign # # extended since the full register is always used. # # The condition codes are set correctly before the final "rts". # diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S index b865c1a052ba..29a9f8629b9d 100644 --- a/arch/m68k/ifpsp060/src/isp.S +++ b/arch/m68k/ifpsp060/src/isp.S @@ -1876,7 +1876,7 @@ movp_read_err: # word, or longword sized operands. Then, in the interest of # # simplicity, all operands are converted to longword size whether the # # operation is byte, word, or long. The bounds are sign extended # -# accordingly. If Rn is a data regsiter, Rn is also sign extended. If # +# accordingly. If Rn is a data register, Rn is also sign extended. If # # Rn is an address register, it need not be sign extended since the # # full register is always used. # # The comparisons are made and the condition codes calculated. # diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c index ba4753c23b03..d18ed5af62f4 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c @@ -152,7 +152,7 @@ static int __cvmx_helper_errata_asx_pass1(int interface, int port, } /** - * Configure all of the ASX, GMX, and PKO regsiters required + * Configure all of the ASX, GMX, and PKO registers required * to get RGMII to function on the supplied interface. * * @interface: PKO Interface to configure (0 or 1) diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h index f89775be7654..f7a95d7de140 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h @@ -55,7 +55,7 @@ extern int __cvmx_helper_rgmii_probe(int interface); extern void cvmx_helper_rgmii_internal_loopback(int port); /** - * Configure all of the ASX, GMX, and PKO regsiters required + * Configure all of the ASX, GMX, and PKO registers required * to get RGMII to function on the supplied interface. * * @interface: PKO Interface to configure (0 or 1) diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ad4cb1613c57..a4fd296c958e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1369,7 +1369,7 @@ nadtlb_nullify: /* When there is no translation for the probe address then we - must nullify the insn and return zero in the target regsiter. + must nullify the insn and return zero in the target register. This will indicate to the calling code that it does not have write/read privileges to this address. 
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c index 915412e4d5ba..1fa794d7d59f 100644 --- a/arch/powerpc/mm/icswx.c +++ b/arch/powerpc/mm/icswx.c @@ -186,7 +186,7 @@ static u32 acop_get_inst(struct pt_regs *regs) } /** - * @regs: regsiters at time of interrupt + * @regs: registers at time of interrupt * @address: storage address * @error_code: Fault code, usually the DSISR or ESR depending on * processor type diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3ca0729f7e0e..992dda9b8338 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -95,7 +95,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); /* pcc mapped address + header size + offset within PCC subspace */ #define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) -/* Check if a CPC regsiter is in PCC */ +/* Check if a CPC register is in PCC */ #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ (cpc)->cpc_entry.reg.space_id == \ ACPI_ADR_SPACE_PLATFORM_COMM) diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 03f9d316f969..d523991c945f 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -128,7 +128,7 @@ static void qcom_cc_gdsc_unregister(void *data) /* * Backwards compatibility with old DTs. Register a pass-through factor 1/1 - * clock to translate 'path' clk into 'name' clk and regsiter the 'path' + * clock to translate 'path' clk into 'name' clk and register the 'path' * clk as a fixed rate clock if it isn't present. */ static int _qcom_cc_register_board_clk(struct device *dev, const char *path, diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index a7db9011d5fe..d2d0430d09d4 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -236,7 +236,7 @@ use_defaults: return 0; } -static int sti_cpufreq_fetch_syscon_regsiters(void) +static int sti_cpufreq_fetch_syscon_registers(void) { struct device *dev = ddata.cpu; struct device_node *np = dev->of_node; @@ -275,7 +275,7 @@ static int sti_cpufreq_init(void) goto skip_voltage_scaling; } - ret = sti_cpufreq_fetch_syscon_regsiters(); + ret = sti_cpufreq_fetch_syscon_registers(); if (ret) goto skip_voltage_scaling; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 0d922fbf1a38..0f7e85c7b70b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1726,7 +1726,7 @@ int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); - /* DMA memory regsiter */ + /* DMA memory register */ if (mr->type == MR_TYPE_DMA) return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 4139abee3b54..f0dfcdd3f0f0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -204,7 +204,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, return 0; } - /* Note: if page_shift is zero, FAST memory regsiter */ + /* Note: if page_shift is zero, FAST memory register */ mtt->page_shift = page_shift; /* Compute MTT entry necessary */ diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 4ef07d97156d..602c19e23f05 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -413,7 +413,7 @@ /* RSCFDnRPGACCr */ #define 
RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) -/* CAN FD mode specific regsiter map */ +/* CAN FD mode specific register map */ /* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ #define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m))) diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h index 7cdb18512407..2a57b46fd6a6 100644 --- a/drivers/net/ethernet/amd/amd8111e.h +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -48,7 +48,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm /* 32 bit registers */ #define ASF_STAT 0x00 /* ASF status register */ -#define CHIPID 0x04 /* Chip ID regsiter */ +#define CHIPID 0x04 /* Chip ID register */ #define MIB_DATA 0x10 /* MIB data register */ #define MIB_ADDR 0x14 /* MIB address register */ #define STAT0 0x30 /* Status0 register */ @@ -648,7 +648,7 @@ typedef enum { /* driver ioctl parameters */ #define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) -/* amd8111e desriptor format */ +/* amd8111e descriptor format */ struct amd8111e_tx_dr{ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index a8b80c56ac25..73efdb05a490 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) /* * atl1c_read_phy_core - * core function to read register in PHY via MDIO control regsiter. + * core function to read register in PHY via MDIO control register. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to read diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 68812d783f33..413025bdcb50 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -127,7 +127,7 @@ out: * @offset: register offset to be read * @data: pointer to the read data * - * Reads the MDI control regsiter in the PHY at offset and stores the + * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 97f3ceb8d724..63468cfe3e4a 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -652,7 +652,7 @@ struct scu_iit_entry { /* - * TODO: Where is the SAS_LNKTOV regsiter? + * TODO: Where is the SAS_LNKTOV register? * TODO: Where is the SAS_PHYTOV register? */ #define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1) @@ -1827,7 +1827,7 @@ struct scu_peg_registers { }; /** - * struct scu_registers - SCU regsiters including both PEG registers if we turn + * struct scu_registers - SCU registers including both PEG registers if we turn * on that compile option. All of these registers are in the memory mapped * space returned from BAR1. 
* diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 8981806fb13f..099ab4ca7edf 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1421,7 +1421,7 @@ void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply); void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, - u8 bits_to_regsiter); + u8 bits_to_register); int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset); diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 9657f11d48a7..bca6a5e4ca3d 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -80,7 +80,7 @@ struct pci_dev; #define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ #define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ #define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */ -#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */ +#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */ #define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */ #define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ #define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 3633e8beff39..e51ffdcf8ac8 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -70,7 +70,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and * IPMODIFY are a kind of attribute flags which can be set only before * registering the ftrace_ops, and can not be modified while registered. - * Changing those attribute flags after regsitering ftrace_ops will + * Changing those attribute flags after registering ftrace_ops will * cause unexpected results. * * ENABLED - set/unset when ftrace_ops is registered/unregistered diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h index 7b26a62e5707..b9095a27a08a 100644 --- a/include/uapi/linux/ipmi.h +++ b/include/uapi/linux/ipmi.h @@ -355,7 +355,7 @@ struct ipmi_cmdspec { #define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \ struct ipmi_cmdspec) /* - * Unregister a regsitered command. error values: + * Unregister a registered command. error values: * - EFAULT - an address supplied was invalid. * - ENOENT - The netfn/cmd was not found registered for this user. */ diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 93a0dfd49930..279a52a8323d 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -872,6 +872,7 @@ registerd||registered registeresd||registered registes||registers registraration||registration +regsiter||register regster||register regualar||regular reguator||regulator diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 8be2ce17aa65..e7d99071d108 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -936,7 +936,7 @@ static struct snd_soc_component *soc_find_component( * * @dlc: name of the DAI and optional component info to match * - * This function will search all regsitered components and their DAIs to + * This function will search all registered components and their DAIs to * find the DAI of the same name. The component's of_node and name * should also match if being specified. 
* -- cgit v1.2.3 From 738e70a1781eb24d11dd7f0d40f77dee8f7d1b0d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 5 Apr 2017 09:24:43 +1000 Subject: scripts/spelling.txt: add "intialise(d)" pattern and fix typo instances Fix typos and add the following to the scripts/spelling.txt: intialisation||initialisation intialised||initialised intialise||initialise This commit does not intend to change the British spelling itself. Link: http://lkml.kernel.org/r/1481573103-11329-18-git-send-email-yamada.masahiro@socionext.com Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton --- certs/blacklist.c | 2 +- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 8 ++++---- drivers/video/fbdev/intelfb/intelfbdrv.c | 2 +- fs/inode.c | 2 +- fs/xfs/xfs_log_recover.c | 2 +- scripts/spelling.txt | 3 +++ 6 files changed, 11 insertions(+), 8 deletions(-) diff --git a/certs/blacklist.c b/certs/blacklist.c index 3eddce0e307a..3a507b9e2568 100644 --- a/certs/blacklist.c +++ b/certs/blacklist.c @@ -140,7 +140,7 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type) EXPORT_SYMBOL_GPL(is_hash_blacklisted); /* - * Intialise the blacklist + * Initialise the blacklist */ static int __init blacklist_init(void) { diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index d54490d3f7ad..1e594351a60f 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -387,7 +387,7 @@ static void sxgbe_free_rx_buffers(struct net_device *dev, /** * init_tx_ring - init the TX descriptor ring * @dev: net device structure - * @tx_ring: ring to be intialised + * @tx_ring: ring to be initialised * @tx_rsize: ring size * Description: this function initializes the DMA TX descriptor */ @@ -437,7 +437,7 @@ dmamem_err: /** * free_rx_ring - free the RX descriptor ring * @dev: net device structure - * @rx_ring: ring to be intialised + * @rx_ring: ring to be initialised * @rx_rsize: ring size * Description: this function initializes the DMA RX descriptor */ @@ -453,7 +453,7 @@ static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, /** * init_rx_ring - init the RX descriptor ring * @dev: net device structure - * @rx_ring: ring to be intialised + * @rx_ring: ring to be initialised * @rx_rsize: ring size * Description: this function initializes the DMA RX descriptor */ @@ -539,7 +539,7 @@ err_free_dma_rx: /** * free_tx_ring - free the TX descriptor ring * @dev: net device structure - * @tx_ring: ring to be intialised + * @tx_ring: ring to be initialised * @tx_rsize: ring size * Description: this function initializes the DMA TX descriptor */ diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index ff2a5d2023e1..6b444400a86c 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -934,7 +934,7 @@ static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var) } /*************************************************************** - * Various intialisation functions * + * Various initialisation functions * ***************************************************************/ static void get_initial_mode(struct intelfb_info *dinfo) diff --git a/fs/inode.c b/fs/inode.c index 32c8ee454ef0..e7d876931e06 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -119,7 +119,7 @@ static int no_open(struct inode *inode, struct file *file) } /** - * inode_init_always - perform inode structure intialisation + * 
inode_init_always - perform inode structure initialisation * @sb: superblock inode belongs to * @inode: inode to initialise * diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 4a98762ec8b4..cd0b077deb35 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3796,7 +3796,7 @@ xlog_recover_bud_pass2( * This routine is called when an inode create format structure is found in a * committed transaction in the log. It's purpose is to initialise the inodes * being allocated on disk. This requires us to get inode cluster buffers that - * match the range to be intialised, stamped with inode templates and written + * match the range to be initialised, stamped with inode templates and written * by delayed write so that subsequent modifications will hit the cached buffer * and only need writing out at the end of recovery. */ diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 279a52a8323d..412f576ba4d7 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -590,6 +590,9 @@ interruptted||interrupted interupted||interrupted interupt||interrupt intial||initial +intialisation||initialisation +intialised||initialised +intialise||initialise intialization||initialization intialized||initialized intialize||initialize -- cgit v1.2.3 From 1c91191e331a3df8ba24c99909b491cc26ae9f5e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 5 Apr 2017 09:24:44 +1000 Subject: treewide: spelling: correct diffrent[iate] and banlance typos Add these misspellings to scripts/spelling.txt too Link: http://lkml.kernel.org/r/962aace119675e5fe87be2a88ddac1a5486f8e60.1490931810.git.joe@perches.com Signed-off-by: Joe Perches Acked-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton --- drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_int.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 2 +- include/linux/mlx4/device.h | 2 +- scripts/spelling.txt | 3 +++ 7 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h index 354ec07eae87..23ae72468025 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h @@ -70,7 +70,7 @@ * (3) both long and short but short preferred and long only when necesarry * * These modes must be selected compile time via compile switches. 
-* Compile switch settings for the diffrent modes: +* Compile switch settings for the different modes: * (1) DRXDAPFASI_LONG_ADDR_ALLOWED=0, DRXDAPFASI_SHORT_ADDR_ALLOWED=1 * (2) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=0 * (3) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index cea6bdcde33f..8baf9d3eb4b1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1591,7 +1591,7 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, if (rc != 0) { __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); - /* Calling function should not diffrentiate between this case + /* Calling function should not differentiate between this case * and the case in which there is already a pending ramrod */ rc = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0ed24d6e6c65..40f057edeafc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -3058,7 +3058,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect * the number of VF SBs [especially for first VF on engine, as we can't - * diffrentiate between empty entries and its entries]. + * differentiate between empty entries and its entries]. * Since we don't really support more SBs than VFs today, prevent any * such configuration by sanitizing the number of SBs to equal the * number of VFs. diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 634e7a2433a9..b9bde431afc2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -950,7 +950,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (rc) goto err2; - /* First Dword used to diffrentiate between various sources */ + /* First Dword used to differentiate between various sources */ data = cdev->firmware->data + sizeof(u32); qed_dbg_pf_init(cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 92a3ee1715d9..ebe39ceea18b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -625,7 +625,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) * - If !ARI, VFs would start on next device. * so offset - (256 - pf_id) would provide the number. * Utilize the fact that (256 - pf_id) is achieved only by later - * to diffrentiate between the two. + * to differentiate between the two. */ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 1beb1ec2fbdf..eb1a51a6617b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -108,7 +108,7 @@ enum { MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) }; -/* Driver supports 3 diffrent device methods to manage traffic steering: +/* Driver supports 3 different device methods to manage traffic steering: * -device managed - High level API for ib and eth flow steering. FW is * managing flow steering tables. * - B0 steering mode - Common low level API for ib and (if supported) eth. 
diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 412f576ba4d7..e9cea4c5b6a1 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -174,6 +174,7 @@ bakup||backup baloon||balloon baloons||balloons bandwith||bandwidth +banlance||balance batery||battery beacuse||because becasue||because @@ -365,6 +366,8 @@ dictionnary||dictionary didnt||didn't diferent||different differrence||difference +diffrent||different +diffrentiate||differentiate difinition||definition diplay||display direectly||directly -- cgit v1.2.3 From ae50db0dc25c556e17e630dfe480ee39c812cbc4 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:44 +1000 Subject: treewide: move set_memory_* functions away from cacheflush.h Patch series "set_memory_* functions header refactor", v3. The set_memory_* APIs came out of a desire to have a better way to change memory attributes. Many of these attributes were linked to cache functionality so the prototypes were put in cacheflush.h. These days, the APIs have grown and have a much wider use than just cache APIs. To support this growth, split off set_memory_* and friends into a separate header file to avoid growing cacheflush.h for APIs that have nothing to do with caches. Link: http://lkml.kernel.org/r/1488920133-27229-2-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Russell King Acked-by: Mark Rutland Signed-off-by: Andrew Morton --- arch/arm/include/asm/cacheflush.h | 21 +-------- arch/arm/include/asm/set_memory.h | 32 ++++++++++++++ arch/arm64/include/asm/Kbuild | 1 + arch/arm64/include/asm/cacheflush.h | 6 +-- arch/s390/include/asm/cacheflush.h | 28 +----------- arch/s390/include/asm/set_memory.h | 31 +++++++++++++ arch/x86/include/asm/cacheflush.h | 86 +----------------------------------- arch/x86/include/asm/set_memory.h | 87 +++++++++++++++++++++++++++++++++++++ include/asm-generic/set_memory.h | 12 +++++ 9 files changed, 167 insertions(+), 137 deletions(-) create mode 100644 arch/arm/include/asm/set_memory.h create mode 100644 arch/s390/include/asm/set_memory.h create mode 100644 arch/x86/include/asm/set_memory.h create mode 100644 include/asm-generic/set_memory.h diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 02454fa15d2c..1cb9d118bb16 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -16,6 +16,7 @@ #include #include #include +#include #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) @@ -478,26 +479,6 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) : : : "r0","r1","r2","r3","r4","r5","r6","r7", \ "r9","r10","lr","memory" ) -#ifdef CONFIG_MMU -int set_memory_ro(unsigned long addr, int numpages); -int set_memory_rw(unsigned long addr, int numpages); -int set_memory_x(unsigned long addr, int numpages); -int set_memory_nx(unsigned long addr, int numpages); -#else -static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } -#endif - -#ifdef CONFIG_STRICT_KERNEL_RWX -void set_kernel_text_rw(void); -void set_kernel_text_ro(void); -#else -static inline void set_kernel_text_rw(void) { } -static inline void set_kernel_text_ro(void) { } -#endif - void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); 
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h new file mode 100644 index 000000000000..5aa4315abe91 --- /dev/null +++ b/arch/arm/include/asm/set_memory.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 1999-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ASMARM_SET_MEMORY_H +#define _ASMARM_SET_MEMORY_H + +#ifdef CONFIG_MMU +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +#else +static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +#endif + +#ifdef CONFIG_STRICT_KERNEL_RWX +void set_kernel_text_rw(void); +void set_kernel_text_ro(void); +#else +static inline void set_kernel_text_rw(void) { } +static inline void set_kernel_text_ro(void) { } +#endif + +#endif diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index a12f1afc95a3..a7a97a608033 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -29,6 +29,7 @@ generic-y += rwsem.h generic-y += segment.h generic-y += sembuf.h generic-y += serial.h +generic-y += set_memory.h generic-y += shmbuf.h generic-y += simd.h generic-y += sizes.h diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 5a2a6ee65f65..7db6962fc4b4 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -20,6 +20,7 @@ #define __ASM_CACHEFLUSH_H #include +#include /* * This flag is used to indicate that the page pointed to by a pte is clean @@ -150,9 +151,4 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { } -int set_memory_ro(unsigned long addr, int numpages); -int set_memory_rw(unsigned long addr, int numpages); -int set_memory_x(unsigned long addr, int numpages); -int set_memory_nx(unsigned long addr, int numpages); - #endif diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 0499334f9473..afe296515f76 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h @@ -3,32 +3,6 @@ /* Caches aren't brain-dead on the s390. 
*/ #include - -#define SET_MEMORY_RO 1UL -#define SET_MEMORY_RW 2UL -#define SET_MEMORY_NX 4UL -#define SET_MEMORY_X 8UL - -int __set_memory(unsigned long addr, int numpages, unsigned long flags); - -static inline int set_memory_ro(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RO); -} - -static inline int set_memory_rw(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RW); -} - -static inline int set_memory_nx(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_NX); -} - -static inline int set_memory_x(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_X); -} +#include #endif /* _S390_CACHEFLUSH_H */ diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h new file mode 100644 index 000000000000..46a4db44c47a --- /dev/null +++ b/arch/s390/include/asm/set_memory.h @@ -0,0 +1,31 @@ +#ifndef _ASMS390_SET_MEMORY_H +#define _ASMS390_SET_MEMORY_H + +#define SET_MEMORY_RO 1UL +#define SET_MEMORY_RW 2UL +#define SET_MEMORY_NX 4UL +#define SET_MEMORY_X 8UL + +int __set_memory(unsigned long addr, int numpages, unsigned long flags); + +static inline int set_memory_ro(unsigned long addr, int numpages) +{ + return __set_memory(addr, numpages, SET_MEMORY_RO); +} + +static inline int set_memory_rw(unsigned long addr, int numpages) +{ + return __set_memory(addr, numpages, SET_MEMORY_RW); +} + +static inline int set_memory_nx(unsigned long addr, int numpages) +{ + return __set_memory(addr, numpages, SET_MEMORY_NX); +} + +static inline int set_memory_x(unsigned long addr, int numpages) +{ + return __set_memory(addr, numpages, SET_MEMORY_X); +} + +#endif diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index e7e1942edff7..3d7db6f35aeb 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -4,94 +4,10 @@ /* Caches aren't brain-dead on the intel. */ #include #include - -/* - * The set_memory_* API can be used to change various attributes of a virtual - * address range. The attributes include: - * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack - * Executability : eXeutable, NoteXecutable - * Read/Write : ReadOnly, ReadWrite - * Presence : NotPresent - * - * Within a category, the attributes are mutually exclusive. - * - * The implementation of this API will take care of various aspects that - * are associated with changing such attributes, such as: - * - Flushing TLBs - * - Flushing CPU caches - * - Making sure aliases of the memory behind the mapping don't violate - * coherency rules as defined by the CPU in the system. - * - * What this API does not do: - * - Provide exclusion between various callers - including callers that - * operation on other mappings of the same physical page - * - Restore default attributes when a page is freed - * - Guarantee that mappings other than the requested one are - * in any state, other than that these do not violate rules for - * the CPU you have. Do not depend on any effects on other mappings, - * CPUs other than the one you have may have more relaxed rules. - * The caller is required to take care of these. 
- */ - -int _set_memory_uc(unsigned long addr, int numpages); -int _set_memory_wc(unsigned long addr, int numpages); -int _set_memory_wt(unsigned long addr, int numpages); -int _set_memory_wb(unsigned long addr, int numpages); -int set_memory_uc(unsigned long addr, int numpages); -int set_memory_wc(unsigned long addr, int numpages); -int set_memory_wt(unsigned long addr, int numpages); -int set_memory_wb(unsigned long addr, int numpages); -int set_memory_x(unsigned long addr, int numpages); -int set_memory_nx(unsigned long addr, int numpages); -int set_memory_ro(unsigned long addr, int numpages); -int set_memory_rw(unsigned long addr, int numpages); -int set_memory_np(unsigned long addr, int numpages); -int set_memory_4k(unsigned long addr, int numpages); - -int set_memory_array_uc(unsigned long *addr, int addrinarray); -int set_memory_array_wc(unsigned long *addr, int addrinarray); -int set_memory_array_wt(unsigned long *addr, int addrinarray); -int set_memory_array_wb(unsigned long *addr, int addrinarray); - -int set_pages_array_uc(struct page **pages, int addrinarray); -int set_pages_array_wc(struct page **pages, int addrinarray); -int set_pages_array_wt(struct page **pages, int addrinarray); -int set_pages_array_wb(struct page **pages, int addrinarray); - -/* - * For legacy compatibility with the old APIs, a few functions - * are provided that work on a "struct page". - * These functions operate ONLY on the 1:1 kernel mapping of the - * memory that the struct page represents, and internally just - * call the set_memory_* function. See the description of the - * set_memory_* function for more details on conventions. - * - * These APIs should be considered *deprecated* and are likely going to - * be removed in the future. - * The reason for this is the implicit operation on the 1:1 mapping only, - * making this not a generally useful API. - * - * Specifically, many users of the old APIs had a virtual address, - * called virt_to_page() or vmalloc_to_page() on that address to - * get a struct page* that the old API required. - * To convert these cases, use set_memory_*() on the original - * virtual address, do not use these functions. - */ - -int set_pages_uc(struct page *page, int numpages); -int set_pages_wb(struct page *page, int numpages); -int set_pages_x(struct page *page, int numpages); -int set_pages_nx(struct page *page, int numpages); -int set_pages_ro(struct page *page, int numpages); -int set_pages_rw(struct page *page, int numpages); - +#include void clflush_cache_range(void *addr, unsigned int size); #define mmio_flush_range(addr, size) clflush_cache_range(addr, size) -extern int kernel_set_to_readonly; -void set_kernel_text_rw(void); -void set_kernel_text_ro(void); - #endif /* _ASM_X86_CACHEFLUSH_H */ diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h new file mode 100644 index 000000000000..eaec6c364e42 --- /dev/null +++ b/arch/x86/include/asm/set_memory.h @@ -0,0 +1,87 @@ +#ifndef _ASM_X86_SET_MEMORY_H +#define _ASM_X86_SET_MEMORY_H + +#include +#include + +/* + * The set_memory_* API can be used to change various attributes of a virtual + * address range. The attributes include: + * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack + * Executability : eXeutable, NoteXecutable + * Read/Write : ReadOnly, ReadWrite + * Presence : NotPresent + * + * Within a category, the attributes are mutually exclusive. 
+ * + * The implementation of this API will take care of various aspects that + * are associated with changing such attributes, such as: + * - Flushing TLBs + * - Flushing CPU caches + * - Making sure aliases of the memory behind the mapping don't violate + * coherency rules as defined by the CPU in the system. + * + * What this API does not do: + * - Provide exclusion between various callers - including callers that + * operation on other mappings of the same physical page + * - Restore default attributes when a page is freed + * - Guarantee that mappings other than the requested one are + * in any state, other than that these do not violate rules for + * the CPU you have. Do not depend on any effects on other mappings, + * CPUs other than the one you have may have more relaxed rules. + * The caller is required to take care of these. + */ + +int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wc(unsigned long addr, int numpages); +int _set_memory_wt(unsigned long addr, int numpages); +int _set_memory_wb(unsigned long addr, int numpages); +int set_memory_uc(unsigned long addr, int numpages); +int set_memory_wc(unsigned long addr, int numpages); +int set_memory_wt(unsigned long addr, int numpages); +int set_memory_wb(unsigned long addr, int numpages); +int set_memory_np(unsigned long addr, int numpages); +int set_memory_4k(unsigned long addr, int numpages); + +int set_memory_array_uc(unsigned long *addr, int addrinarray); +int set_memory_array_wc(unsigned long *addr, int addrinarray); +int set_memory_array_wt(unsigned long *addr, int addrinarray); +int set_memory_array_wb(unsigned long *addr, int addrinarray); + +int set_pages_array_uc(struct page **pages, int addrinarray); +int set_pages_array_wc(struct page **pages, int addrinarray); +int set_pages_array_wt(struct page **pages, int addrinarray); +int set_pages_array_wb(struct page **pages, int addrinarray); + +/* + * For legacy compatibility with the old APIs, a few functions + * are provided that work on a "struct page". + * These functions operate ONLY on the 1:1 kernel mapping of the + * memory that the struct page represents, and internally just + * call the set_memory_* function. See the description of the + * set_memory_* function for more details on conventions. + * + * These APIs should be considered *deprecated* and are likely going to + * be removed in the future. + * The reason for this is the implicit operation on the 1:1 mapping only, + * making this not a generally useful API. + * + * Specifically, many users of the old APIs had a virtual address, + * called virt_to_page() or vmalloc_to_page() on that address to + * get a struct page* that the old API required. + * To convert these cases, use set_memory_*() on the original + * virtual address, do not use these functions. 
+ */ + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); +int set_pages_x(struct page *page, int numpages); +int set_pages_nx(struct page *page, int numpages); +int set_pages_ro(struct page *page, int numpages); +int set_pages_rw(struct page *page, int numpages); + +extern int kernel_set_to_readonly; +void set_kernel_text_rw(void); +void set_kernel_text_ro(void); + +#endif /* _ASM_X86_SET_MEMORY_H */ diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h new file mode 100644 index 000000000000..83e81f8996b2 --- /dev/null +++ b/include/asm-generic/set_memory.h @@ -0,0 +1,12 @@ +#ifndef __ASM_SET_MEMORY_H +#define __ASM_SET_MEMORY_H + +/* + * Functions to change memory attributes. + */ +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); + +#endif -- cgit v1.2.3 From 151117a9c597f965c50945dc38cab31600b570a7 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:44 +1000 Subject: arm: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly Link: http://lkml.kernel.org/r/1488920133-27229-3-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Russell King Signed-off-by: Andrew Morton --- arch/arm/kernel/ftrace.c | 1 + arch/arm/kernel/machine_kexec.c | 1 + arch/arm/mm/pageattr.c | 1 + arch/arm/net/bpf_jit_32.c | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 3f1759411d51..dea3e965fe88 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -21,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_THUMB2_KERNEL #define NOP 0xf85deb04 /* pop.w {lr} */ diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index b18c1ea56bed..15495887ca14 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -18,6 +18,7 @@ #include #include #include +#include extern void relocate_new_kernel(void); extern const unsigned int relocate_new_kernel_size; diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 3b69f2642513..1403cb4a0c3d 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -15,6 +15,7 @@ #include #include +#include struct page_change_data { pgprot_t set_mask; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 93d0b6d0b63e..d5b9fa19b684 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -18,6 +18,7 @@ #include #include +#include #include #include -- cgit v1.2.3 From caf264b41cf209ec8c9cfd6a3489ab5e67fc0b51 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:44 +1000 Subject: arm64: use set_memory.h header The set_memory_* functions have moved to set_memory.h. Use that header explicitly. 
Link: http://lkml.kernel.org/r/1488920133-27229-4-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Catalin Marinas Acked-by: Mark Rutland Signed-off-by: Andrew Morton --- arch/arm64/mm/pageattr.c | 1 + arch/arm64/net/bpf_jit_comp.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 8def55e7249b..26839f213fac 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -17,6 +17,7 @@ #include #include +#include #include struct page_change_data { diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a785554916c0..efc1bf237cc9 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "bpf_jit.h" -- cgit v1.2.3 From 8f82ee07e14b814e3f0f05a97563ca7d20a30c6b Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:45 +1000 Subject: s390: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly Link: http://lkml.kernel.org/r/1488920133-27229-5-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Heiko Carstens Signed-off-by: Andrew Morton --- arch/s390/kernel/ftrace.c | 1 + arch/s390/kernel/kprobes.c | 2 +- arch/s390/kernel/machine_kexec.c | 1 + arch/s390/mm/init.c | 1 + arch/s390/mm/pageattr.c | 1 + arch/s390/mm/vmem.c | 1 + arch/s390/net/bpf_jit_comp.c | 1 + 7 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 60a8a4e207ed..27477f34cc0a 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "entry.h" /* diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 76f9eda1d7c0..3d6a99746454 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index db5658daf994..49a6bd45957b 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ee5066718b21..ee6a1d3d4983 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -39,6 +39,7 @@ #include #include #include +#include pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index fc5dc33bb141..143cc9af6fd7 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -8,6 +8,7 @@ #include #include #include +#include static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) { diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 60d38993f232..c33c94b4be60 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -17,6 +17,7 @@ #include #include #include +#include static DEFINE_MUTEX(vmem_mutex); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 4ecf6d687509..6e97a2e3fd8d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "bpf_jit.h" int bpf_jit_enable __read_mostly; -- cgit v1.2.3 From 42cac47673d934e461371ce410bb464cb1ec2bf4 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:45 +1000 Subject: x86: use 
set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-6-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Ingo Molnar Signed-off-by: Andrew Morton --- arch/x86/kernel/amd_gart_64.c | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/cpu/bugs.c | 2 +- arch/x86/kernel/ftrace.c | 2 +- arch/x86/kernel/machine_kexec_32.c | 2 +- arch/x86/kernel/machine_kexec_64.c | 1 + arch/x86/mm/init.c | 2 +- arch/x86/mm/init_32.c | 2 +- arch/x86/mm/init_64.c | 2 +- arch/x86/mm/ioremap.c | 2 +- arch/x86/mm/pageattr.c | 1 + arch/x86/net/bpf_jit_comp.c | 1 + arch/x86/pci/pcbios.c | 2 +- arch/x86/platform/efi/efi.c | 2 +- arch/x86/realmode/init.c | 2 +- 15 files changed, 15 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index df083efe6ee0..815dd63f49d0 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c36140d788fe..ee8f11800295 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -16,7 +16,7 @@ #ifdef CONFIG_X86_64 # include -# include +# include #endif #include "cpu.h" diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index a44ef52184df..0af86d9242da 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include void __init check_bugs(void) { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cbd73eb42170..d87f12b4f106 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -24,7 +24,7 @@ #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 5f43cec296c5..8c53c5d7a1bc 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include static void set_idt(void *newidt, __u16 limit) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index b41ee7a91fe9..a0bacd25c9cd 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_KEXEC_FILE static struct kexec_file_ops *kexec_file_loaders[] = { diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 22af912d66d2..adbfb095bade 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -5,7 +5,7 @@ #include #include /* for max_low_pfn */ -#include +#include #include #include #include diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 601b8e04e5c6..a24e04c7ed61 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -48,7 +48,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7bdda6f1d135..29a805e97a7e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index a5e1cda85974..1924c4ab8be5 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/mm/pageattr.c 
b/arch/x86/mm/pageattr.c index b5949017dead..4f6e9059082f 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -24,6 +24,7 @@ #include #include #include +#include /* * The current flushing context - we pass it instead of 5 arguments: diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 32322ce9b405..a63e6af4afb5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include int bpf_jit_enable __read_mostly; diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 1d97cea3b3a4..4d510a601244 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include /* BIOS32 signature: "_32_" */ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 565dff3c9a12..6867d884cebb 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -48,7 +48,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 5db706f14111..a163a90af4aa 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include #include -- cgit v1.2.3 From ac6332daf350be1d8775612eb2824b52ab0464aa Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:45 +1000 Subject: agp: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-7-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Signed-off-by: Andrew Morton --- drivers/char/agp/amd-k7-agp.c | 1 + drivers/char/agp/ati-agp.c | 1 + drivers/char/agp/generic.c | 4 +++- drivers/char/agp/intel-gtt.c | 1 + drivers/char/agp/sworks-agp.c | 1 + 5 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 3661a51e93e2..5fbd333e4c6d 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "agp.h" #define AMD_MMBASE_BAR 1 diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 75a9786a77e6..0b5ec7af2414 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "agp.h" #define ATI_GART_MMBASE_BAR 1 diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index bdf418cac8ef..658664a5a5aa 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -39,7 +39,9 @@ #include #include #include -#include +#ifdef CONFIG_X86 +#include +#endif #include #include "agp.h" diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 7fcc2a9d1d5a..9b6b6023193b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,6 +25,7 @@ #include "agp.h" #include "intel-agp.h" #include +#include /* * If we have Intel graphics, we're not going to have anything other than diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 9b163b49d976..03be4ac79b0d 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "agp.h" #define SVWRKS_COMMAND 0x04 -- cgit v1.2.3 From baa0dc84d41a6d83fcd089411c8fec9fab0b3091 Mon Sep 17 00:00:00 
2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:46 +1000 Subject: drm: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-8-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Signed-off-by: Andrew Morton --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 3 +++ drivers/gpu/drm/gma500/gtt.c | 1 + drivers/gpu/drm/gma500/psb_drv.c | 1 + drivers/gpu/drm/radeon/radeon_gart.c | 3 +++ drivers/gpu/drm/ttm/ttm_page_alloc.c | 3 +++ drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3 +++ drivers/gpu/drm/ttm/ttm_tt.c | 3 +++ 7 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 6d691abe889c..2ee327d69775 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -27,6 +27,9 @@ */ #include #include +#ifdef CONFIG_X86 +#include +#endif #include "amdgpu.h" /* diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 3f4f424196b2..3949b0990916 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -21,6 +21,7 @@ #include #include +#include #include "psb_drv.h" #include "blitter.h" diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 5ee93ff55608..1f9b35afefee 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -35,6 +35,7 @@ #include #include #include +#include static struct drm_driver driver; static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index c4777c8d0312..0b3ec35515f3 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -27,6 +27,9 @@ */ #include #include +#ifdef CONFIG_X86 +#include +#endif #include "radeon.h" /* diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index a37de5db5731..eeddc1e48409 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -51,6 +51,9 @@ #if IS_ENABLED(CONFIG_AGP) #include #endif +#ifdef CONFIG_X86 +#include +#endif #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 16 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index cec4b4baa179..90ddbdca93bd 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -53,6 +53,9 @@ #if IS_ENABLED(CONFIG_AGP) #include #endif +#ifdef CONFIG_X86 +#include +#endif #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 4 diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index aee3c00f836e..5260179d788a 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -44,6 +44,9 @@ #include #include #include +#ifdef CONFIG_X86 +#include +#endif /** * Allocates storage for pointers to the pages that back the ttm. 
-- cgit v1.2.3 From cf438ac0e87e4caf320f16863df60fb2bd5ad465 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Apr 2017 09:24:46 +1000 Subject: drm-use-set_memoryh-header-fix track drivers/gpu/drm/i915/i915_gem_gtt.c linux-next changes Cc: Laura Abbott Signed-off-by: Andrew Morton --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8bab4aea63e6..2aa6b97fd22f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -31,6 +31,8 @@ #include #include +#include + #include #include -- cgit v1.2.3 From 4925d54ddbd66051afec3e3f29d05751b16dad61 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:46 +1000 Subject: drivers/hwtracing/intel_th/msu.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-9-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Alexander Shishkin Signed-off-by: Andrew Morton --- drivers/hwtracing/intel_th/msu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index e88afe1a435c..dbbe31df74df 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -27,7 +27,9 @@ #include #include -#include +#ifdef CONFIG_X86 +#include +#endif #include "intel_th.h" #include "msu.h" -- cgit v1.2.3 From 4fd18fe95f2dce1398ecd0d989bd759beff81dda Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:47 +1000 Subject: drivers/watchdog/hpwdt.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-10-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Guenter Roeck Signed-off-by: Andrew Morton --- drivers/watchdog/hpwdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 70c7194e2810..67fbe35ce7cf 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #endif /* CONFIG_HPWDT_NMI_DECODING */ #include #include -- cgit v1.2.3 From add6b5c31923b029763f4872952573a186d4aeee Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:47 +1000 Subject: include/linux/filter.h: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-11-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Daniel Borkmann Signed-off-by: Andrew Morton --- include/linux/filter.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 511fe910bf1d..68c55488f45d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -19,7 +19,9 @@ #include -#include +#ifdef CONFIG_ARCH_HAS_SET_MEMORY +#include +#endif #include #include -- cgit v1.2.3 From b538e245490dc66765b74db9d8ba85791ffe1cdf Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:47 +1000 Subject: kernel/module.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. 
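For illustration, the conversion pattern used throughout this series looks roughly like the sketch below (hypothetical driver code; the function and variable names are made up, and whether asm/set_memory.h or linux/set_memory.h is the right header depends on the caller's architecture coverage):

#include <linux/gfp.h>
#include <asm/set_memory.h>	/* previously pulled in indirectly via asm/cacheflush.h */

static unsigned long patch_page;

static int protect_patch_page(void)
{
	patch_page = get_zeroed_page(GFP_KERNEL);
	if (!patch_page)
		return -ENOMEM;
	/* set_memory_*() now comes from set_memory.h, not cacheflush.h */
	return set_memory_ro(patch_page, 1);	/* one page, read-only */
}
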
Link: http://lkml.kernel.org/r/1488920133-27229-12-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Jessica Yu Signed-off-by: Andrew Morton --- kernel/module.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/module.c b/kernel/module.c index c8d0047d8c7d..f953df992a11 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -49,6 +49,9 @@ #include #include #include +#ifdef CONFIG_STRICT_MODULE_RWX +#include +#endif #include #include #include -- cgit v1.2.3 From fbe65e63c1ef2da73b175cab606d116153266556 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:48 +1000 Subject: kernel/power/snapshot.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-13-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Signed-off-by: Andrew Morton --- kernel/power/snapshot.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d79a38de425a..3b1e0f3ad07f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -36,6 +36,9 @@ #include #include #include +#ifdef CONFIG_STRICT_KERNEL_RWX +#include +#endif #include "power.h" -- cgit v1.2.3 From 689f8b7c0ed0ccf49b057df401ae81bf5d8ac8fd Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:48 +1000 Subject: alsa: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-14-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Takashi Iwai Signed-off-by: Andrew Morton --- sound/pci/hda/hda_intel.c | 2 +- sound/pci/intel8x0.c | 4 +++- sound/x86/intel_hdmi_audio.c | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 64db6698214c..fc95149c0bae 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -53,7 +53,7 @@ #ifdef CONFIG_X86 /* for snoop control */ #include -#include +#include #include #endif #include diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 9720a30dbfff..6d17b171c17b 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c @@ -40,7 +40,9 @@ #include /* for 440MX workaround */ #include -#include +#ifdef CONFIG_X86 +#include +#endif MODULE_AUTHOR("Jaroslav Kysela "); MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455"); diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index c505b019e09c..664b7fe206d6 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From c20fa02eddbca3f81c2ae6edbf34cec726c04b59 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:48 +1000 Subject: drivers/misc/sram-exec.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. 
Link: http://lkml.kernel.org/r/1488920133-27229-15-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Signed-off-by: Andrew Morton --- drivers/misc/sram-exec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c index ac522417c462..3d528a13b8fc 100644 --- a/drivers/misc/sram-exec.c +++ b/drivers/misc/sram-exec.c @@ -16,9 +16,10 @@ #include #include +#include #include -#include +#include #include "sram.h" -- cgit v1.2.3 From 2f2b092c0f6aa51545327486450a0957b92ff157 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:48 +1000 Subject: drivers/video/fbdev/vermilion/vermilion.c: use set_memory.h header set_memory_* functions have moved to set_memory.h. Switch to this explicitly. Link: http://lkml.kernel.org/r/1488920133-27229-16-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Bartlomiej Zolnierkiewicz Signed-off-by: Andrew Morton --- drivers/video/fbdev/vermilion/vermilion.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 1c1e95a0b8fa..ce4c4729a5e8 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include -- cgit v1.2.3 From 95d801db4c5d69aa227f3a81e3b25ca15caf0ae0 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Apr 2017 09:24:49 +1000 Subject: drivers/staging/media/atomisp/pci/atomisp2: use set_memory.h Cc: Laura Abbott Signed-off-by: Andrew Morton --- drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c | 3 ++- drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c | 2 +- drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c | 3 ++- drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c | 5 ++++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c index 6c4ab5aa2a8d..a51a27bc6b5b 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c @@ -36,12 +36,13 @@ #include #include #include -#include #include #include #include #include +#include + #include "atomisp_internal.h" #include "hmm/hmm_common.h" #include "hmm/hmm_pool.h" diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c index 6e540cc2e451..9b35980db715 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c @@ -27,7 +27,7 @@ #include #include -#include "asm/cacheflush.h" +#include #include "atomisp_internal.h" diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c index 590ff7bc6c5f..b51b61995dd9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c @@ -27,7 +27,8 @@ #include #include -#include "asm/cacheflush.h" +#include + #include "atomisp_internal.h" #include "hmm/hmm_pool.h" diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c index 2009e3a11b86..706bd43e8b1b 100644 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c @@ -30,13 +30,16 @@ #include /* for kmalloc */ #include #include -#include #include #include #include #include #include +#ifdef CONFIG_X86 +#include +#endif + #include "atomisp_internal.h" #include "mmu/isp_mmu.h" -- cgit v1.2.3 From be68d4b397aa5b42b30808088c480d17d5e9c450 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 5 Apr 2017 09:24:49 +1000 Subject: treewide: decouple cacheflush.h and set_memory.h Now that all call sites have been converted, completely decouple cacheflush.h and set_memory.h. Link: http://lkml.kernel.org/r/1488920133-27229-17-git-send-email-labbott@redhat.com Signed-off-by: Laura Abbott Acked-by: Catalin Marinas Acked-by: Mark Rutland Signed-off-by: Andrew Morton --- arch/arm/include/asm/cacheflush.h | 1 - arch/arm64/include/asm/cacheflush.h | 1 - arch/s390/include/asm/Kbuild | 1 + arch/s390/include/asm/cacheflush.h | 8 -------- arch/x86/include/asm/cacheflush.h | 1 - 5 files changed, 1 insertion(+), 11 deletions(-) delete mode 100644 arch/s390/include/asm/cacheflush.h diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 1cb9d118bb16..d69bebf697e7 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -16,7 +16,6 @@ #include #include #include -#include #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 7db6962fc4b4..5812469194b1 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -20,7 +20,6 @@ #define __ASM_CACHEFLUSH_H #include -#include /* * This flag is used to indicate that the page pointed to by a pte is clean diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 7e3481eb2174..45092b12f54f 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += asm-offsets.h +generic-y += cacheflush.h generic-y += clkdev.h generic-y += dma-contiguous.h generic-y += div64.h diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h deleted file mode 100644 index afe296515f76..000000000000 --- a/arch/s390/include/asm/cacheflush.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _S390_CACHEFLUSH_H -#define _S390_CACHEFLUSH_H - -/* Caches aren't brain-dead on the s390. */ -#include -#include - -#endif /* _S390_CACHEFLUSH_H */ diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 3d7db6f35aeb..8b4140f6724f 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -4,7 +4,6 @@ /* Caches aren't brain-dead on the intel. */ #include #include -#include void clflush_cache_range(void *addr, unsigned int size); -- cgit v1.2.3 From 0f6e361db7ca34dc2e12b6c765190deb7a741b40 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 5 Apr 2017 09:24:49 +1000 Subject: kref: remove WARN_ON for NULL release functions The kref functions check for NULL release functions. This WARN_ON seems rather pointless. We will eventually call the NULL release function and just crash anyway. It is also somewhat expensive because these functions are inlined in a lot of places.
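In practice the release callback is a fixed function known at the call site, so the check can never catch anything that the subsequent crash would not; a minimal sketch of the usual pattern (the struct and function names here are hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

static void foo_put(struct foo *f)
{
	/* release is a fixed, non-NULL function at every call site */
	kref_put(&f->ref, foo_release);
}
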
Removing the WARN_ONs saves around 2.3k in this kernel (likely more in others with more drivers) text data bss dec hex filename 9083992 5367600 11116544 25568136 1862388 vmlinux-before-load-avg 9070166 5367600 11116544 25554310 185ed86 vmlinux-load-avg Link: http://lkml.kernel.org/r/20170315021431.13107-5-andi@firstfloor.org Signed-off-by: Andi Kleen Acked-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton --- include/linux/kref.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/include/linux/kref.h b/include/linux/kref.h index f4156f88f557..29220724bf1c 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -66,8 +66,6 @@ static inline void kref_get(struct kref *kref) */ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { - WARN_ON(release == NULL); - if (refcount_dec_and_test(&kref->refcount)) { release(kref); return 1; @@ -79,8 +77,6 @@ static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *lock) { - WARN_ON(release == NULL); - if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { release(kref); return 1; @@ -92,8 +88,6 @@ static inline int kref_put_lock(struct kref *kref, void (*release)(struct kref *kref), spinlock_t *lock) { - WARN_ON(release == NULL); - if (refcount_dec_and_lock(&kref->refcount, lock)) { release(kref); return 1; -- cgit v1.2.3 From ddbb3ad7401d7df4463fba1b85f40c589ff13ccf Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 5 Apr 2017 09:24:50 +1000 Subject: drivers/scsi/megaraid: remove expensive inline from megasas_return_cmd Remove an inline from a fairly big function that is used often. It's unlikely that inlining it or not makes a lot of difference. Saves around 8k text in my kernel. text data bss dec hex filename 9047801 5367568 11116544 25531913 1859609 vmlinux-before-megasas 9039417 5367568 11116544 25523529 1857549 vmlinux-megasas Link: http://lkml.kernel.org/r/20170315021431.13107-7-andi@firstfloor.org Signed-off-by: Andi Kleen Cc: Kashyap Desai Cc: Sumit Saxena Cc: James Bottomley Signed-off-by: Andrew Morton --- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0016f12cc563..316c3df0c3fd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -244,7 +244,7 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ -inline void +void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; -- cgit v1.2.3 From 9ab371f3036749683518a753e995cb26e810208f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 5 Apr 2017 09:24:50 +1000 Subject: include/linux/uaccess.h: remove expensive WARN_ON in pagefault_disabled_dec pagefault_disabled_dec is frequently used inline, and it has a WARN_ON for underflow that expands to about 6.5k of extra code. The warning doesn't seem to be useful enough to be worth that much code, so remove it. If it were needed, it could be made dependent on a debug kernel option.
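For example, such a variant could look like the sketch below (CONFIG_DEBUG_VM is only an assumed guard here; this patch does not add one):

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
#ifdef CONFIG_DEBUG_VM
	/* underflow check kept only in debug builds */
	WARN_ON(current->pagefault_disabled < 0);
#endif
}
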
Saves ~6.5k in my kernel text data bss dec hex filename 9039417 5367568 11116544 25523529 1857549 vmlinux-before-pf 9032805 5367568 11116544 25516917 1855b75 vmlinux-pf Link: http://lkml.kernel.org/r/20170315021431.13107-8-andi@firstfloor.org Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton --- include/linux/uaccess.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index f30c187ed785..b691aad918fb 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -12,7 +12,6 @@ static __always_inline void pagefault_disabled_inc(void) static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; - WARN_ON(current->pagefault_disabled < 0); } /* -- cgit v1.2.3 From 7daf6d8f9da8f87323b08cc4bca65d95d55e5a6e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 5 Apr 2017 09:24:50 +1000 Subject: tracing: move trace_handle_return() out of line Currently trace_handle_return() looks like this: static inline enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } Where trace_seq_overflowed(s) is: static inline bool trace_seq_has_overflowed(struct trace_seq *s) { return s->full || seq_buf_has_overflowed(&s->seq); } And seq_buf_has_overflowed(&s->seq) is: static inline bool seq_buf_has_overflowed(struct seq_buf *s) { return s->len > s->size; } Making trace_handle_return() into: return (s->full || (s->seq->len > s->seq->size)) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; One would think this is not an issue to keep as an inline. But because this is used in the TRACE_EVENT() macro, it is extended for every tracepoint in the system. Taking a look at a single tracepoint x86_irq_vector (was the first one I randomly chosen). As trace_handle_return is used in the TRACE_EVENT() macro of trace_raw_output_##call() we disassemble trace_raw_output_x86_irq_vector and do a diff. I removed identical lines that were different just due to different addresses. The original has 22 bytes of text more than the out of line version. As this is for every TRACE_EVENT() defined in the system, this can become quite large. text data bss dec hex filename 8690305 5450490 1298432 15439227 eb957b vmlinux-orig 8681725 5450490 1298432 15430647 eb73f7 vmlinux-handle This change has a total of 8580 bytes in savings. $ objdump -dr /tmp/vmlinux-orig | grep '^[0-9a-f]* Reported-by: Andi Kleen Signed-off-by: Andrew Morton --- include/linux/trace_events.h | 11 +---------- kernel/trace/trace.c | 12 ++++++++++++ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0af63c4381b9..a556805eff8a 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -138,16 +138,7 @@ enum print_line_t { TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; -/* - * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq - * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function - * simplifies those functions and keeps them in sync. - */ -static inline enum print_line_t trace_handle_return(struct trace_seq *s) -{ - return trace_seq_has_overflowed(s) ? 
- TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; -} +enum print_line_t trace_handle_return(struct trace_seq *s); void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0ed834d6beb0..128356d6dd49 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1927,6 +1927,18 @@ void tracing_record_cmdline(struct task_struct *tsk) __this_cpu_write(trace_cmdline_save, false); } +/* + * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq + * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function + * simplifies those functions and keeps them in sync. + */ +enum print_line_t trace_handle_return(struct trace_seq *s) +{ + return trace_seq_has_overflowed(s) ? + TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; +} +EXPORT_SYMBOL_GPL(trace_handle_return); + void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) -- cgit v1.2.3 From 9aa1d9b69987eb1fec89b70a79701eaff2b160b4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 5 Apr 2017 09:24:51 +1000 Subject: fs: remove the set-but-never-checked AOP_FLAG_UNINTERRUPTIBLE flag afddba49d18f346e ("fs: introduce write_begin, write_end, and perform_write aops") introduced the AOP_FLAG_UNINTERRUPTIBLE flag which was checked in pagecache_write_begin(), but that check was removed by 4e02ed4b4a2fae34 ("fs: remove prepare_write/commit_write"). Between these two commits, d9414774dc0c7b39 ("cifs: Convert cifs to new aops.") added a check in cifs_write_begin(), but that check was soon removed by a98ee8c1c707fe32 ("[CIFS] fix regression in cifs_write_begin/cifs_write_end"). Therefore, the AOP_FLAG_UNINTERRUPTIBLE flag is checked nowhere. Let's remove this flag. This patch makes no functional changes. Link: http://lkml.kernel.org/r/1489294781-53494-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Reviewed-by: Jeff Layton Reviewed-by: Christoph Hellwig Cc: Nick Piggin Cc: Al Viro Signed-off-by: Andrew Morton --- Documentation/filesystems/vfs.txt | 3 +-- fs/buffer.c | 13 +++++-------- fs/exofs/dir.c | 3 +-- fs/hfs/extent.c | 4 ++-- fs/hfsplus/extents.c | 5 ++--- fs/iomap.c | 13 +++---------- fs/namei.c | 2 +- include/linux/fs.h | 5 ++--- mm/filemap.c | 6 ------ 9 files changed, 17 insertions(+), 37 deletions(-) diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 94dd27ef4a76..f42b90687d40 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -694,8 +694,7 @@ struct address_space_operations { write_end: After a successful write_begin, and data copy, write_end must be called. len is the original len passed to write_begin, and copied - is the amount that was able to be copied (copied == len is always true - if write_begin was called with the AOP_FLAG_UNINTERRUPTIBLE flag). + is the amount that was able to be copied. The filesystem must take care of unlocking the page and releasing it refcount, and updating i_size.
diff --git a/fs/buffer.c b/fs/buffer.c index 2ba905716d34..584ea92d81b4 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2379,8 +2379,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) goto out; err = pagecache_write_begin(NULL, mapping, size, 0, - AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND, - &page, &fsdata); + AOP_FLAG_CONT_EXPAND, &page, &fsdata); if (err) goto out; @@ -2415,9 +2414,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, } len = PAGE_SIZE - zerofrom; - err = pagecache_write_begin(file, mapping, curpos, len, - AOP_FLAG_UNINTERRUPTIBLE, - &page, &fsdata); + err = pagecache_write_begin(file, mapping, curpos, len, 0, + &page, &fsdata); if (err) goto out; zero_user(page, zerofrom, len); @@ -2449,9 +2447,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, } len = offset - zerofrom; - err = pagecache_write_begin(file, mapping, curpos, len, - AOP_FLAG_UNINTERRUPTIBLE, - &page, &fsdata); + err = pagecache_write_begin(file, mapping, curpos, len, 0, + &page, &fsdata); if (err) goto out; zero_user(page, zerofrom, len); diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 42f9a0a0c4ca..8eeb694332fe 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -405,8 +405,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de, int err; lock_page(page); - err = exofs_write_begin(NULL, page->mapping, pos, len, - AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); + err = exofs_write_begin(NULL, page->mapping, pos, len, 0, &page, NULL); if (err) EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n", err); diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c index e33a0d36a93e..5d0182654580 100644 --- a/fs/hfs/extent.c +++ b/fs/hfs/extent.c @@ -485,8 +485,8 @@ void hfs_file_truncate(struct inode *inode) /* XXX: Can use generic_cont_expand? */ size = inode->i_size - 1; - res = pagecache_write_begin(NULL, mapping, size+1, 0, - AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); + res = pagecache_write_begin(NULL, mapping, size+1, 0, 0, + &page, &fsdata); if (!res) { res = pagecache_write_end(NULL, mapping, size+1, 0, 0, page, fsdata); diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index feca524ce2a5..a3eb640b4f8f 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -545,9 +545,8 @@ void hfsplus_file_truncate(struct inode *inode) void *fsdata; loff_t size = inode->i_size; - res = pagecache_write_begin(NULL, mapping, size, 0, - AOP_FLAG_UNINTERRUPTIBLE, - &page, &fsdata); + res = pagecache_write_begin(NULL, mapping, size, 0, 0, + &page, &fsdata); if (res) return; res = pagecache_write_end(NULL, mapping, size, diff --git a/fs/iomap.c b/fs/iomap.c index fb17dba3dac5..0b457ffb85ba 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -158,12 +158,6 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ssize_t written = 0; unsigned int flags = AOP_FLAG_NOFS; - /* - * Copies from kernel address space cannot fail (NFSD is a big user). 
- */ - if (!iter_is_iovec(i)) - flags |= AOP_FLAG_UNINTERRUPTIBLE; - do { struct page *page; unsigned long offset; /* Offset into pagecache page */ @@ -291,8 +285,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data, return PTR_ERR(rpage); status = iomap_write_begin(inode, pos, bytes, - AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE, - &page, iomap); + AOP_FLAG_NOFS, &page, iomap); put_page(rpage); if (unlikely(status)) return status; @@ -343,8 +336,8 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, struct page *page; int status; - status = iomap_write_begin(inode, pos, bytes, - AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap); + status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page, + iomap); if (status) return status; diff --git a/fs/namei.c b/fs/namei.c index 482414aa558b..886169c9c616 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -4763,7 +4763,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) struct page *page; void *fsdata; int err; - unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; + unsigned int flags = 0; if (nofs) flags |= AOP_FLAG_NOFS; diff --git a/include/linux/fs.h b/include/linux/fs.h index 7251f7bb45e8..5ad421d9a354 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -250,9 +250,8 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ -#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ -#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct +#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ diff --git a/mm/filemap.c b/mm/filemap.c index d04e475bcf9e..e9e5f7b6a6c3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2796,12 +2796,6 @@ ssize_t generic_perform_write(struct file *file, ssize_t written = 0; unsigned int flags = 0; - /* - * Copies from kernel address space cannot fail (NFSD is a big user). - */ - if (!iter_is_iovec(i)) - flags |= AOP_FLAG_UNINTERRUPTIBLE; - do { struct page *page; unsigned long offset; /* Offset into pagecache page */ -- cgit v1.2.3
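For illustration, a caller that used to pass AOP_FLAG_UNINTERRUPTIBLE now simply passes 0 (or AOP_FLAG_NOFS where required); a hypothetical helper after this change might look like the sketch below (the function itself is made up, only the pagecache_write_begin()/pagecache_write_end() calls reflect the real API):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int zero_range(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len)
{
	struct page *page;
	void *fsdata;
	int err;

	/* flags: only AOP_FLAG_CONT_EXPAND and AOP_FLAG_NOFS remain; pass 0 here */
	err = pagecache_write_begin(file, mapping, pos, len, 0,
				    &page, &fsdata);
	if (err)
		return err;
	zero_user(page, pos & (PAGE_SIZE - 1), len);
	err = pagecache_write_end(file, mapping, pos, len, len,
				  page, fsdata);
	return err < 0 ? err : 0;
}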