From 4fa81ed27781a12f6303b9263056635ae74e3e21 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Wed, 18 Mar 2009 13:27:32 +0100
Subject: [S390] __div64_31 broken for CONFIG_MARCH_G5

The implementation of __div64_31 for G5 machines is broken. The
comments in __div64_31 are correct, only the code does not do what
the comments say. The part "If the remainder has overflown subtract
base and increase the quotient" is only partially realized: the base
is subtracted correctly, but the quotient is only increased if the
dividend had the last bit set. Using the correct instruction fixes
the problem.

Cc: stable@kernel.org
Reported-by: Frans Pop
Tested-by: Frans Pop
Signed-off-by: Martin Schwidefsky
---
 arch/s390/lib/div64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
index a5f8300bf3ee..d9e62c0b576a 100644
--- a/arch/s390/lib/div64.c
+++ b/arch/s390/lib/div64.c
@@ -61,7 +61,7 @@ static uint32_t __div64_31(uint64_t *n, uint32_t base)
 		"	clr	%0,%3\n"
 		"	jl	0f\n"
 		"	slr	%0,%3\n"
-		"	alr	%1,%2\n"
+		"	ahi	%1,1\n"
 		"0:\n"
 		: "+d" (reg2), "+d" (reg3), "=d" (tmp)
 		: "d" (base), "2" (1UL) : "cc" );
--
cgit v1.2.3
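For reference, the following user-space C model illustrates the
restoring-division step that the inline assembly implements. It is a
sketch of the algorithm only, not the kernel's word-at-a-time
implementation, and div64_31_model() is a name invented for this
example: whenever the shifted remainder reaches the base, the base is
subtracted and the quotient is increased by a constant 1 ("ahi %1,1"),
the increment that the broken code applied only when the dividend had
the last bit set.

#include <stdint.h>

/* Bit-by-bit restoring division: divides *n by base (base > 0),
 * stores the quotient in *n and returns the remainder. */
static uint32_t div64_31_model(uint64_t *n, uint32_t base)
{
	uint64_t rem = 0, quot = 0;
	int i;

	for (i = 63; i >= 0; i--) {
		/* shift the remainder:quotient pair left by one bit */
		rem = (rem << 1) | ((*n >> i) & 1);
		quot <<= 1;
		if (rem >= base) {
			/* the remainder has overflown: subtract the base
			 * and increase the quotient by one */
			rem -= base;
			quot += 1;
		}
	}
	*n = quot;
	return (uint32_t)rem;
}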
From f55d63854e426e95d7858c2662c2353262a5af70 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 18 Mar 2009 13:27:33 +0100
Subject: [S390] topology: define SD_MC_INIT to fix performance regression

The default values for SD_MC_INIT cause up to 40% additional CPU usage
on some network benchmarks compared to the plain SD_CPU_INIT values.
So just define SD_MC_INIT to SD_CPU_INIT. More tuning needs to be done.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/topology.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c93eb50e1d09..c979c3b56ab0 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -30,6 +30,8 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
+#define SD_MC_INIT	SD_CPU_INIT
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
--
cgit v1.2.3

From cf087343805ebfea2b1234b06fd5f12273e865b1 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 18 Mar 2009 13:27:34 +0100
Subject: [S390] ftrace/mcount: fix kernel stack backchain

With a packed stack the backchain is stored at a different location
within the stack frame. Just use __SF_BACKCHAIN as the offset to store
the backchain.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/mcount.S | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 397d131a345f..80641224a095 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -5,6 +5,8 @@
  *
  */
 
+#include <asm/asm-offsets.h>
+
 #ifndef CONFIG_64BIT
 .globl _mcount
 _mcount:
@@ -14,7 +16,7 @@ _mcount:
 	ahi	%r15,-96
 	l	%r3,100(%r15)
 	la	%r2,0(%r14)
-	st	%r1,0(%r15)
+	st	%r1,__SF_BACKCHAIN(%r15)
 	la	%r3,0(%r3)
 	bras	%r14,0f
 	.long	ftrace_trace_function
@@ -38,7 +40,7 @@ _mcount:
 	stg	%r14,112(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-160
-	stg	%r1,0(%r15)
+	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
 	larl	%r14,ftrace_trace_function
--
cgit v1.2.3

From 2887fc5aa60803b9d6d38c79248805f08d8b0e28 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Wed, 18 Mar 2009 13:27:35 +0100
Subject: [S390] Don't check for pfn_valid() in uaccess_pt.c

pfn_valid() actually checks for a valid struct page and not for a
valid pfn. With XIP mappings, which provide memory without struct
pages, this results in -EFAULT being returned by the (page table walk)
user copy functions, even though the memory is valid. Those user copy
functions don't need a struct page, so this patch just removes the
pfn_valid() check.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/lib/uaccess_pt.c | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index d66215b0fde9..b0b84c35b0ad 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -119,8 +119,6 @@ retry:
 			goto fault;
 
 		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn))
-			goto out;
 
 		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
@@ -135,7 +133,6 @@ retry:
 		done += size;
 		uaddr += size;
 	} while (done < n);
-out:
 	spin_unlock(&mm->page_table_lock);
 	return n - done;
 fault:
@@ -163,9 +160,6 @@ retry:
 		goto fault;
 
 	pfn = pte_pfn(*pte);
-	if (!pfn_valid(pfn))
-		goto out;
-
 	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 out:
 	return ret;
@@ -244,11 +238,6 @@ retry:
 			goto fault;
 
 		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn)) {
-			done = -1;
-			goto out;
-		}
-
 		offset = uaddr & (PAGE_SIZE-1);
 		addr = (char *)(pfn << PAGE_SHIFT) + offset;
 		len = min(count - done, PAGE_SIZE - offset);
@@ -256,7 +245,6 @@ retry:
 		done += len_str;
 		uaddr += len_str;
 	} while ((len_str == len) && (done < count));
-out:
 	spin_unlock(&mm->page_table_lock);
 	return done + 1;
 fault:
@@ -325,12 +313,7 @@ retry:
 		}
 
 		pfn_from = pte_pfn(*pte_from);
-		if (!pfn_valid(pfn_from))
-			goto out;
 		pfn_to = pte_pfn(*pte_to);
-		if (!pfn_valid(pfn_to))
-			goto out;
-
 		offset_from = uaddr_from & (PAGE_SIZE-1);
 		offset_to = uaddr_from & (PAGE_SIZE-1);
 		offset_max = max(offset_from, offset_to);
@@ -342,7 +325,6 @@ retry:
 		uaddr_from += size;
 		uaddr_to += size;
 	} while (done < n);
-out:
 	spin_unlock(&mm->page_table_lock);
 	return n - done;
 fault:
--
cgit v1.2.3
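All four hunks remove the same pattern, so the shape of these functions
after the change can be shown with one sketch. The helper walk_pte()
and the function name below are invented for this illustration; the
real code resolves the pte through the pgd/pud/pmd/pte chain and
handles faults by retrying, which is omitted here.

/* Sketch of a page-table-walk user copy after the fix: the pfn taken
 * from the pte is used directly to form the kernel-mapped address.
 * There is deliberately no pfn_valid() test -- XIP memory is valid
 * even though it has no struct page. */
static size_t copy_from_user_pt_model(struct mm_struct *mm,
				      unsigned long uaddr,
				      void *kbuf, size_t n)
{
	size_t done = 0;

	spin_lock(&mm->page_table_lock);
	do {
		unsigned long pfn, offset;
		size_t size;
		pte_t *pte = walk_pte(mm, uaddr);	/* hypothetical */

		if (!pte)
			break;			/* fault path omitted */
		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		memcpy((char *)kbuf + done,
		       (void *)((pfn << PAGE_SHIFT) + offset), size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;			/* bytes not copied */
}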
From f481bfafd36e621d6cbc62d4b25f74811410aef7 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Wed, 18 Mar 2009 13:27:36 +0100
Subject: [S390] make page table walking more robust

Make page table walking on s390 more robust. The current code requires
that the pgd/pud/pmd/pte loop is only entered for address ranges below
the end address of the last vma of the address space, but this is not
always true, e.g. the generic page table walker does not guarantee it.
Change TASK_SIZE/TASK_SIZE_OF to reflect the current size of the
address space. This makes the generic page table walker happy, but it
breaks the upgrade of a 3 level page table to a 4 level page table.
To make the upgrade work again, another fix is required.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/processor.h | 5 ++---
 arch/s390/mm/mmap.c               | 4 ++--
 arch/s390/mm/pgtable.c            | 2 ++
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 066b99502e09..db4523fe38ac 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -61,7 +61,7 @@ extern void print_cpu_info(struct cpuinfo_S390 *);
 extern int get_cpu_capability(unsigned int *);
 
 /*
- * User space process size: 2GB for 31 bit, 4TB for 64 bit.
+ * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
 
 #ifndef __s390x__
@@ -70,8 +70,7 @@ extern int get_cpu_capability(unsigned int *);
 
 #else /* __s390x__ */
 
-#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk,TIF_31BIT) ? \
-					(1UL << 31) : (1UL << 53))
+#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5932a824547a..346dd0c5cbde 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -35,7 +35,7 @@
  * Leave an at least ~128 MB hole.
  */
 #define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MAX_GAP (STACK_TOP/6*5)
 
 static inline unsigned long mmap_base(void)
 {
@@ -46,7 +46,7 @@ static inline unsigned long mmap_base(void)
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return TASK_SIZE - (gap & PAGE_MASK);
+	return STACK_TOP - (gap & PAGE_MASK);
 }
 
 static inline int mmap_is_legacy(void)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0767827540b1..6b6ddc4ea02b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -117,6 +117,7 @@ repeat:
 		crst_table_init(table, entry);
 		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
 		mm->pgd = (pgd_t *) table;
+		mm->task_size = mm->context.asce_limit;
 		table = NULL;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 			BUG();
 		}
 		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
 	update_mm(mm, current);
--
cgit v1.2.3
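The robustness issue is easiest to see from the shape of a generic page
table walk. The sketch below is schematic only: walk_pud_range() and
the function name are invented for the illustration, and the real
walker in mm/pagewalk.c does more work per level.

/* Walk the pgd entries covering [addr, end).  The caller derives "end"
 * from TASK_SIZE, so the loop only stays inside the top-level table if
 * TASK_SIZE matches the number of page table levels the process
 * actually has -- which is what this patch ensures on s390. */
static void walk_page_range_model(struct mm_struct *mm,
				  unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			walk_pud_range(pgd, addr, next); /* hypothetical */
	} while (pgd++, addr = next, addr != end);
}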
From 0fb1d9bcbcf701a45835aa150c57ca54ea685bfa Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Wed, 18 Mar 2009 13:27:37 +0100
Subject: [S390] make page table upgrade work again

Now that TASK_SIZE gives the current size of the address space, the
upgrade of a 64 bit process from 3 to 4 levels of page table needs to
use the arch_mmap_check hook to catch large mmap lengths. The
get_unmapped_area* functions need to check for -ENOMEM from the
arch_get_unmapped_area* functions, upgrade the page table and retry.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/mman.h |  5 +++++
 arch/s390/mm/mmap.c          | 44 ++++++++++++++++++++++++++++++--------------
 2 files changed, 35 insertions(+), 14 deletions(-)

diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 7839767d837e..da01432e8f44 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -22,4 +22,9 @@
 #define MCL_CURRENT	1	/* lock all current mappings */
 #define MCL_FUTURE	2	/* lock all future mappings */
 
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+int s390_mmap_check(unsigned long addr, unsigned long len);
+#define arch_mmap_check(addr,len,flags)	s390_mmap_check(addr,len)
+#endif
+
 #endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 346dd0c5cbde..e008d236cc15 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 
 #else
 
+int s390_mmap_check(unsigned long addr, unsigned long len)
+{
+	if (!test_thread_flag(TIF_31BIT) &&
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+		return crst_table_upgrade(current->mm, 1UL << 53);
+	return 0;
+}
+
 static unsigned long
 s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long area;
 	int rc;
 
-	addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
-	return addr;
+	return area;
 }
 
 static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 			const unsigned long len, const unsigned long pgoff,
 			const unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
+	unsigned long area;
 	int rc;
 
-	addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		area = arch_get_unmapped_area_topdown(filp, addr, len,
+						      pgoff, flags);
 	}
-	return addr;
+	return area;
 }
 
 /*
  * This function, called very early during the creation of a new
--
cgit v1.2.3
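Taken together with the previous patch, a large mmap() in a 64 bit
process now flows roughly as below. This is a condensed model of the
generic mmap path, not the verbatim kernel code; do_mmap_model() is a
name invented for the illustration.

/* How the two s390 hooks cooperate on a large mmap() request. */
static unsigned long do_mmap_model(struct file *file, unsigned long addr,
				   unsigned long len, unsigned long flags)
{
	int err;

	/* arch_mmap_check() runs before the free-range search, so a
	 * request with len >= TASK_SIZE upgrades the page table to
	 * 4 levels (an address-space limit of 1UL << 53) up front. */
	err = arch_mmap_check(addr, len, flags);
	if (err)
		return err;

	/* The search itself may still return -ENOMEM when the range
	 * only fits above the current limit; s390_get_unmapped_area()
	 * then upgrades to 4 levels and retries once. */
	addr = get_unmapped_area(file, addr, len, 0, flags);
	if (addr & ~PAGE_MASK)
		return addr;		/* an error code, e.g. -ENOMEM */

	/* ... continue building the mapping at addr ... */
	return addr;
}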