From 746cddd37d48a166f170165a0df4bd50fde1ea60 Mon Sep 17 00:00:00 2001
From: Weidong Han
Date: Fri, 10 Apr 2009 17:17:17 +0800
Subject: x86, intr-remap: fix eoi for interrupt remapping without x2apic

To simplify level-triggered irq migration in the presence of interrupt
remapping, Suresh used a virtual vector (the io-apic pin number) to
eliminate io-apic RTE modification. A level-triggered interrupt will
appear as an edge to the local apic cpu, but still as level to the
IO-APIC. So in addition to the local apic EOI, a directed EOI has to be
sent to the IO-APIC to clear the remote IRR bit in the IO-APIC RTE.
Please refer to Suresh's patch for more details (commit
0280f7c416c652a2fd95d166f52b199ae61122c0).

Now that interrupt remapping is decoupled from x2apic, the directed EOI
also needs to be done on the plain apic ack path. Otherwise, apic
interrupts won't work correctly under interrupt remapping.

Signed-off-by: Weidong Han
Cc: iommu@lists.linux-foundation.org
Cc: Weidong Han
Cc: suresh.b.siddha@intel.com
Cc: dwmw2@infradead.org
Cc: allen.m.kay@intel.com
LKML-Reference: <1239355037-22856-1-git-send-email-weidong.han@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/apic/io_apic.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d6..a2789e42e16 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif

-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
         int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
         spin_unlock_irqrestore(&ioapic_lock, flags);
 }

+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
          */
         ack_APIC_irq();

+        if (irq_remapped(irq))
+                eoi_ioapic_irq(desc);
+
         /* Now we can move and renable the irq */
         if (unlikely(do_unmask_irq)) {
                 /* Only migrate the irq if the ack has been received.
--
cgit v1.2.3
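The directed EOI the changelog refers to is a write to the IO-APIC itself, not just to the local APIC. The sketch below only illustrates that idea; it is not the kernel's eoi_ioapic_irq()/__eoi_ioapic_irq() implementation. The struct layout and helper name are invented for the sketch, and only the 0x40 register offset comes from the IO-APIC documentation (directed EOI exists on IO-APIC versions 0x20 and later).

    #include <stdint.h>

    #define IOAPIC_REG_EOI  0x40    /* directed-EOI register, IO-APIC version >= 0x20 */

    /* One IO-APIC pin that an irq is routed through (invented layout). */
    struct ioapic_route {
            volatile uint8_t *mmio_base;    /* mapped IO-APIC register window */
            uint8_t vector;                 /* vector programmed into the RTE */
    };

    /*
     * Directed EOI: write the vector into each IO-APIC's EOI register so the
     * remote IRR bit of the matching RTE is cleared; the local APIC EOI alone
     * no longer does that once the irq is delivered through remapping.
     */
    void directed_eoi(const struct ioapic_route *routes, int nr_routes)
    {
            int i;

            for (i = 0; i < nr_routes; i++)
                    *(volatile uint32_t *)(routes[i].mmio_base + IOAPIC_REG_EOI) =
                            routes[i].vector;
    }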
From a0d22f485af1553060b4094ee0154537a8f6a8a6 Mon Sep 17 00:00:00 2001
From: Andy Grover
Date: Thu, 9 Apr 2009 16:45:29 -0700
Subject: x86: Document get_user_pages_fast()

While better than get_user_pages(), the usage of gupf(), especially
the return values and the fact that it can potentially only partially
pin the range, warranted some documentation.

Signed-off-by: Andy Grover
Cc: npiggin@suse.de
Cc: akpm@linux-foundation.org
LKML-Reference: <1239320729-3262-1-git-send-email-andy.grover@oracle.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/gup.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb..6340cef6798 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
         return 1;
 }

+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:    starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write:    whether pages will be written to
+ * @pages:    array that receives pointers to the pages pinned.
+ *            Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                         struct page **pages)
 {
--
cgit v1.2.3
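Since the new comment stresses that get_user_pages_fast() can return fewer pages than requested, callers have to handle short counts explicitly. A minimal, hypothetical caller (kernel-style C, not part of this patch; the pin_user_range() name is invented) might look like this:

    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Pin 'nr' user pages starting at 'start'; undo a partial pin on failure. */
    static int pin_user_range(unsigned long start, int nr, struct page **pages)
    {
            int pinned = get_user_pages_fast(start, nr, 1 /* write */, pages);

            if (pinned < 0)
                    return pinned;          /* nothing pinned: -errno */

            if (pinned < nr) {
                    /* Partial pin: release what we did get and report failure. */
                    while (pinned--)
                            put_page(pages[pinned]);
                    return -EFAULT;
            }

            return 0;                       /* all 'nr' pages are pinned */
    }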
From 9b987aeb4a7bc42a3eb8361030b820b0263c31f1 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Thu, 9 Apr 2009 10:55:33 -0700
Subject: x86: fix set_fixmap to use phys_addr_t

Impact: fix kprobes crash on 32-bit with RAM above 4G

Use phys_addr_t for receiving a physical address argument instead of
unsigned long. This allows fixmap to handle pages higher than 4GB on
x86-32.

Signed-off-by: Masami Hiramatsu
Acked-by: Mathieu Desnoyers
Cc: Andrew Morton
Cc: Ananth N Mavinakayanahalli
Cc: systemtap-ml
Cc: Gary Hade
Cc: Linus Torvalds
LKML-Reference: <49DE3695.6040800@redhat.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fixmap.h   |  4 ++--
 arch/x86/include/asm/io.h       |  6 ++++--
 arch/x86/include/asm/paravirt.h |  4 ++--
 arch/x86/mm/ioremap.c           | 23 +++++++++++++----------
 arch/x86/mm/pgtable.c           |  3 ++-
 arch/x86/xen/mmu.c              |  2 +-
 6 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77..2d81af3974a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;

 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-                       unsigned long phys, pgprot_t flags);
+                       phys_addr_t phys, pgprot_t flags);

 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-                                unsigned long phys, pgprot_t flags)
+                                phys_addr_t phys, pgprot_t flags)
 {
         native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8..73739322b6d 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+                                   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+                                    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);

 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dd..378e3691c08 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
         /* Sometimes the physical address is a pfn, and sometimes its
            an mfn.  We can tell which is which from the index. */
         void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-                           unsigned long phys, pgprot_t flags);
+                           phys_addr_t phys, pgprot_t flags);
 };

 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);

 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-                                unsigned long phys, pgprot_t flags)
+                                phys_addr_t phys, pgprot_t flags)
 {
         pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e8..09daebfdb11 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -547,7 +547,7 @@ void __init early_ioremap_reset(void)
 }

 static void __init __early_set_fixmap(enum fixed_addresses idx,
-                                      unsigned long phys, pgprot_t flags)
+                                      phys_addr_t phys, pgprot_t flags)
 {
         unsigned long addr = __fix_to_virt(idx);
         pte_t *pte;
@@ -566,7 +566,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 }

 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-                                           unsigned long phys, pgprot_t prot)
+                                           phys_addr_t phys, pgprot_t prot)
 {
         if (after_paging_init)
                 __set_fixmap(idx, phys, prot);
@@ -607,9 +607,10 @@ static int __init check_early_ioremap_leak(void)
 late_initcall(check_early_ioremap_leak);

 static void __init __iomem *
-__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 {
-        unsigned long offset, last_addr;
+        unsigned long offset;
+        resource_size_t last_addr;
         unsigned int nrpages;
         enum fixed_addresses idx0, idx;
         int i, slot;
@@ -625,15 +626,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
         }

         if (slot < 0) {
-                printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
-                        phys_addr, size);
+                printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+                        (u64)phys_addr, size);
                 WARN_ON(1);
                 return NULL;
         }

         if (early_ioremap_debug) {
-                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-                       phys_addr, size, slot);
+                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+                       (u64)phys_addr, size, slot);
                 dump_stack();
         }

@@ -680,13 +681,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 }

 /* Remap an IO device */
-void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
 {
         return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }

 /* Remap memory */
-void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
 {
         return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464f..7aa03a5389f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
         fixmaps_set++;
 }

-void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+                       pgprot_t flags)
 {
         __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index db3802fb7b8..2a81838a9ab 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1750,7 +1750,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 }
 #endif  /* CONFIG_X86_64 */

-static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 {
         pte_t pte;

--
cgit v1.2.3
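The reason unsigned long was not good enough: on x86-32 with PAE, physical addresses are wider than the 32-bit unsigned long, so an address above 4GB is silently truncated before it ever reaches the fixmap PTE, while phys_addr_t (64-bit on such configurations) keeps the high bits. A tiny stand-alone illustration of the truncation (plain user-space C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t phys = 0x123456789000ULL;      /* a page frame above 4GB */

            /* What the old 'unsigned long phys' parameter amounted to on a
             * 32-bit build: the upper 32 bits are silently dropped. */
            uint32_t truncated = (uint32_t)phys;

            printf("phys      = 0x%llx\n", (unsigned long long)phys);
            printf("truncated = 0x%x\n", truncated);
            return 0;
    }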
From 575922248c0df490843ddfbcf3bc65b54c4adb08 Mon Sep 17 00:00:00 2001
From: Rakib Mullick
Date: Sat, 11 Apr 2009 09:04:59 +0600
Subject: x86: Fix section mismatches in mpparse

Impact: fix section mismatch

In arch/x86/kernel/mpparse.c, smp_reserve_bootmem() is called from .init
code and also refers to a function in the .init section, which causes
the first warning. check_irq_src() also requires an __init annotation,
because it too refers to the .init section.

Signed-off-by: Rakib Mullick
Cc: Andrew Morton
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/mpparse.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf..70fd7e414c1 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
         __get_smp_config(0);
 }

-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
         unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)

 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
         int i;

@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
         }
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */

 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
--
cgit v1.2.3
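As background for the warning, here is a schematic of the rule being enforced (invented function names, not taken from mpparse.c): code that calls or references __init functions must itself live in .init.text, otherwise it could run after the init sections are freed, so modpost flags the reference.

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int __init probe_tables(void)            /* placed in .init.text */
    {
            return 42;
    }

    /*
     * Without the __init annotation here, modpost would warn about a section
     * mismatch: a plain .text function would keep a reference to
     * .init.text:probe_tables(), which is discarded after boot.
     */
    static int __init scan_hardware(void)
    {
            return probe_tables();
    }

    static int __init example_init(void)
    {
            pr_info("scan result: %d\n", scan_hardware());
            return 0;
    }
    device_initcall(example_init);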
From 1ee4bd92a7aa49eb66c8d5672e837090d3e7b7ff Mon Sep 17 00:00:00 2001
From: Marcin Slusarz
Date: Fri, 10 Apr 2009 22:47:17 +0200
Subject: x86: fix wrong section of pat_disable & make it static

pat_disable() cannot be __cpuinit anymore, because it is called from
pat_init() and the callchain looks like this:

  pat_disable [cpuinit] <- pat_init <- generic_set_all <-
    ipi_handler <- set_mtrr <- (other non init/cpuinit functions)

WARNING: arch/x86/mm/built-in.o(.text+0x449e): Section mismatch in
reference from the function pat_init() to the function
.cpuinit.text:pat_disable()
The function pat_init() references the function __cpuinit pat_disable().
This is often because pat_init lacks a __cpuinit annotation or the
annotation of pat_disable is wrong.

The non-CONFIG_X86_PAT version of pat_disable() is static inline, so
this version can be static too (and there are no callers outside of
this file).

Signed-off-by: Marcin Slusarz
Acked-by: Sam Ravnborg
LKML-Reference: <49DFB055.6070405@gmail.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb..c009a241d56 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;

-void __cpuinit pat_disable(const char *reason)
+static inline void pat_disable(const char *reason)
 {
         pat_enabled = 0;
         printk(KERN_INFO "%s\n", reason);
--
cgit v1.2.3
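The "static inline stub when the feature is compiled out" arrangement mentioned above is a common kernel idiom. A schematic version with invented names (CONFIG_FEATURE_FOO, foo_disable), rather than the real pat.h/pat.c code:

    #include <linux/kernel.h>

    #ifdef CONFIG_FEATURE_FOO

    static int foo_enabled = 1;

    /* Real implementation: only used inside this file, so static is enough. */
    static void foo_disable(const char *reason)
    {
            foo_enabled = 0;
            printk(KERN_INFO "%s\n", reason);
    }

    #else   /* !CONFIG_FEATURE_FOO */

    /* Header-style stub: compiles away entirely when the feature is off. */
    static inline void foo_disable(const char *reason)
    {
            (void)reason;
    }

    #endif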
From a30469e7921a6dd2067e9e836d7787cfa0105627 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Fri, 10 Apr 2009 15:21:24 -0700
Subject: x86: add linux kernel support for YMM state

Impact: save/restore Intel-AVX state properly between tasks

Intel Advanced Vector Extensions (AVX) introduce 256-bit vector
processing capability. More about AVX at
http://software.intel.com/sites/avx

Add OS support for YMM state management using xsave/xrstor
infrastructure to support AVX.

Signed-off-by: Suresh Siddha
LKML-Reference: <1239402084.27006.8057.camel@localhost.localdomain>
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/processor.h  | 6 ++++++
 arch/x86/include/asm/sigcontext.h | 6 ++++++
 arch/x86/include/asm/xsave.h      | 3 ++-
 arch/x86/kernel/xsave.c           | 2 +-
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2f..fcf4d92e7e0 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -352,6 +352,11 @@ struct i387_soft_struct {
         u32                     entry_eip;
 };

+struct ymmh_struct {
+        /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+        u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
         u64 xstate_bv;
         u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
         struct i387_fxsave_struct i387;
         struct xsave_hdr_struct xsave_hdr;
+        struct ymmh_struct ymmh;
         /* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));

diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa..72e5a449166 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
         __u64 reserved2[5];
 };

+struct _ymmh_state {
+        /* 16 * 16 bytes for each YMMH-reg */
+        __u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
         struct _fpstate fpstate;
         struct _xsave_hdr xstate_hdr;
+        struct _ymmh_state ymmh;
         /* new processor state extensions go here */
 };

diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a..727acc15234 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@

 #define XSTATE_FP       0x1
 #define XSTATE_SSE      0x2
+#define XSTATE_YMM      0x4

 #define XSTATE_FPSSE    (XSTATE_FP | XSTATE_SSE)

@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)

 #ifdef CONFIG_X86_64
 #define REX_PREFIX      "0x48, "
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e9..0a5b04aa98f 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void)
         }

         /*
-         * for now OS knows only about FP/SSE
+         * Support only the state known to OS.
          */
         pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
         xsave_init();
--
cgit v1.2.3
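To see from user space what adding XSTATE_YMM to XCNTXT_MASK amounts to, one can check CPUID and the XCR0 register. The stand-alone program below is not part of the patch; it assumes a GCC-compatible toolchain for __get_cpuid() and the xgetbv inline assembly, and simply reports whether the kernel has enabled XMM and YMM state saving.

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            uint32_t xcr0_lo, xcr0_hi;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;

            /* CPUID.01H:ECX -- bit 27: OSXSAVE (kernel set CR4.OSXSAVE),
             *                  bit 28: AVX supported by the CPU */
            if (!(ecx & (1u << 27)) || !(ecx & (1u << 28))) {
                    puts("AVX or OSXSAVE not available");
                    return 1;
            }

            /* XGETBV(0) reads XCR0: bit 1 = XMM state, bit 2 = YMM state */
            __asm__ volatile("xgetbv" : "=a"(xcr0_lo), "=d"(xcr0_hi) : "c"(0));

            if ((xcr0_lo & 0x6) == 0x6)
                    puts("kernel saves/restores XMM and YMM state via XSAVE");
            else
                    puts("YMM state not enabled in XCR0");

            return 0;
    }

XCR0 bit 2 here corresponds to the XSTATE_YMM bit the patch adds to the mask of state the OS is willing to manage.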