Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/imx53-qsb-common.dtsi   11
-rw-r--r--  arch/arm/boot/dts/qcom-apq8064.dtsi        3
-rw-r--r--  arch/arm/mm/ioremap.c                      2
-rw-r--r--  arch/arm64/mm/fault.c                     15
-rw-r--r--  arch/mips/dec/int-handler.S               34
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h      13
-rw-r--r--  arch/mips/include/asm/switch_to.h         18
-rw-r--r--  arch/mips/include/uapi/asm/inst.h          2
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c     10
-rw-r--r--  arch/mips/kernel/process.c                11
-rw-r--r--  arch/mips/kernel/ptrace.c                  7
-rw-r--r--  arch/mips/kernel/traps.c                  72
-rw-r--r--  arch/sparc/include/asm/page_64.h           1
-rw-r--r--  arch/sparc/kernel/pci.c                   29
-rw-r--r--  arch/sparc/lib/U3memcpy.S                  4
-rw-r--r--  arch/sparc/mm/fault_64.c                   1
-rw-r--r--  arch/sparc/mm/tlb.c                       35
-rw-r--r--  arch/sparc/mm/tsb.c                       18
-rw-r--r--  arch/um/Makefile                           4
-rw-r--r--  arch/x86/include/asm/percpu.h              8
-rw-r--r--  arch/x86/kernel/time.c                     2
-rw-r--r--  arch/x86/mm/pageattr.c                    21
-rw-r--r--  arch/x86/pci/fixup.c                       7
-rw-r--r--  arch/x86/um/setjmp_32.S                   16
-rw-r--r--  arch/x86/um/setjmp_64.S                   16
25 files changed, 225 insertions(+), 135 deletions(-)
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 53fd75c8ffcf..47894b41e4e2 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -130,6 +130,17 @@
};
};
+&cpu0 {
+ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+ operating-points = <
+ /* kHz uV */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ 1000000 1200000
+ >;
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
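In the legacy operating-points (v1) binding used above, each row pairs a CPU frequency in kHz with a supply voltage in µV, so the table caps the board at 1000000 kHz (1 GHz) at 1.2 V. A minimal C sketch of the same data, purely for illustration (the struct and names are not the kernel's OPP code):

    /* Illustrative representation of the OPP table above -- not kernel code. */
    struct opp {
        unsigned int khz;   /* CPU clock in kHz */
        unsigned int uv;    /* supply voltage in microvolts */
    };

    static const struct opp imx53_qsb_opps[] = {
        {  166666,  850000 },   /* ~167 MHz @ 0.85 V */
        {  400000,  900000 },   /*  400 MHz @ 0.90 V */
        {  800000, 1050000 },   /*  800 MHz @ 1.05 V */
        { 1000000, 1200000 },   /*    1 GHz @ 1.20 V -- rated maximum */
    };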
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index e00d50ef678f..3ff5ea16ebb3 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -577,7 +577,7 @@
};
sata0: sata@29000000 {
- compatible = "generic-ahci";
+ compatible = "qcom,apq8064-ahci", "generic-ahci";
status = "disabled";
reg = <0x29000000 0x180>;
interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>;
@@ -599,6 +599,7 @@
phys = <&sata_phy0>;
phy-names = "sata-phy";
+ ports-implemented = <0x1>;
};
/* Temporary fixed regulator */
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c81056c1dd7..2a3feb73de0b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -460,7 +460,7 @@ void pci_ioremap_set_mem_type(int mem_type)
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
PCI_IO_VIRT_BASE + offset + SZ_64K,
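The old bound check was off by one: for an inclusive IO_SPACE_LIMIT, the last byte of a 64 KiB window starting at offset is offset + SZ_64K - 1, so the unadjusted sum spuriously rejected the final valid window. A standalone sketch of the boundary case, assuming a 1 MiB I/O space (IO_SPACE_LIMIT = 0xfffff; the real value is platform-dependent):

    #include <assert.h>

    #define SZ_64K          0x10000
    #define IO_SPACE_LIMIT  0xfffff /* assumed: 1 MiB, inclusive */

    int main(void)
    {
        unsigned int offset = 0xf0000;  /* the last valid 64 KiB window */

        /* Old check: false positive, 0x100000 > 0xfffff. */
        assert(offset + SZ_64K > IO_SPACE_LIMIT);
        /* New check: passes; the window's last byte sits exactly on the limit. */
        assert(!(offset + SZ_64K - 1 > IO_SPACE_LIMIT));
        return 0;
    }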
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5cc1e418cb79..93af2159e205 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -129,26 +129,27 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
/* only preserve the access flags and write permission */
pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
- /*
- * PTE_RDONLY is cleared by default in the asm below, so set it in
- * back if necessary (read-only or clean PTE).
- */
+ /* set PTE_RDONLY if actual read-only or clean PTE */
if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*
* Setting the flags must be done atomically to avoid racing with the
- * hardware update of the access/dirty state.
+ * hardware update of the access/dirty state. The PTE_RDONLY bit must
+ * be set to the most permissive (lowest value) of *ptep and entry
+ * (calculated as: a & b == ~(~a | ~b)).
*/
+ pte_val(entry) ^= PTE_RDONLY;
asm volatile("// ptep_set_access_flags\n"
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
- " and %0, %0, %3 // clear PTE_RDONLY\n"
+ " eor %0, %0, %3 // negate PTE_RDONLY in *ptep\n"
" orr %0, %0, %4 // set flags\n"
+ " eor %0, %0, %3 // negate final PTE_RDONLY\n"
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
- : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+ : "L" (PTE_RDONLY), "r" (pte_val(entry)));
flush_tlb_fix_spurious_fault(vma, address);
return 1;
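The eor/orr/eor sequence ORs every bit of the two PTEs except PTE_RDONLY, which effectively gets ANDed (the least restrictive outcome for an active-high read-only bit), per the a & b == ~(~a | ~b) identity in the comment. A standalone C sketch of the identity, using a hypothetical bit position:

    #include <assert.h>
    #include <stdint.h>

    #define PTE_RDONLY (UINT64_C(1) << 7)   /* hypothetical bit, for illustration */

    static uint64_t merge(uint64_t oldv, uint64_t newv)
    {
        newv ^= PTE_RDONLY; /* pte_val(entry) ^= PTE_RDONLY, done in C above */
        oldv ^= PTE_RDONLY; /* eor %0, %0, %3 */
        oldv |= newv;       /* orr %0, %0, %4 */
        oldv ^= PTE_RDONLY; /* eor %0, %0, %3 */
        return oldv;
    }

    int main(void)
    {
        /* PTE_RDONLY survives only when set in BOTH inputs (AND)... */
        assert(!(merge(0, PTE_RDONLY) & PTE_RDONLY));
        assert(!(merge(PTE_RDONLY, 0) & PTE_RDONLY));
        assert(merge(PTE_RDONLY, PTE_RDONLY) & PTE_RDONLY);
        /* ...while every other bit is plainly ORed. */
        assert(merge(1, 2) == 3);
        return 0;
    }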
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 554d1da97743..21f4a9fe82fa 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -147,23 +147,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, cpu_mask_nr_tbl
lui t1, %hi(cpu_mask_nr_tbl)
addiu t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, cpu_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(cpu_mask_nr_tbl)
- lui AT, %hi(cpu_mask_nr_tbl)
- daddiu t1, t1, %higher(cpu_mask_nr_tbl)
- daddiu AT, AT, %lo(cpu_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
1: lw t2,(t1)
nop
@@ -214,23 +203,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, asic_mask_nr_tbl
lui t1, %hi(asic_mask_nr_tbl)
addiu t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, asic_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(asic_mask_nr_tbl)
- lui AT, %hi(asic_mask_nr_tbl)
- daddiu t1, t1, %higher(asic_mask_nr_tbl)
- daddiu AT, AT, %lo(asic_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
2: lw t2,(t1)
nop
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 2f021cdfba4f..742223716fc8 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -66,6 +66,8 @@ extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -92,4 +94,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
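To see the shift arithmetic: the FCSR Enable bits occupy bits 7..11 and the Cause bits 12..17, so ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) is 13 - 8 = 5, and shifting the enables left by 5 lines each one up with its Cause bit. A userspace sketch (mask values copied from arch/mips/include/asm/mipsregs.h):

    #include <assert.h>
    #include <strings.h>            /* ffs() */

    #define FPU_CSR_UNI_X 0x00020000ul  /* Cause: Unimplemented (always enabled) */
    #define FPU_CSR_INE_X 0x00001000ul  /* Cause: Inexact */
    #define FPU_CSR_OVF_X 0x00004000ul  /* Cause: Overflow */
    #define FPU_CSR_ALL_X 0x0003f000ul  /* Cause bits 12..17 */
    #define FPU_CSR_OVF_E 0x00000200ul  /* Enable: Overflow */
    #define FPU_CSR_ALL_E 0x00000f80ul  /* Enable bits 7..11 */

    static unsigned long mask_fcr31_x(unsigned long fcr31)
    {
        return fcr31 & (FPU_CSR_UNI_X |
                        ((fcr31 & FPU_CSR_ALL_E) <<
                         (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
    }

    int main(void)
    {
        /* Inexact raised but not enabled: masked out, no signal. */
        assert(mask_fcr31_x(FPU_CSR_INE_X) == 0);
        /* Overflow raised and enabled: passes through. */
        assert(mask_fcr31_x(FPU_CSR_OVF_X | FPU_CSR_OVF_E) == FPU_CSR_OVF_X);
        /* Unimplemented always passes; it has no Enable bit. */
        assert(mask_fcr31_x(FPU_CSR_UNI_X) == FPU_CSR_UNI_X);
        return 0;
    }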
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 9b44d5a816fa..1b6f2f219298 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -846,7 +846,7 @@ struct mm16_r3_format { /* Load from global pointer format */
struct mm16_r5_format { /* Load/store from stack pointer format */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int rt : 5,
- __BITFIELD_FIELD(signed int simmediate : 5,
+ __BITFIELD_FIELD(unsigned int imm : 5,
__BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;))))
};
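The rename also encodes a semantic fix: this 5-bit stack-pointer offset is zero-extended by the hardware, so declaring the bitfield signed made encodings 16..31 read back negative. A quick standalone check (field widths as in the struct above):

    #include <assert.h>

    struct old_fmt { signed int simmediate : 5; }; /* range -16..15 */
    struct new_fmt { unsigned int imm : 5; };      /* range 0..31 */

    int main(void)
    {
        struct old_fmt o = { 0 };
        struct new_fmt n = { 0 };

        o.simmediate = 17;  /* out of range: implementation-defined wrap */
        n.imm = 17;

        assert(n.imm == 17);        /* unsigned field holds the offset */
        assert(o.simmediate != 17); /* a signed 5-bit field never can */
        return 0;
    }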
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index cbe0f025856d..7b887027dca2 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -900,7 +900,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1183,13 +1183,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ed6cac4a4df0..ebd8a715fe38 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -207,7 +207,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
if (ip->mm16_r5_format.rt != 31)
return 0;
- *poff = ip->mm16_r5_format.simmediate;
+ *poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
@@ -341,6 +341,7 @@ static int get_frame_info(struct mips_frame_info *info)
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
unsigned int i;
info->pc_offset = -1;
@@ -352,15 +353,19 @@ static int get_frame_info(struct mips_frame_info *info)
ip_end = (void *)ip + info->func_size;
- for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
+ for (i = 0; i < max_insns && ip < ip_end; i++) {
+ ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.halfword[0] = 0;
insn.halfword[1] = ip->halfword[0];
+ last_insn_size = 2;
} else if (is_mmips) {
insn.halfword[0] = ip->halfword[1];
insn.halfword[1] = ip->halfword[0];
+ last_insn_size = 4;
} else {
insn.word = ip->word;
+ last_insn_size = 4;
}
if (is_jump_ins(&insn))
@@ -382,8 +387,6 @@ static int get_frame_info(struct mips_frame_info *info)
tmp = (ip->halfword[0] >> 1);
info->frame_size = -(signed short)(tmp & 0xf);
}
- ip = (void *) &ip->halfword[1];
- ip--;
} else
#endif
info->frame_size = - ip->i_format.simmediate;
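The loop fix is the classic variable-length decode pattern: the cursor must advance by the size of the instruction just decoded (microMIPS mixes 16- and 32-bit encodings), not by a fixed 4-byte stride. A simplified sketch of the corrected shape; is_16bit() and decode() are placeholders, not the kernel's decoder:

    /* Simplified shape of the corrected scan -- not the kernel's decoder. */
    static int is_16bit(const unsigned char *p) { return p[0] & 1; } /* placeholder */
    static void decode(const unsigned char *p, unsigned int n) { (void)p; (void)n; }

    static void scan(const unsigned char *func_start,
                     const unsigned char *ip_end, unsigned int max_insns)
    {
        const unsigned char *ip = func_start;
        unsigned int last_insn_size = 0;
        unsigned int i;

        for (i = 0; i < max_insns && ip < ip_end; i++) {
            ip += last_insn_size;                  /* step over the PREVIOUS insn */
            last_insn_size = is_16bit(ip) ? 2 : 4; /* size of THIS insn */
            decode(ip, last_insn_size);
        }
    }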
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 5a869515b393..9d04392f7ef0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1b901218e3ae..6abd6b41c13d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -706,6 +706,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -715,27 +741,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -798,13 +804,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -830,7 +836,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -852,13 +858,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1431,13 +1437,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 8c2a8c937540..c1263fc390db 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
#endif
#ifndef __ASSEMBLY__
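The macro records how many real TLB/TSB entries back one nominal huge page: sparc64 maps an 8 MiB HPAGE with two 4 MiB real pages, so with HPAGE_SHIFT = 23 and REAL_HPAGE_SHIFT = 22 it evaluates to 2, and the TSB-sizing changes later in this diff multiply huge-page counts by that factor. A one-line sanity check of the arithmetic (sparc64 shift values):

    #include <assert.h>

    #define REAL_HPAGE_SHIFT     22 /* 4 MiB real TLB entry */
    #define HPAGE_SHIFT          23 /* 8 MiB nominal huge page */
    #define REAL_HPAGE_PER_HPAGE (1UL << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))

    int main(void)
    {
        assert(REAL_HPAGE_PER_HPAGE == 2); /* two 4 MiB halves per huge page */
        return 0;
    }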
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9f9614df9e1e..c2b202d763a1 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
}
}
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+ void *stc, void *host_controller,
+ struct platform_device *op,
+ int numa_node)
+{
+ sd->iommu = iommu;
+ sd->stc = stc;
+ sd->host_controller = host_controller;
+ sd->op = op;
+ sd->numa_node = numa_node;
+}
+
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
if (!dev)
return NULL;
+ op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
- sd->iommu = pbm->iommu;
- sd->stc = &pbm->stc;
- sd->host_controller = pbm;
- sd->op = op = of_find_device_by_node(node);
- sd->numa_node = pbm->numa_node;
-
+ pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+ pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
@@ -1003,9 +1012,13 @@ int pcibios_add_device(struct pci_dev *dev)
* Copy dev_archdata from PF to VF
*/
if (dev->is_virtfn) {
+ struct dev_archdata *psd;
+
pdev = dev->physfn;
- memcpy(&dev->dev.archdata, &pdev->dev.archdata,
- sizeof(struct dev_archdata));
+ psd = &pdev->dev.archdata;
+ pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+ psd->stc, psd->host_controller, NULL,
+ psd->numa_node);
}
return 0;
}
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 54f98706b03b..5a8cb37f0a3b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
add GLOBAL_SPARE, 8, GLOBAL_SPARE
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index e15f33715103..b01ec72522cb 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -487,6 +487,7 @@ good_area:
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+ mm_rss *= REAL_HPAGE_PER_HPAGE;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3659d37b4d81..c56a195c9071 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
return;
if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
- if (pmd_val(pmd) & _PAGE_PMD_HUGE)
- mm->context.thp_pte_count++;
- else
- mm->context.thp_pte_count--;
+ /*
+ * Note that this routine only sets pmds for THP pages.
+ * Hugetlb pages are handled elsewhere. We need to check
+ * for huge zero page. Huge zero pages are like hugetlb
+ * pages in that there is no RSS, but there is the need
+ * for TSB entries. So, huge zero page counts go into
+ * hugetlb_pte_count.
+ */
+ if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+ if (is_huge_zero_page(pmd_page(pmd)))
+ mm->context.hugetlb_pte_count++;
+ else
+ mm->context.thp_pte_count++;
+ } else {
+ if (is_huge_zero_page(pmd_page(orig)))
+ mm->context.hugetlb_pte_count--;
+ else
+ mm->context.thp_pte_count--;
+ }
/* Do not try to allocate the TSB hash table if we
* don't have one already. We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
}
+/*
+ * This routine is only called when splitting a THP
+ */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
set_pmd_at(vma->vm_mm, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ /*
+ * set_pmd_at() will not be called in a way to decrement
+ * thp_pte_count when splitting a THP, so do it now.
+ * Sanity check pmd before doing the actual decrement.
+ */
+ if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+ !is_huge_zero_page(pmd_page(entry)))
+ (vma->vm_mm)->context.thp_pte_count--;
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 266411291634..84cd593117a6 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -489,8 +489,10 @@ retry_tsb_alloc:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- unsigned long total_huge_pte_count;
+ unsigned long saved_hugetlb_pte_count;
+ unsigned long saved_thp_pte_count;
#endif
unsigned int i;
@@ -503,10 +505,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
* will re-increment the counters as the parent PTEs are
* copied into the child address space.
*/
- total_huge_pte_count = mm->context.hugetlb_pte_count +
- mm->context.thp_pte_count;
+ saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+ saved_thp_pte_count = mm->context.thp_pte_count;
mm->context.hugetlb_pte_count = 0;
mm->context.thp_pte_count = 0;
+
+ mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif
/* copy_mm() copies over the parent's mm_struct before calling
@@ -519,11 +523,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
/* If this is fork, inherit the parent's TSB size. We would
* grow it to that size on the first page fault anyways.
*/
- tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+ tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (unlikely(total_huge_pte_count))
- tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+ if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+ tsb_grow(mm, MM_TSB_HUGE,
+ (saved_hugetlb_pte_count + saved_thp_pte_count) *
+ REAL_HPAGE_PER_HPAGE);
#endif
if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
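The mm_rss adjustment works because get_mm_rss() counts THP pages in base-page units: on sparc64 PAGE_SIZE is 8 KiB and HPAGE_SIZE is 8 MiB, so each THP contributes HPAGE_SIZE / PAGE_SIZE = 1024 base pages that must not inflate the base TSB. A worked check with hypothetical counts:

    #include <assert.h>

    #define PAGE_SHIFT  13  /* sparc64: 8 KiB base pages */
    #define HPAGE_SHIFT 23  /* 8 MiB huge pages */
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define HPAGE_SIZE  (1UL << HPAGE_SHIFT)

    int main(void)
    {
        unsigned long mm_rss = 5000;            /* hypothetical RSS, base pages */
        unsigned long saved_thp_pte_count = 3;  /* three THPs mapped */

        assert(HPAGE_SIZE / PAGE_SIZE == 1024);
        mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
        assert(mm_rss == 5000 - 3 * 1024);      /* base-page working set only */
        return 0;
    }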
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 9ccf462131c4..d9cd7ed27834 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -59,10 +59,14 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/um
# Same things for in6addr_loopback and mktime - found in libc. For these two we
# only get link-time error, luckily.
#
+# -Dlongjmp=kernel_longjmp prevents anything from referencing the libpthread.a
+# embedded copy of longjmp, same thing for setjmp.
+#
# These apply to USER_CFLAGS too.
KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ \
$(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
+ -Dlongjmp=kernel_longjmp -Dsetjmp=kernel_setjmp \
-Din6addr_loopback=kernel_in6addr_loopback \
-Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
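The -D defines are a blunt preprocessor-level rename: every setjmp/longjmp token in UML kernel sources becomes kernel_setjmp/kernel_longjmp, which the linker then resolves against the renamed assembly entry points in arch/x86/um/setjmp_*.S (changed at the end of this diff) rather than the libc or libpthread copies. A hedged illustration of the effect:

    /* Illustrative only.  Built with -Dsetjmp=kernel_setjmp, both the
     * declaration and the call below are rewritten to kernel_setjmp,
     * so no reference to libc's setjmp is ever emitted. */
    extern int setjmp(void *buf);   /* hypothetical UML-style declaration */

    int catch_fault(void *buf)
    {
        return setjmp(buf);         /* compiles as kernel_setjmp(buf) */
    }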
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e0ba66ca68c6..f5e780bfa2b3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -184,22 +184,22 @@ do { \
typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_arg(1)",%0" \
+ asm volatile(op "b "__percpu_arg(1)",%0"\
: "=q" (pfo_ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_arg(1)",%0" \
+ asm volatile(op "w "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_arg(1)",%0" \
+ asm volatile(op "l "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_arg(1)",%0" \
+ asm volatile(op "q "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
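Without volatile, GCC may treat the asm as a pure function of its "m" input and merge or hoist identical percpu reads, even across points where the value can change under the compiler's feet (e.g. preemption and CPU migration between two this_cpu_read() calls). The hazard, sketched with generic inline asm rather than the kernel macro:

    /* Illustrative only -- not the kernel's percpu accessor. */
    static inline unsigned long read_slot(const unsigned long *p)
    {
        unsigned long v;

        /* A plain asm(...) here would let the compiler CSE two calls
         * with the same p into a single load; asm volatile forces each
         * read to be performed as written. */
        asm volatile("movq %1, %0" : "=r" (v) : "m" (*p));
        return v;
    }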
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index d39c09119db6..590c8fd2ed9b 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -23,7 +23,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index cbd047e825d3..97dd53fc168c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -963,11 +963,11 @@ static void populate_pte(struct cpa_data *cpa,
}
}
-static int populate_pmd(struct cpa_data *cpa,
- unsigned long start, unsigned long end,
- unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
- unsigned int cur_pages = 0;
+ long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@@ -1037,12 +1037,12 @@ static int populate_pmd(struct cpa_data *cpa,
return num_pages;
}
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
- pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ pgprot_t pgprot)
{
pud_t *pud;
unsigned long end;
- int cur_pages = 0;
+ long cur_pages = 0;
pgprot_t pud_pgprot;
end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1098,7 +1098,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
/* Map trailing leftover */
if (start < end) {
- int tmp;
+ long tmp;
pud = pud_offset(pgd, start);
if (pud_none(*pud))
@@ -1124,7 +1124,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
pud_t *pud = NULL; /* shut up gcc */
pgd_t *pgd_entry;
- int ret;
+ long ret;
pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1359,7 +1359,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
- int ret, numpages = cpa->numpages;
+ unsigned long numpages = cpa->numpages;
+ int ret;
while (numpages) {
/*
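Switching these counters from int to long matters once a single set/clear request spans 2^31 or more 4 KiB pages (8 TiB of address space, plausible for the kernel direct map on large machines): cpa->numpages is an unsigned long, and funnelling it through int truncates. A quick LP64 demonstration:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long numpages = 1UL << 31; /* 8 TiB of 4 KiB pages */
        int as_int = (int)numpages;         /* old code: truncated */
        long as_long = (long)numpages;      /* fixed code: preserved */

        assert(as_long == (1L << 31));
        printf("int: %d  long: %ld\n", as_int, as_long);
        return 0;
    }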
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 0ae7e9fa348d..89f90549c6a8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -541,9 +541,16 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+/*
+ * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ *
+ * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
+ * entry BDF2.
+ */
static void pci_bdwep_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S
index b766792c9933..39053192918d 100644
--- a/arch/x86/um/setjmp_32.S
+++ b/arch/x86/um/setjmp_32.S
@@ -16,9 +16,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
#ifdef _REGPARM
movl %eax,%edx
#else
@@ -35,13 +35,13 @@ setjmp:
movl %ecx,20(%edx) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
#ifdef _REGPARM
xchgl %eax,%edx
#else
@@ -55,4 +55,4 @@ longjmp:
movl 16(%edx),%edi
jmp *20(%edx)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp
diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S
index 45f547b4043e..c56942e1a38c 100644
--- a/arch/x86/um/setjmp_64.S
+++ b/arch/x86/um/setjmp_64.S
@@ -18,9 +18,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
pop %rsi # Return address, and adjust the stack
xorl %eax,%eax # Return value
movq %rbx,(%rdi)
@@ -34,13 +34,13 @@ setjmp:
movq %rsi,56(%rdi) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
movl %esi,%eax # Return value (int)
movq (%rdi),%rbx
movq 8(%rdi),%rsp
@@ -51,4 +51,4 @@ longjmp:
movq 48(%rdi),%r15
jmp *56(%rdi)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp