author	Yazen Ghannam <yazen.ghannam@amd.com>	2018-02-21 11:19:00 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-06-05 11:41:55 +0200
commit	5df3a1b9f87b62c66fb2fa39c7b22eb32303df36 (patch)
tree	fb722af406935d5f7adb9a059497411f7bdb202d /arch
parent	6bcf3b066c69d00068d1afdcad477d4648ffa51d (diff)
x86/mce/AMD: Carve out SMCA get_block_address() code
commit 8a331f4a0863bea758561c921b94b4d28f7c4029 upstream.

Carve out the SMCA code in get_block_address() into a separate helper
function.

No functional change.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
[ Save an indentation level. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: http://lkml.kernel.org/r/20180215210943.11530-4-Yazen.Ghannam@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd.c	57
1 file changed, 31 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 259c75d7a2a0..b44e3e263abd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -429,6 +429,35 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
 	wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
+static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+				  unsigned int block)
+{
+	u32 low, high;
+	u32 addr = 0;
+
+	if (smca_get_bank_type(bank) == SMCA_RESERVED)
+		return addr;
+
+	if (!block)
+		return MSR_AMD64_SMCA_MCx_MISC(bank);
+
+	/*
+	 * For SMCA enabled processors, BLKPTR field of the first MISC register
+	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
+	 */
+	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+		return addr;
+
+	if (!(low & MCI_CONFIG_MCAX))
+		return addr;
+
+	if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
+	    (low & MASK_BLKPTR_LO))
+		return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+
+	return addr;
+}
+
 static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
 			     unsigned int bank, unsigned int block)
 {
@@ -449,32 +478,8 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
 		}
 	}
 
-	if (mce_flags.smca) {
-		if (smca_get_bank_type(bank) == SMCA_RESERVED)
-			return addr;
-
-		if (!block) {
-			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
-		} else {
-			/*
-			 * For SMCA enabled processors, BLKPTR field of the
-			 * first MISC register (MCx_MISC0) indicates presence of
-			 * additional MISC register set (MISC1-4).
-			 */
-			u32 low, high;
-
-			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
-				return addr;
-
-			if (!(low & MCI_CONFIG_MCAX))
-				return addr;
-
-			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
-			    (low & MASK_BLKPTR_LO))
-				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
-		}
-		return addr;
-	}
+	if (mce_flags.smca)
+		return smca_get_block_address(cpu, bank, block);
 
 	/* Fall back to method we used for older processors: */
 	switch (block) {
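
For readers who want to trace the address-selection logic outside the kernel, here is a minimal, self-contained C sketch of the decision flow the new smca_get_block_address() helper implements: reserved bank -> no address, block 0 -> MCx_MISC, block N>0 -> MCx_MISCy(bank, N-1) only if MCAX is enabled and MCx_MISC0's BLKPTR field is populated. The fake_rdmsr() stub, the FAKE_* names, and their values are placeholders invented for this sketch (the MSR offsets are merely modeled on the SMCA per-bank register layout); this is not the kernel's code or headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder MSR numbers, modeled on the SMCA per-bank register layout. */
#define FAKE_SMCA_MCx_MISC(b)     (0xc0002003u + 0x10u * (b))
#define FAKE_SMCA_MCx_CONFIG(b)   (0xc0002004u + 0x10u * (b))
#define FAKE_SMCA_MCx_MISCy(b, y) (0xc000200au + (y) + 0x10u * (b))

/* Placeholder bit masks standing in for MCI_CONFIG_MCAX and MASK_BLKPTR_LO. */
#define FAKE_CONFIG_MCAX   0x01u
#define FAKE_BLKPTR_LO     0xff000000u

/*
 * Stand-in for rdmsr_safe_on_cpu(): returns 0 on success and fills in the
 * low/high halves.  Here it simply pretends MCAX is enabled and that
 * MCx_MISC0 has a non-zero BLKPTR field.
 */
static int fake_rdmsr(uint32_t msr, uint32_t bank, uint32_t *lo, uint32_t *hi)
{
	*lo = (msr == FAKE_SMCA_MCx_CONFIG(bank)) ? FAKE_CONFIG_MCAX : FAKE_BLKPTR_LO;
	*hi = 0;
	return 0;
}

/*
 * Mirrors the helper's control flow: bail out on a reserved bank, return the
 * bank's MISC0 address for block 0, otherwise follow BLKPTR to the extra
 * MISC1-4 registers when MCAX is set.
 */
static uint32_t sketch_block_address(uint32_t bank, uint32_t block, bool reserved)
{
	uint32_t low, high;

	if (reserved)
		return 0;

	if (!block)
		return FAKE_SMCA_MCx_MISC(bank);

	if (fake_rdmsr(FAKE_SMCA_MCx_CONFIG(bank), bank, &low, &high))
		return 0;

	if (!(low & FAKE_CONFIG_MCAX))
		return 0;

	if (!fake_rdmsr(FAKE_SMCA_MCx_MISC(bank), bank, &low, &high) &&
	    (low & FAKE_BLKPTR_LO))
		return FAKE_SMCA_MCx_MISCy(bank, block - 1);

	return 0;
}

int main(void)
{
	printf("bank 0, block 0 -> %#x\n", (unsigned)sketch_block_address(0, 0, false));
	printf("bank 0, block 2 -> %#x\n", (unsigned)sketch_block_address(0, 2, false));
	printf("reserved bank   -> %#x\n", (unsigned)sketch_block_address(5, 1, true));
	return 0;
}

Under the stubbed register values above, block 0 resolves to the bank's MISC0 placeholder address and block 2 follows BLKPTR to MISCy(bank, 1), which matches the ordering of checks in the kernel helper; the real code additionally depends on live MSR reads that can fault, which is why it uses rdmsr_safe_on_cpu().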