author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-10-20 11:47:18 +1000
committer	Paul Mackerras <paulus@samba.org>	2006-10-25 11:54:07 +1000
commit	0909c8c2d547e45ca50e2492b08ec93a37b35237 (patch)
tree	23e66e1dc9a5bd674ba1375b5fccd2cb0d5787a8 /arch
parent	7aeb732428fc8e2ecae6d432873770c12f04a979 (diff)
[POWERPC] Support feature fixups in vdso's
This patch reworks the feature fixup mechanism so vdso's can be fixed up.

The main issue was that the construct:

	.long	label		(or .llong on 64 bits)

will not work in the case of a shared library like the vdso. It will generate an empty placeholder in the fixup table along with a reloc, which is not something we can deal with in the vdso.

The idea here (thanks Alan Modra !) is to instead use something like:

	1:	.long	label - 1b

That is, the feature fixup tables no longer contain addresses of bits of code to patch, but offsets of such code from the fixup table entry itself. That is properly resolved by ld when building the .so's. I've modified the fixup mechanism generically to use that method for the rest of the kernel as well.

Another trick is that the 32 bits vDSO included in the 64 bits kernel needs to have a table in the 64 bits format. However, gas does not support 32 bits code with a statement of the form:

	.llong	label - 1b	(or even just .llong label)

That is, it cannot emit the right fixup/relocation for the linker to use to assign a 32 bits address to an .llong field. Thus, in the specific case of the 32 bits vdso built as part of the 64 bits kernel, we are using a modified macro that generates:

	.long	0xffffffff
	.llong	label - 1b

Note that this assumes that the value is negative, which is enforced by the .lds (those offsets are always negative as the .text is always before the fixup table, and gas doesn't support emitting the reloc the other way around).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
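As an illustration of the construct described above, here is a minimal sketch of what an offset-based fixup table entry could look like in powerpc assembly. This is not the kernel's actual BEGIN_FTR_SECTION/END_FTR_SECTION macro expansion (those macros live in headers under include/, outside this arch-limited diff), and CPU_FTR_EXAMPLE is a placeholder feature bit invented for the example; only the entry layout (mask, value, start_off, end_off) is taken from the struct fixup_entry that do_feature_fixups() walks in the patch below.

	# Illustrative sketch only -- not the kernel's real feature-section macros.
	.set	CPU_FTR_EXAMPLE, 0x20		# placeholder feature bit for the example

	97:					# start of the CPU-specific code
		mftb	r3			# example instruction to keep or nop out
	98:					# end of the CPU-specific code

		.section __ftr_fixup, "a"
		.align	3
	99:					# the fixup table entry itself
		.llong	CPU_FTR_EXAMPLE		# mask
		.llong	CPU_FTR_EXAMPLE		# value: keep 97b..98b when the bit is set,
						# otherwise do_feature_fixups() nops it out
		.llong	97b - 99b		# start_off: self-relative, resolved by ld,
						# no run-time reloc needed in a .so
		.llong	98b - 99b		# end_off
		.previous

Because 97b and 98b sit in .text, which the linker scripts below place before __ftr_fixup, these differences resolve to negative values at link time, matching the note above about the offsets always being negative.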
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/kernel/cputable.c11
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/vdso.c43
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S12
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S10
-rw-r--r--arch/ppc/kernel/setup.c2
7 files changed, 74 insertions, 10 deletions
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6fdfaa4a82b..bfd499ee375 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1202,14 +1202,13 @@ struct cpu_spec *identify_cpu(unsigned long offset)
return NULL;
}
-void do_feature_fixups(unsigned long offset, unsigned long value,
- void *fixup_start, void *fixup_end)
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
struct fixup_entry {
unsigned long mask;
unsigned long value;
- unsigned int *start;
- unsigned int *end;
+ long start_off;
+ long end_off;
} *fcur, *fend;
fcur = fixup_start;
@@ -1224,8 +1223,8 @@ void do_feature_fixups(unsigned long offset, unsigned long value,
/* These PTRRELOCs will disappear once the new scheme for
* modules and vdso is implemented
*/
- pstart = PTRRELOC(fcur->start);
- pend = PTRRELOC(fcur->end);
+ pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+ pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
for (p = pstart; p < pend; p++) {
*p = 0x60000000u;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 769e511783b..a4c2964a3ca 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -103,7 +103,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
*/
spec = identify_cpu(offset);
- do_feature_fixups(offset, spec->cpu_features,
+ do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1969b5686ee..16278968dab 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -354,9 +354,9 @@ void __init setup_system(void)
/* Apply the CPUs-specific and firmware specific fixups to kernel
* text (nop out sections not relevant to this CPU or this firmware)
*/
- do_feature_fixups(0, cur_cpu_spec->cpu_features,
+ do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
- do_feature_fixups(0, powerpc_firmware_features,
+ do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
/*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1a7e19cdab3..c913ad5cad2 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -36,6 +36,8 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
+#include "setup.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return 0;
}
+
+static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
+ struct lib64_elfinfo *v64)
+{
+ void *start32;
+ unsigned long size32;
+
+#ifdef CONFIG_PPC64
+ void *start64;
+ unsigned long size64;
+
+ start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start64, start64 + size64);
+
+ start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(powerpc_firmware_features,
+ start64, start64 + size64);
+#endif /* CONFIG_PPC64 */
+
+ start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start32, start32 + size32);
+
+#ifdef CONFIG_PPC64
+ start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(powerpc_firmware_features,
+ start32, start32 + size32);
+#endif /* CONFIG_PPC64 */
+
+ return 0;
+}
+
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
if (vdso_fixup_datapage(&v32, &v64))
return -1;
+ if (vdso_fixup_features(&v32, &v64))
+ return -1;
+
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
@@ -714,6 +756,7 @@ void __init vdso_init(void)
* Setup the syscall map in the vDSO
*/
vdso_setup_syscall_map();
+
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 6187af2d54c..26e138c4ce1 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -32,6 +32,18 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+#ifdef CONFIG_PPC64
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+#endif
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960..2d70f35d50b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -31,6 +31,16 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 41a640f16bd..27faeca2c7a 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -314,7 +314,7 @@ early_init(int r3, int r4, int r5)
* that depend on which cpu we have.
*/
spec = identify_cpu(offset);
- do_feature_fixups(offset, spec->cpu_features,
+ do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));