author     Jan Beulich <jbeulich@novell.com>        2007-05-02 19:27:10 +0200
committer  Andi Kleen <andi@basil.nowhere.org>      2007-05-02 19:27:10 +0200
commit     6fb14755a676282a4e6caa05a08c92db8e45cfff (patch)
tree       71a862edf87cafe61986c0aff90db72045cf14c3
parent     d01ad8dd56527be72947b4b9997bb2c05783c3ed (diff)
[PATCH] x86: tighten kernel image page access rights
On x86-64, kernel memory freed after init can be entirely unmapped instead of just getting 'poisoned' by overwriting with a debug pattern. On i386 and x86-64 (under CONFIG_DEBUG_RODATA), kernel text and the bug table can also be write-protected.

Compared to the first version, this one prevents re-creating deleted mappings in the kernel image range on x86-64, if those got removed previously. This, together with the original changes, prevents temporarily having inconsistent mappings when cacheability attributes are being changed on such pages (e.g. from AGP code).

While on i386 such duplicate mappings don't exist, the same change is done there, too, both for consistency and because checking pte_present() before using various other pte_XXX functions is a requirement anyway. At the same time, the i386 code gets adjusted to use pte_huge() instead of open-coding this.

AK: split out cpa() changes

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S     4
-rw-r--r--  arch/i386/mm/init.c               25
-rw-r--r--  arch/x86_64/kernel/head.S          1
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S   5
-rw-r--r--  arch/x86_64/mm/init.c             25
-rw-r--r--  include/linux/poison.h             3
6 files changed, 40 insertions, 23 deletions
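
For reference, a minimal standalone sketch of the write-protection pattern the i386 mark_rodata_ro() hunk below introduces, assuming the 2.6-era change_page_attr()/global_flush_tlb() interface declared in asm/cacheflush.h. The function name protect_kernel_image() is hypothetical and not part of the patch:

/*
 * Illustrative sketch only -- not part of the patch. Mirrors the
 * patched i386 mark_rodata_ro() below.
 */
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/cpumask.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

static void protect_kernel_image(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifdef CONFIG_HOTPLUG_CPU
	/* SMP alternatives may still need to patch the text. */
	if (num_possible_cpus() <= 1)
#endif
	{
		/* Kernel text: readable and executable, but not writable. */
		change_page_attr(virt_to_page(start),
				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
	}

	/*
	 * Everything from _etext up to __end_rodata (exception table,
	 * bug table, rodata) becomes read-only.
	 */
	start += size;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);

	/* change_page_attr() requires a global_flush_tlb() afterwards. */
	global_flush_tlb();
}

Note how the RODATA section is moved after BUG_TABLE in the linker scripts so that the whole range from _etext to __end_rodata can be covered by one contiguous read-only mapping.
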
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 6f38f818380..f4ec7223183 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -61,8 +61,6 @@ SECTIONS
__stop___ex_table = .;
}
- RODATA
-
BUG_TABLE
. = ALIGN(4);
@@ -72,6 +70,8 @@ SECTIONS
__tracedata_end = .;
}
+ RODATA
+
/* writeable */
. = ALIGN(4096);
.data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 23be1b0aafa..bd5ef371850 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
@@ -751,13 +752,25 @@ static int noinline do_test_wp_bit(void)
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)__start_rodata;
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
- for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
- change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
+#ifdef CONFIG_HOTPLUG_CPU
+ /* It must still be possible to apply SMP alternatives. */
+ if (num_possible_cpus() <= 1)
+#endif
+ {
+ change_page_attr(virt_to_page(start),
+ size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+ printk("Write protecting the kernel text: %luk\n", size >> 10);
+ }
- printk("Write protecting the kernel read-only data: %uk\n",
- (__end_rodata - __start_rodata) >> 10);
+ start += size;
+ size = (unsigned long)__end_rodata - start;
+ change_page_attr(virt_to_page(start),
+ size >> PAGE_SHIFT, PAGE_KERNEL_RO);
+ printk("Write protecting the kernel read-only data: %luk\n",
+ size >> 10);
/*
* change_page_attr() requires a global_flush_tlb() call after it.
@@ -781,7 +794,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
__free_page(page);
totalram_pages++;
}
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 36aa98a6d15..fd9fdfdd143 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -280,7 +280,6 @@ early_idt_ripmsg:
.balign PAGE_SIZE
ENTRY(stext)
-ENTRY(_stext)
#define NEXT_PAGE(name) \
.balign PAGE_SIZE; \
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 5176ecf006e..3bdeb88d28f 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET) {
/* First the code that has to be first for bootstrapping */
*(.bootstrap.text)
+ _stext = .;
/* Then all the functions that are "hot" in profiles, to group them
onto the same hugetlb entry */
#include "functionlist"
@@ -50,10 +51,10 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
__stop___ex_table = .;
- RODATA
-
BUG_TABLE
+ RODATA
+
. = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 69e22d3c923..e3134bc9a4f 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -563,21 +564,23 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
if (begin >= end)
return;
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
for (addr = begin; addr < end; addr += PAGE_SIZE) {
struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
ClearPageReserved(page);
init_page_count(page);
memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
+ if (addr >= __START_KERNEL_map)
+ change_page_attr_addr(addr, 1, __pgprot(0));
__free_page(page);
totalram_pages++;
}
+ if (addr > __START_KERNEL_map)
+ global_flush_tlb();
}
void free_initmem(void)
{
- memset(__initdata_begin, POISON_FREE_INITDATA,
- __initdata_end - __initdata_begin);
free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin),
__pa_symbol(&__init_end));
@@ -587,14 +590,18 @@ void free_initmem(void)
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
- unsigned long end = (unsigned long)__va(__pa_symbol(&__end_rodata));
+ unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;
- for (; addr < end; addr += PAGE_SIZE)
- change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+#ifdef CONFIG_HOTPLUG_CPU
+ /* It must still be possible to apply SMP alternatives. */
+ if (num_possible_cpus() > 1)
+ start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
+#endif
+ size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
+ change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);
- printk ("Write protecting the kernel read-only data: %luk\n",
- (__end_rodata - __start_rodata) >> 10);
+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ size >> 10);
/*
* change_page_attr_addr() requires a global_flush_tlb() call after it.
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 3e628f990fd..89580b76495 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -26,9 +26,6 @@
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
-/********** arch/x86_64/mm/init.c **********/
-#define POISON_FREE_INITDATA 0xba
-
/********** arch/ia64/hp/common/sba_iommu.c **********/
/*
* arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a