rpms/kernel/devel linux-2.6-execshield-fixes.patch, NONE, 1.1 kernel-2.6.spec, 1.1834, 1.1835 linux-2.6-execshield-vdso.patch, 1.1, 1.2 linux-2.6-execshield.patch, 1.6, 1.7
fedora-cvs-commits at redhat.com
fedora-cvs-commits at redhat.com
Mon Jan 9 19:24:10 UTC 2006
Author: davej
Update of /cvs/dist/rpms/kernel/devel
In directory cvs.devel.redhat.com:/tmp/cvs-serv28266
Modified Files:
kernel-2.6.spec linux-2.6-execshield-vdso.patch
linux-2.6-execshield.patch
Added Files:
linux-2.6-execshield-fixes.patch
Log Message:
split up the execshield patches again
linux-2.6-execshield-fixes.patch:
linux-2.6.15/fs/proc/base.c | 2 +-
linux-exec-shield-curr.q/arch/i386/kernel/traps.c | 2 +-
linux-exec-shield-curr.q/include/asm-i386/desc.h | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
--- NEW FILE linux-2.6-execshield-fixes.patch ---
arch/i386/kernel/traps.c | 2 +-
include/asm-i386/desc.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
Index: linux-exec-shield-curr.q/arch/i386/kernel/traps.c
===================================================================
--- linux-exec-shield-curr.q.orig/arch/i386/kernel/traps.c
+++ linux-exec-shield-curr.q/arch/i386/kernel/traps.c
@@ -494,7 +494,7 @@ check_lazy_exec_limit(int cpu, struct pt
set_user_cs(&current->mm->context.user_cs, limit);
desc1 = &current->mm->context.user_cs;
- desc2 = per_cpu(cpu_gdt_table, cpu) + GDT_ENTRY_DEFAULT_USER_CS;
+ desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
if (desc1->a != desc2->a || desc1->b != desc2->b) {
/*
Index: linux-exec-shield-curr.q/include/asm-i386/desc.h
===================================================================
--- linux-exec-shield-curr.q.orig/include/asm-i386/desc.h
+++ linux-exec-shield-curr.q/include/asm-i386/desc.h
@@ -168,7 +168,7 @@ static inline void set_user_cs(struct de
}
#define load_user_cs_desc(cpu, mm) \
- per_cpu(cpu_gdt_table, (cpu))[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
--- linux-2.6.15/fs/proc/base.c~ 2006-01-09 14:23:23.000000000 -0500
+++ linux-2.6.15/fs/proc/base.c 2006-01-09 14:23:34.000000000 -0500
@@ -201,7 +201,7 @@ static struct pid_entry tgid_base_stuff[
E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
#ifdef CONFIG_MMU
- E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO),
+ E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUSR),
#endif
#ifdef CONFIG_SECURITY
E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.1834
retrieving revision 1.1835
diff -u -r1.1834 -r1.1835
--- kernel-2.6.spec 9 Jan 2006 18:55:14 -0000 1.1834
+++ kernel-2.6.spec 9 Jan 2006 19:24:08 -0000 1.1835
@@ -705,7 +705,7 @@
%endif
# Fix up the vdso.
-#%patch812 -p1
+%patch812 -p1
# Xen vDSO hack
%if %{includexen}
linux-2.6-execshield-vdso.patch:
arch/i386/kernel/sysenter.c | 69 ++++++++++++++++++++++----------------
fs/binfmt_elf.c | 15 ++------
fs/proc/task_mmu.c | 9 +++--
include/asm-i386/elf.h | 7 +++
include/asm-i386/page.h | 5 ++
include/linux/mm.h | 5 ++
kernel/sysctl.c | 10 +++++
mm/mmap.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
8 files changed, 155 insertions(+), 43 deletions(-)
Index: linux-2.6-execshield-vdso.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/linux-2.6-execshield-vdso.patch,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- linux-2.6-execshield-vdso.patch 13 Sep 2005 06:56:07 -0000 1.1
+++ linux-2.6-execshield-vdso.patch 9 Jan 2006 19:24:08 -0000 1.2
@@ -1,57 +1,125 @@
---- linux-2.6.13/include/asm-i386/page.h.vdso
-+++ linux-2.6.13/include/asm-i386/page.h
-@@ -120,6 +120,11 @@ extern int devmem_is_allowed(unsigned lo
- #endif
- #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
+ arch/i386/kernel/sysenter.c | 69 ++++++++++++++++++++++----------------
+ fs/binfmt_elf.c | 15 ++------
+ fs/proc/task_mmu.c | 9 +++--
+ include/asm-i386/elf.h | 7 +++
+ include/asm-i386/page.h | 5 ++
+ include/linux/mm.h | 5 ++
+ kernel/sysctl.c | 10 +++++
+ mm/mmap.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
+ 8 files changed, 155 insertions(+), 43 deletions(-)
+
+Index: linux/arch/i386/kernel/sysenter.c
+===================================================================
+--- linux.orig/arch/i386/kernel/sysenter.c
++++ linux/arch/i386/kernel/sysenter.c
+@@ -47,20 +47,13 @@ void enable_sep_cpu(void)
+ extern const char vsyscall_int80_start, vsyscall_int80_end;
+ extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+
+-struct page *sysenter_page;
++static struct page *sysenter_pages[2];
+
+ int __init sysenter_setup(void)
+ {
+ void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+- /*
+- * We keep this page mapped readonly, even though the executable
+- * portion is randomized into a userspace vma - so that we dont
+- * have to fix up the data within the VDSO page every time we
+- * exec().
+- */
+- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_KERNEL_RO);
+- sysenter_page = virt_to_page(page);
++ sysenter_pages[0] = virt_to_page(page);
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ memcpy(page,
+@@ -78,42 +71,58 @@ int __init sysenter_setup(void)
+
+ extern void SYSENTER_RETURN_OFFSET;
+
+-unsigned int vdso_enabled = 0;
++unsigned int vdso_enabled = 1;
+
+-void map_vsyscall(void)
+/*
-+ * Under exec-shield we don't use the generic fixmap gate area.
-+ * The vDSO ("gate area") has a normal vma found the normal ways.
++ * This is called from binfmt_elf, we create the special vma for the
++ * vDSO and insert it into the mm struct tree.
+ */
-+#define __HAVE_ARCH_GATE_AREA 1
-
- #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
- #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
---- linux-2.6.13/include/asm-i386/elf.h.vdso
-+++ linux-2.6.13/include/asm-i386/elf.h
-@@ -146,6 +146,12 @@ do { \
- } \
- } while (0)
++int arch_setup_additional_pages(struct linux_binprm *bprm,
++ int executable_stack)
+ {
+ struct thread_info *ti = current_thread_info();
+- struct vm_area_struct *vma;
+- unsigned long addr;
++ unsigned long addr, len;
++ int err;
-+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-+struct linux_binprm;
-+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-+ int executable_stack);
-+
-+#if 0 /* Disabled for exec-shield, where a normal vma holds the vDSO. */
- /*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the vsyscall DSO contents. Dumping its
-@@ -189,6 +195,7 @@ do { \
- PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
- } \
- } while (0)
-+#endif
+- if (unlikely(!vdso_enabled)) {
+ current->mm->context.vdso = NULL;
+- return;
+- }
++ if (unlikely(!vdso_enabled) || unlikely(!sysenter_pages[0]))
++ return 0;
- #endif
+ /*
+ * Map the vDSO (it will be randomized):
+ */
+ down_write(&current->mm->mmap_sem);
+- addr = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, 0);
+- current->mm->context.vdso = (void *)addr;
+- ti->sysenter_return = (void *)addr + (long)&SYSENTER_RETURN_OFFSET;
+- if (addr != -1) {
+- vma = find_vma(current->mm, addr);
+- if (vma) {
+- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+- get_page(sysenter_page);
+- install_page(current->mm, vma, addr,
+- sysenter_page, vma->vm_page_prot);
++ len = PAGE_SIZE > ELF_EXEC_PAGESIZE ? PAGE_SIZE : ELF_EXEC_PAGESIZE;
++ addr = get_unmapped_area_prot(NULL, 0, len, 0,
++ MAP_PRIVATE, PROT_READ | PROT_EXEC);
++ if (unlikely(addr & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ return addr;
+ }
++ err = install_special_mapping(current->mm, addr, len,
++ VM_DONTEXPAND | VM_READ | VM_EXEC |
++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
++ PAGE_READONLY_EXEC,
++ sysenter_pages);
++ if (likely(err == 0)) {
++ current->mm->context.vdso = (void *)addr;
++ ti->sysenter_return = &SYSENTER_RETURN_OFFSET + addr;
+ }
+ up_write(&current->mm->mmap_sem);
++ return err;
+ }
---- linux-2.6.13/include/linux/mm.h.vdso
-+++ linux-2.6.13/include/linux/mm.h
-@@ -848,6 +848,11 @@ static inline unsigned long get_unmapped
- return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+-static int __init vdso_setup(char *str)
++int in_gate_area_no_task(unsigned long addr)
+ {
+- vdso_enabled = simple_strtoul(str, NULL, 0);
+- return 1;
++ return 0;
}
+-__setup("vdso=", vdso_setup);
-+extern int install_special_mapping(struct mm_struct *mm,
-+ unsigned long addr, unsigned long len,
-+ unsigned long vm_flags, pgprot_t pgprot,
-+ struct page **pages);
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++ return 0;
++}
+
- extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flag, unsigned long pgoff);
---- linux-2.6.13/fs/binfmt_elf.c.vdso
-+++ linux-2.6.13/fs/binfmt_elf.c
-@@ -1001,8 +1001,6 @@ static int load_elf_binary(struct linux_
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++ return NULL;
++}
+Index: linux/fs/binfmt_elf.c
+===================================================================
+--- linux.orig/fs/binfmt_elf.c
++++ linux/fs/binfmt_elf.c
+@@ -1006,8 +1006,6 @@ static int load_elf_binary(struct linux_
elf_entry = loc->elf_ex.e_entry;
}
@@ -60,7 +128,7 @@
if (interpreter_type != INTERPRETER_AOUT)
sys_close(elf_exec_fileno);
-@@ -1012,17 +1010,11 @@ static int load_elf_binary(struct linux_
+@@ -1017,17 +1015,11 @@ static int load_elf_binary(struct linux_
retval = arch_setup_additional_pages(bprm, executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
@@ -80,7 +148,7 @@
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
-@@ -1227,6 +1219,9 @@ static int maydump(struct vm_area_struct
+@@ -1231,6 +1223,9 @@ static int maydump(struct vm_area_struct
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
@@ -90,9 +158,11 @@
/* Dump shared memory only if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED)
return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
---- linux-2.6.13/fs/proc/task_mmu.c.vdso
-+++ linux-2.6.13/fs/proc/task_mmu.c
-@@ -156,14 +156,19 @@ static int show_map_internal(struct seq_
+Index: linux/fs/proc/task_mmu.c
+===================================================================
+--- linux.orig/fs/proc/task_mmu.c
++++ linux/fs/proc/task_mmu.c
+@@ -173,14 +173,19 @@ static int show_map_internal(struct seq_
if (vma->vm_end == mm->brk) {
pad_len_spaces(m, len);
seq_puts(m, "[heap]");
@@ -114,9 +184,96 @@
} else {
pad_len_spaces(m, len);
seq_puts(m, "[vdso]");
---- linux-2.6.13/mm/mmap.c.vdso
-+++ linux-2.6.13/mm/mmap.c
-@@ -2155,3 +2155,81 @@ int may_expand_vm(struct mm_struct *mm,
+Index: linux/include/asm-i386/elf.h
+===================================================================
+--- linux.orig/include/asm-i386/elf.h
++++ linux/include/asm-i386/elf.h
+@@ -148,6 +148,12 @@ do { \
+ } \
+ } while (0)
+
++#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
++struct linux_binprm;
++extern int arch_setup_additional_pages(struct linux_binprm *bprm,
++ int executable_stack);
++
++#if 0 /* Disabled for exec-shield, where a normal vma holds the vDSO. */
+ /*
+ * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the vsyscall DSO contents. Dumping its
+@@ -191,6 +197,7 @@ do { \
+ PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
+ } \
+ } while (0)
++#endif
+
+ #endif
+
+Index: linux/include/asm-i386/page.h
+===================================================================
+--- linux.orig/include/asm-i386/page.h
++++ linux/include/asm-i386/page.h
+@@ -118,6 +118,11 @@ extern int page_is_ram(unsigned long pag
+ #endif
+ #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
+
++/*
++ * Under exec-shield we don't use the generic fixmap gate area.
++ * The vDSO ("gate area") has a normal vma found the normal ways.
++ */
++#define __HAVE_ARCH_GATE_AREA 1
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+Index: linux/include/linux/mm.h
+===================================================================
+--- linux.orig/include/linux/mm.h
++++ linux/include/linux/mm.h
+@@ -923,6 +923,11 @@ static inline unsigned long get_unmapped
+ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+ }
+
++extern int install_special_mapping(struct mm_struct *mm,
++ unsigned long addr, unsigned long len,
++ unsigned long vm_flags, pgprot_t pgprot,
++ struct page **pages);
++
+ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+Index: linux/kernel/sysctl.c
+===================================================================
+--- linux.orig/kernel/sysctl.c
++++ linux/kernel/sysctl.c
+@@ -77,7 +77,7 @@ extern int proc_unknown_nmi_panic(ctl_ta
+ void __user *, size_t *, loff_t *);
+ #endif
+
+-extern unsigned int vdso_enabled;
++extern unsigned int vdso_enabled, vdso_populate;
+
+ int exec_shield = 1;
+
+@@ -317,6 +317,14 @@ static ctl_table kern_table[] = {
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++ {
++ .ctl_name = KERN_VDSO,
++ .procname = "vdso_populate",
++ .data = &vdso_populate,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
+ #endif
+ {
+ .ctl_name = KERN_CORE_USES_PID,
+Index: linux/mm/mmap.c
+===================================================================
+--- linux.orig/mm/mmap.c
++++ linux/mm/mmap.c
+@@ -2149,3 +2149,81 @@ int may_expand_vm(struct mm_struct *mm,
return 0;
return 1;
}
@@ -198,134 +355,3 @@
+
+ return err;
+}
---- linux-2.6.13/arch/i386/kernel/sysenter.c.vdso
-+++ linux-2.6.13/arch/i386/kernel/sysenter.c
-@@ -47,20 +47,13 @@ void enable_sep_cpu(void)
- extern const char vsyscall_int80_start, vsyscall_int80_end;
- extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-
--struct page *sysenter_page;
-+static struct page *sysenter_pages[2];
-
- int __init sysenter_setup(void)
- {
- void *page = (void *)get_zeroed_page(GFP_ATOMIC);
-
-- /*
-- * We keep this page mapped readonly, even though the executable
-- * portion is randomized into a userspace vma - so that we dont
-- * have to fix up the data within the VDSO page every time we
-- * exec().
-- */
-- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_KERNEL_RO);
-- sysenter_page = virt_to_page(page);
-+ sysenter_pages[0] = virt_to_page(page);
-
- if (!boot_cpu_has(X86_FEATURE_SEP)) {
- memcpy(page,
-@@ -78,37 +71,60 @@ int __init sysenter_setup(void)
-
- extern void SYSENTER_RETURN_OFFSET;
-
--unsigned int vdso_enabled = 0;
-+unsigned int vdso_enabled = 1;
-
--void map_vsyscall(void)
-+/*
-+ * This is called from binfmt_elf, we create the special vma for the
-+ * vDSO and insert it into the mm struct tree.
-+ */
-+int arch_setup_additional_pages(struct linux_binprm *bprm,
-+ int executable_stack)
- {
- struct thread_info *ti = current_thread_info();
-- struct vm_area_struct *vma;
-- unsigned long addr;
-+ unsigned long addr, len;
-+ int err;
-
-- if (unlikely(!vdso_enabled)) {
- current->mm->context.vdso = NULL;
-- return;
-- }
-+ if (unlikely(!vdso_enabled) || unlikely(!sysenter_pages[0]))
-+ return 0;
-
- /*
- * Map the vDSO (it will be randomized):
- */
- down_write(&current->mm->mmap_sem);
-- addr = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, 0);
-- current->mm->context.vdso = (void *)addr;
-- ti->sysenter_return = (void *)addr + (long)&SYSENTER_RETURN_OFFSET;
-- if (addr != -1) {
-- vma = find_vma(current->mm, addr);
-- if (vma) {
-- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-- get_page(sysenter_page);
-- install_page(current->mm, vma, addr,
-- sysenter_page, vma->vm_page_prot);
--
-+ len = PAGE_SIZE > ELF_EXEC_PAGESIZE ? PAGE_SIZE : ELF_EXEC_PAGESIZE;
-+ addr = get_unmapped_area_prot(NULL, 0, len, 0,
-+ MAP_PRIVATE, PROT_READ | PROT_EXEC);
-+ if (unlikely(addr & ~PAGE_MASK)) {
-+ up_write(&current->mm->mmap_sem);
-+ return addr;
- }
-+ err = install_special_mapping(current->mm, addr, len,
-+ VM_DONTEXPAND | VM_READ | VM_EXEC |
-+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-+ PAGE_READONLY_EXEC,
-+ sysenter_pages);
-+ if (likely(err == 0)) {
-+ current->mm->context.vdso = (void *)addr;
-+ ti->sysenter_return = &SYSENTER_RETURN_OFFSET + addr;
- }
- up_write(&current->mm->mmap_sem);
-+ return err;
-+}
-+
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+ return 0;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+ return 0;
-+}
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+ return NULL;
- }
-
- static int __init vdso_setup(char *str)
---- linux-2.6.13/kernel/sysctl.c
-+++ linux-2.6.13/kernel/sysctl.c
-@@ -74,7 +74,7 @@ extern int proc_unknown_nmi_panic(ctl_ta
- void __user *, size_t *, loff_t *);
- #endif
-
--extern unsigned int vdso_enabled;
-+extern unsigned int vdso_enabled, vdso_populate;
-
- int exec_shield = 1;
-
-@@ -316,6 +316,14 @@ static ctl_table kern_table[] = {
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
-+ {
-+ .ctl_name = KERN_VDSO,
-+ .procname = "vdso_populate",
-+ .data = &vdso_populate,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec,
-+ },
- #endif
- {
- .ctl_name = KERN_CORE_USES_PID,
-
linux-2.6-execshield.patch:
arch/i386/kernel/asm-offsets.c | 1
arch/i386/kernel/cpu/common.c | 7 +
arch/i386/kernel/entry.S | 8 +-
arch/i386/kernel/process.c | 59 +++++++++++++++
arch/i386/kernel/signal.c | 4 -
arch/i386/kernel/smp.c | 3
arch/i386/kernel/sysenter.c | 56 ++++++++++++++-
arch/i386/kernel/traps.c | 93 ++++++++++++++++++++++++-
arch/i386/kernel/vsyscall-sysenter.S | 6 -
arch/i386/kernel/vsyscall.lds.S | 4 -
arch/i386/mm/init.c | 6 +
arch/i386/mm/mmap.c | 6 +
arch/ia64/ia32/binfmt_elf32.c | 2
arch/x86_64/ia32/ia32_binfmt.c | 4 -
arch/x86_64/kernel/process.c | 6 -
arch/x86_64/kernel/setup64.c | 23 ------
arch/x86_64/mm/Makefile | 2
arch/x86_64/mm/fault.c | 2
arch/x86_64/mm/mmap.c | 95 +++++++++++++++++++++++++
drivers/char/random.c | 7 +
fs/binfmt_elf.c | 130 ++++++++++++++++++++++++++++-------
fs/proc/array.c | 8 +-
fs/proc/base.c | 4 -
fs/proc/task_mmu.c | 25 +++++-
include/asm-i386/desc.h | 14 +++
include/asm-i386/elf.h | 42 +++++++----
include/asm-i386/mmu.h | 6 +
include/asm-i386/pgalloc.h | 1
include/asm-i386/processor.h | 8 +-
include/asm-i386/thread_info.h | 1
include/asm-ia64/pgalloc.h | 4 +
include/asm-powerpc/pgalloc.h | 5 +
include/asm-ppc/pgalloc.h | 5 +
include/asm-s390/pgalloc.h | 4 +
include/asm-sparc/pgalloc.h | 4 +
include/asm-sparc64/pgalloc.h | 4 +
include/asm-x86_64/pgalloc.h | 7 +
include/asm-x86_64/pgtable.h | 2
include/asm-x86_64/processor.h | 5 +
include/linux/mm.h | 11 ++
include/linux/resource.h | 5 +
include/linux/sched.h | 9 ++
include/linux/sysctl.h | 3
kernel/signal.c | 38 ++++++++++
kernel/sysctl.c | 39 ++++++++++
mm/fremap.c | 16 ++--
mm/mmap.c | 105 ++++++++++++++++++++++++++--
mm/mprotect.c | 5 +
mm/mremap.c | 4 -
49 files changed, 784 insertions(+), 124 deletions(-)
Index: linux-2.6-execshield.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/linux-2.6-execshield.patch,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- linux-2.6-execshield.patch 9 Jan 2006 18:55:15 -0000 1.6
+++ linux-2.6-execshield.patch 9 Jan 2006 19:24:08 -0000 1.7
@@ -1,7 +1,58 @@
-Index: linux-exec-shield-curr.q/arch/i386/kernel/asm-offsets.c
+ arch/i386/kernel/asm-offsets.c | 1
+ arch/i386/kernel/cpu/common.c | 7 +
+ arch/i386/kernel/entry.S | 8 +-
+ arch/i386/kernel/process.c | 59 +++++++++++++++
+ arch/i386/kernel/signal.c | 4 -
+ arch/i386/kernel/smp.c | 3
+ arch/i386/kernel/sysenter.c | 56 ++++++++++++++-
+ arch/i386/kernel/traps.c | 93 ++++++++++++++++++++++++-
+ arch/i386/kernel/vsyscall-sysenter.S | 6 -
+ arch/i386/kernel/vsyscall.lds.S | 4 -
+ arch/i386/mm/init.c | 6 +
+ arch/i386/mm/mmap.c | 6 +
+ arch/ia64/ia32/binfmt_elf32.c | 2
+ arch/x86_64/ia32/ia32_binfmt.c | 4 -
+ arch/x86_64/kernel/process.c | 6 -
+ arch/x86_64/kernel/setup64.c | 23 ------
+ arch/x86_64/mm/Makefile | 2
+ arch/x86_64/mm/fault.c | 2
+ arch/x86_64/mm/mmap.c | 95 +++++++++++++++++++++++++
+ drivers/char/random.c | 7 +
+ fs/binfmt_elf.c | 130 ++++++++++++++++++++++++++++-------
+ fs/proc/array.c | 8 +-
+ fs/proc/base.c | 4 -
+ fs/proc/task_mmu.c | 25 +++++-
+ include/asm-i386/desc.h | 14 +++
+ include/asm-i386/elf.h | 42 +++++++----
+ include/asm-i386/mmu.h | 6 +
+ include/asm-i386/pgalloc.h | 1
+ include/asm-i386/processor.h | 8 +-
+ include/asm-i386/thread_info.h | 1
+ include/asm-ia64/pgalloc.h | 4 +
+ include/asm-powerpc/pgalloc.h | 5 +
+ include/asm-ppc/pgalloc.h | 5 +
+ include/asm-s390/pgalloc.h | 4 +
+ include/asm-sparc/pgalloc.h | 4 +
+ include/asm-sparc64/pgalloc.h | 4 +
+ include/asm-x86_64/pgalloc.h | 7 +
+ include/asm-x86_64/pgtable.h | 2
+ include/asm-x86_64/processor.h | 5 +
+ include/linux/mm.h | 11 ++
+ include/linux/resource.h | 5 +
+ include/linux/sched.h | 9 ++
+ include/linux/sysctl.h | 3
+ kernel/signal.c | 38 ++++++++++
+ kernel/sysctl.c | 39 ++++++++++
+ mm/fremap.c | 16 ++--
+ mm/mmap.c | 105 ++++++++++++++++++++++++++--
+ mm/mprotect.c | 5 +
+ mm/mremap.c | 4 -
+ 49 files changed, 784 insertions(+), 124 deletions(-)
+
+Index: linux/arch/i386/kernel/asm-offsets.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/asm-offsets.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/asm-offsets.c
+--- linux.orig/arch/i386/kernel/asm-offsets.c
++++ linux/arch/i386/kernel/asm-offsets.c
@@ -53,6 +53,7 @@ void foo(void)
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_addr_limit, thread_info, addr_limit);
@@ -10,10 +61,10 @@
BLANK();
OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
-Index: linux-exec-shield-curr.q/arch/i386/kernel/cpu/common.c
+Index: linux/arch/i386/kernel/cpu/common.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/cpu/common.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/cpu/common.c
+--- linux.orig/arch/i386/kernel/cpu/common.c
++++ linux/arch/i386/kernel/cpu/common.c
@@ -392,6 +392,13 @@ void __devinit identify_cpu(struct cpuin
if (disable_pse)
clear_bit(X86_FEATURE_PSE, c->x86_capability);
@@ -28,10 +79,10 @@
/* If the model name is still unset, do table lookup. */
if ( !c->x86_model_id[0] ) {
char *p;
-Index: linux-exec-shield-curr.q/arch/i386/kernel/entry.S
+Index: linux/arch/i386/kernel/entry.S
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/entry.S
-+++ linux-exec-shield-curr.q/arch/i386/kernel/entry.S
+--- linux.orig/arch/i386/kernel/entry.S
++++ linux/arch/i386/kernel/entry.S
@@ -184,8 +184,12 @@ sysenter_past_esp:
pushl %ebp
pushfl
@@ -47,10 +98,10 @@
/*
* Load the potential sixth argument from user stack.
* Careful about security.
-Index: linux-exec-shield-curr.q/arch/i386/kernel/process.c
+Index: linux/arch/i386/kernel/process.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/process.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/process.c
+--- linux.orig/arch/i386/kernel/process.c
++++ linux/arch/i386/kernel/process.c
@@ -652,6 +652,8 @@ struct task_struct fastcall * __switch_t
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
@@ -121,10 +172,10 @@
+ current->mm->brk = new_brk;
+}
+
-Index: linux-exec-shield-curr.q/arch/i386/kernel/signal.c
+Index: linux/arch/i386/kernel/signal.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/signal.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/signal.c
+--- linux.orig/arch/i386/kernel/signal.c
++++ linux/arch/i386/kernel/signal.c
@@ -384,7 +384,7 @@ static int setup_frame(int sig, struct k
goto give_sigsegv;
}
@@ -143,10 +194,10 @@
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
-Index: linux-exec-shield-curr.q/arch/i386/kernel/smp.c
+Index: linux/arch/i386/kernel/smp.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/smp.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/smp.c
+--- linux.orig/arch/i386/kernel/smp.c
++++ linux/arch/i386/kernel/smp.c
@@ -23,6 +23,7 @@
#include <asm/mtrr.h>
@@ -164,10 +215,10 @@
if (!cpu_isset(cpu, flush_cpumask))
goto out;
-Index: linux-exec-shield-curr.q/arch/i386/kernel/sysenter.c
+Index: linux/arch/i386/kernel/sysenter.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/sysenter.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/sysenter.c
+--- linux.orig/arch/i386/kernel/sysenter.c
++++ linux/arch/i386/kernel/sysenter.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include <linux/string.h>
@@ -185,87 +236,78 @@
}
/*
-@@ -46,11 +47,13 @@ void enable_sep_cpu(void)
+@@ -46,11 +47,20 @@ void enable_sep_cpu(void)
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-+static struct page *sysenter_pages[2];
++struct page *sysenter_page;
+
int __init sysenter_setup(void)
{
void *page = (void *)get_zeroed_page(GFP_ATOMIC);
- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
-+ sysenter_pages[0] = virt_to_page(page);
++ /*
++ * We keep this page mapped readonly, even though the executable
++ * portion is randomized into a userspace vma - so that we dont
++ * have to fix up the data within the VDSO page every time we
++ * exec().
++ */
++ __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_KERNEL_RO);
++ sysenter_page = virt_to_page(page);
if (!boot_cpu_has(X86_FEATURE_SEP)) {
memcpy(page,
-@@ -65,3 +68,61 @@ int __init sysenter_setup(void)
+@@ -65,3 +75,45 @@ int __init sysenter_setup(void)
return 0;
}
+
+extern void SYSENTER_RETURN_OFFSET;
+
-+unsigned int vdso_enabled = 1;
++unsigned int vdso_enabled = 0;
+
-+/*
-+ * This is called from binfmt_elf, we create the special vma for the
-+ * vDSO and insert it into the mm struct tree.
-+ */
-+int arch_setup_additional_pages(struct linux_binprm *bprm,
-+ int executable_stack)
++void map_vsyscall(void)
+{
+ struct thread_info *ti = current_thread_info();
-+ unsigned long addr, len;
-+ int err;
++ struct vm_area_struct *vma;
++ unsigned long addr;
+
++ if (unlikely(!vdso_enabled)) {
+ current->mm->context.vdso = NULL;
-+ if (unlikely(!vdso_enabled) || unlikely(!sysenter_pages[0]))
-+ return 0;
++ return;
++ }
+
+ /*
+ * Map the vDSO (it will be randomized):
+ */
+ down_write(&current->mm->mmap_sem);
-+ len = PAGE_SIZE > ELF_EXEC_PAGESIZE ? PAGE_SIZE : ELF_EXEC_PAGESIZE;
-+ addr = get_unmapped_area_prot(NULL, 0, len, 0,
-+ MAP_PRIVATE, PROT_READ | PROT_EXEC);
-+ if (unlikely(addr & ~PAGE_MASK)) {
-+ up_write(¤t->mm->mmap_sem);
-+ return addr;
++ addr = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, 0);
++ current->mm->context.vdso = (void *)addr;
++ ti->sysenter_return = (void *)addr + (long)&SYSENTER_RETURN_OFFSET;
++ if (addr != -1) {
++ vma = find_vma(current->mm, addr);
++ if (vma) {
++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
++ get_page(sysenter_page);
++ install_page(current->mm, vma, addr,
++ sysenter_page, vma->vm_page_prot);
+ }
-+ err = install_special_mapping(current->mm, addr, len,
-+ VM_DONTEXPAND | VM_READ | VM_EXEC |
-+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-+ PAGE_READONLY_EXEC,
-+ sysenter_pages);
-+ if (likely(err == 0)) {
-+ current->mm->context.vdso = (void *)addr;
-+ ti->sysenter_return = &SYSENTER_RETURN_OFFSET + addr;
+ }
+ up_write(&current->mm->mmap_sem);
-+ return err;
+}
+
-+int in_gate_area_no_task(unsigned long addr)
++static int __init vdso_setup(char *str)
+{
-+ return 0;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+ return 0;
++ vdso_enabled = simple_strtoul(str, NULL, 0);
++ return 1;
+}
++__setup("vdso=", vdso_setup);
+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+ return NULL;
-+}
-Index: linux-exec-shield-curr.q/arch/i386/kernel/traps.c
+Index: linux/arch/i386/kernel/traps.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/traps.c
-+++ linux-exec-shield-curr.q/arch/i386/kernel/traps.c
+--- linux.orig/arch/i386/kernel/traps.c
++++ linux/arch/i386/kernel/traps.c
@@ -461,7 +461,82 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", inv
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
@@ -304,7 +346,7 @@
+ set_user_cs(&current->mm->context.user_cs, limit);
+
+ desc1 = &current->mm->context.user_cs;
-+ desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
++ desc2 = per_cpu(cpu_gdt_table, cpu) + GDT_ENTRY_DEFAULT_USER_CS;
+
+ if (desc1->a != desc2->a || desc1->b != desc2->b) {
+ /*
@@ -398,10 +440,10 @@
if (!fixup_exception(regs)) {
if (notify_die(DIE_GPF, "general protection fault", regs,
error_code, 13, SIGSEGV) == NOTIFY_STOP)
-Index: linux-exec-shield-curr.q/arch/i386/kernel/vsyscall-sysenter.S
+Index: linux/arch/i386/kernel/vsyscall-sysenter.S
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/vsyscall-sysenter.S
-+++ linux-exec-shield-curr.q/arch/i386/kernel/vsyscall-sysenter.S
+--- linux.orig/arch/i386/kernel/vsyscall-sysenter.S
++++ linux/arch/i386/kernel/vsyscall-sysenter.S
@@ -24,11 +24,11 @@ __kernel_vsyscall:
/* 7: align return point with nop's to make disassembly easier */
.space 7,0x90
@@ -417,10 +459,10 @@
pop %ebp
.Lpop_ebp:
pop %edx
-Index: linux-exec-shield-curr.q/arch/i386/kernel/vsyscall.lds.S
+Index: linux/arch/i386/kernel/vsyscall.lds.S
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/kernel/vsyscall.lds.S
-+++ linux-exec-shield-curr.q/arch/i386/kernel/vsyscall.lds.S
+--- linux.orig/arch/i386/kernel/vsyscall.lds.S
++++ linux/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
SECTIONS
@@ -439,10 +481,10 @@
.text : { *(.text) } :text =0x90909090
.note : { *(.note.*) } :text :note
-Index: linux-exec-shield-curr.q/arch/i386/mm/init.c
+Index: linux/arch/i386/mm/init.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/mm/init.c
-+++ linux-exec-shield-curr.q/arch/i386/mm/init.c
+--- linux.orig/arch/i386/mm/init.c
++++ linux/arch/i386/mm/init.c
@@ -432,7 +432,7 @@ u64 __supported_pte_mask __read_mostly =
* Control non executable mappings.
*
@@ -471,10 +513,10 @@
pagetable_init();
-Index: linux-exec-shield-curr.q/arch/i386/mm/mmap.c
+Index: linux/arch/i386/mm/mmap.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/i386/mm/mmap.c
-+++ linux-exec-shield-curr.q/arch/i386/mm/mmap.c
+--- linux.orig/arch/i386/mm/mmap.c
++++ linux/arch/i386/mm/mmap.c
@@ -62,15 +62,17 @@ void arch_pick_mmap_layout(struct mm_str
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
@@ -495,10 +537,10 @@
mm->unmap_area = arch_unmap_area_topdown;
}
}
-Index: linux-exec-shield-curr.q/arch/ia64/ia32/binfmt_elf32.c
+Index: linux/arch/ia64/ia32/binfmt_elf32.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/ia64/ia32/binfmt_elf32.c
-+++ linux-exec-shield-curr.q/arch/ia64/ia32/binfmt_elf32.c
+--- linux.orig/arch/ia64/ia32/binfmt_elf32.c
++++ linux/arch/ia64/ia32/binfmt_elf32.c
@@ -264,7 +264,7 @@ elf32_set_personality (void)
}
@@ -508,10 +550,10 @@
{
unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
-Index: linux-exec-shield-curr.q/arch/x86_64/ia32/ia32_binfmt.c
+Index: linux/arch/x86_64/ia32/ia32_binfmt.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/x86_64/ia32/ia32_binfmt.c
-+++ linux-exec-shield-curr.q/arch/x86_64/ia32/ia32_binfmt.c
+--- linux.orig/arch/x86_64/ia32/ia32_binfmt.c
++++ linux/arch/x86_64/ia32/ia32_binfmt.c
@@ -248,8 +248,6 @@ elf_core_copy_task_xfpregs(struct task_s
#define elf_check_arch(x) \
((x)->e_machine == EM_386)
@@ -530,10 +572,10 @@
} while (0)
/* Override some function names */
-Index: linux-exec-shield-curr.q/arch/x86_64/kernel/process.c
+Index: linux/arch/x86_64/kernel/process.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/x86_64/kernel/process.c
-+++ linux-exec-shield-curr.q/arch/x86_64/kernel/process.c
+--- linux.orig/arch/x86_64/kernel/process.c
++++ linux/arch/x86_64/kernel/process.c
@@ -631,12 +631,6 @@ void set_personality_64bit(void)
/* Make sure to be in 64bit mode */
@@ -547,10 +589,10 @@
}
asmlinkage long sys_fork(struct pt_regs *regs)
-Index: linux-exec-shield-curr.q/arch/x86_64/kernel/setup64.c
+Index: linux/arch/x86_64/kernel/setup64.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/x86_64/kernel/setup64.c
-+++ linux-exec-shield-curr.q/arch/x86_64/kernel/setup64.c
+--- linux.orig/arch/x86_64/kernel/setup64.c
++++ linux/arch/x86_64/kernel/setup64.c
@@ -45,7 +45,7 @@ Control non executable mappings for 64bi
on Enable(default)
off Disable
@@ -589,10 +631,10 @@
/*
* Great future plan:
-Index: linux-exec-shield-curr.q/arch/x86_64/mm/Makefile
+Index: linux/arch/x86_64/mm/Makefile
===================================================================
---- linux-exec-shield-curr.q.orig/arch/x86_64/mm/Makefile
-+++ linux-exec-shield-curr.q/arch/x86_64/mm/Makefile
+--- linux.orig/arch/x86_64/mm/Makefile
++++ linux/arch/x86_64/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux x86_64-specific parts of the memory manager.
#
@@ -602,10 +644,10 @@
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
-Index: linux-exec-shield-curr.q/arch/x86_64/mm/fault.c
+Index: linux/arch/x86_64/mm/fault.c
===================================================================
---- linux-exec-shield-curr.q.orig/arch/x86_64/mm/fault.c
-+++ linux-exec-shield-curr.q/arch/x86_64/mm/fault.c
+--- linux.orig/arch/x86_64/mm/fault.c
++++ linux/arch/x86_64/mm/fault.c
@@ -74,7 +74,7 @@ static noinline int is_prefetch(struct p
instr = (unsigned char *)convert_rip_to_linear(current, regs);
max_instr = instr + 15;
@@ -615,10 +657,10 @@
return 0;
while (scan_more && instr < max_instr) {
-Index: linux-exec-shield-curr.q/arch/x86_64/mm/mmap.c
+Index: linux/arch/x86_64/mm/mmap.c
===================================================================
--- /dev/null
-+++ linux-exec-shield-curr.q/arch/x86_64/mm/mmap.c
++++ linux/arch/x86_64/mm/mmap.c
@@ -0,0 +1,95 @@
+/*
+ * linux/arch/x86-64/mm/mmap.c
@@ -715,10 +757,10 @@
+ return sp & ~0xf;
+}
+
-Index: linux-exec-shield-curr.q/drivers/char/random.c
+Index: linux/drivers/char/random.c
===================================================================
---- linux-exec-shield-curr.q.orig/drivers/char/random.c
-+++ linux-exec-shield-curr.q/drivers/char/random.c
+--- linux.orig/drivers/char/random.c
++++ linux/drivers/char/random.c
@@ -1632,13 +1632,18 @@ EXPORT_SYMBOL(secure_dccp_sequence_numbe
*/
unsigned int get_random_int(void)
@@ -739,10 +781,10 @@
}
/*
-Index: linux-exec-shield-curr.q/fs/binfmt_elf.c
+Index: linux/fs/binfmt_elf.c
===================================================================
---- linux-exec-shield-curr.q.orig/fs/binfmt_elf.c
-+++ linux-exec-shield-curr.q/fs/binfmt_elf.c
+--- linux.orig/fs/binfmt_elf.c
++++ linux/fs/binfmt_elf.c
@@ -47,7 +47,7 @@
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
@@ -974,30 +1016,22 @@
if (BAD_ADDR(elf_entry)) {
printk(KERN_ERR "Unable to load interpreter %.128s\n",
elf_interpreter);
-@@ -940,8 +1006,6 @@ static int load_elf_binary(struct linux_
- elf_entry = loc->elf_ex.e_entry;
- }
-
-- kfree(elf_phdata);
--
- if (interpreter_type != INTERPRETER_AOUT)
- sys_close(elf_exec_fileno);
-
-@@ -951,10 +1015,12 @@ static int load_elf_binary(struct linux_
- retval = arch_setup_additional_pages(bprm, executable_stack);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
-- goto out;
-+ goto out_free_fh;
+@@ -955,6 +1021,14 @@ static int load_elf_binary(struct linux_
}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
-+ kfree(elf_phdata);
++ /*
++ * Map the vsyscall trampoline. This address is then passed via
++ * AT_SYSINFO.
++ */
++#ifdef __HAVE_ARCH_VSYSCALL
++ map_vsyscall();
++#endif
+
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
-@@ -968,6 +1034,10 @@ static int load_elf_binary(struct linux_
+@@ -968,6 +1042,10 @@ static int load_elf_binary(struct linux_
current->mm->end_data = end_data;
current->mm->start_stack = bprm->p;
@@ -1008,20 +1042,10 @@
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
-@@ -1153,6 +1223,9 @@ static int maydump(struct vm_area_struct
- if (vma->vm_flags & (VM_IO | VM_RESERVED))
- return 0;
-
-+ if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
-+ return 1;
-+
- /* Dump shared memory only if mapped from an anonymous file. */
- if (vma->vm_flags & VM_SHARED)
- return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
-Index: linux-exec-shield-curr.q/fs/proc/array.c
+Index: linux/fs/proc/array.c
===================================================================
---- linux-exec-shield-curr.q.orig/fs/proc/array.c
-+++ linux-exec-shield-curr.q/fs/proc/array.c
+--- linux.orig/fs/proc/array.c
++++ linux/fs/proc/array.c
@@ -391,8 +391,12 @@ static int do_task_stat(struct task_stru
ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
read_unlock(&tasklist_lock);
@@ -1037,10 +1061,10 @@
if (!whole) {
min_flt = task->min_flt;
maj_flt = task->maj_flt;
-Index: linux-exec-shield-curr.q/fs/proc/base.c
+Index: linux/fs/proc/base.c
===================================================================
---- linux-exec-shield-curr.q.orig/fs/proc/base.c
-+++ linux-exec-shield-curr.q/fs/proc/base.c
+--- linux.orig/fs/proc/base.c
++++ linux/fs/proc/base.c
@@ -188,7 +188,7 @@ static struct pid_entry tgid_base_stuff[
E(PROC_TGID_CMDLINE, "cmdline", S_IFREG|S_IRUGO),
E(PROC_TGID_STAT, "stat", S_IFREG|S_IRUGO),
@@ -1059,10 +1083,10 @@
#ifdef CONFIG_NUMA
E(PROC_TID_NUMA_MAPS, "numa_maps", S_IFREG|S_IRUGO),
#endif
-Index: linux-exec-shield-curr.q/fs/proc/task_mmu.c
+Index: linux/fs/proc/task_mmu.c
===================================================================
---- linux-exec-shield-curr.q.orig/fs/proc/task_mmu.c
-+++ linux-exec-shield-curr.q/fs/proc/task_mmu.c
+--- linux.orig/fs/proc/task_mmu.c
++++ linux/fs/proc/task_mmu.c
@@ -43,7 +43,11 @@ char *task_mem(struct mm_struct *mm, cha
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
@@ -1106,7 +1130,7 @@
flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_pgoff << PAGE_SHIFT,
MAJOR(dev), MINOR(dev), ino, &len);
-@@ -154,18 +170,22 @@ static int show_map_internal(struct seq_
+@@ -154,8 +170,7 @@ static int show_map_internal(struct seq_
seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
} else {
if (mm) {
@@ -1115,28 +1139,11 @@
+ if (vma->vm_end == mm->brk) {
pad_len_spaces(m, len);
seq_puts(m, "[heap]");
-- } else {
-- if (vma->vm_start <= mm->start_stack &&
-+ } else if (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack) {
-
- pad_len_spaces(m, len);
- seq_puts(m, "[stack]");
- }
-+#ifdef __i386__
-+ else if (vma->vm_start ==
-+ (unsigned long)mm->context.vdso) {
-+ pad_len_spaces(m, len);
-+ seq_puts(m, "[vdso]");
- }
-+#endif
- } else {
- pad_len_spaces(m, len);
- seq_puts(m, "[vdso]");
-Index: linux-exec-shield-curr.q/include/asm-i386/desc.h
+ } else {
+Index: linux/include/asm-i386/desc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/desc.h
-+++ linux-exec-shield-curr.q/include/asm-i386/desc.h
+--- linux.orig/include/asm-i386/desc.h
++++ linux/include/asm-i386/desc.h
@@ -160,6 +160,20 @@ static inline unsigned long get_desc_bas
return base;
}
@@ -1149,7 +1156,7 @@
+}
+
+#define load_user_cs_desc(cpu, mm) \
-+ get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
++ per_cpu(cpu_gdt_table, (cpu))[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
+
+extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
+extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
@@ -1158,10 +1165,10 @@
#endif /* !__ASSEMBLY__ */
#endif
-Index: linux-exec-shield-curr.q/include/asm-i386/elf.h
+Index: linux/include/asm-i386/elf.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/elf.h
-+++ linux-exec-shield-curr.q/include/asm-i386/elf.h
+--- linux.orig/include/asm-i386/elf.h
++++ linux/include/asm-i386/elf.h
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/system.h> /* for savesegment */
@@ -1170,7 +1177,7 @@
#include <linux/utsname.h>
-@@ -129,17 +130,30 @@ extern int dump_task_extended_fpu (struc
+@@ -129,15 +130,22 @@ extern int dump_task_extended_fpu (struc
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
@@ -1199,16 +1206,8 @@
+ } \
} while (0)
-+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-+struct linux_binprm;
-+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-+ int executable_stack);
-+
-+#if 0 /* Disabled for exec-shield, where a normal vma holds the vDSO. */
/*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the vsyscall DSO contents. Dumping its
-@@ -148,15 +162,15 @@ do { \
+@@ -148,15 +156,15 @@ do { \
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the vsyscall DSO was being used.
*/
@@ -1228,7 +1227,7 @@
struct elf_phdr phdr = vsyscall_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
BUG_ON(ofs != 0); \
-@@ -174,16 +188,23 @@ do { \
+@@ -174,10 +182,10 @@ do { \
#define ELF_CORE_WRITE_EXTRA_DATA \
do { \
const struct elf_phdr *const vsyscall_phdrs = \
@@ -1242,9 +1241,7 @@
if (vsyscall_phdrs[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \
PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
- } \
- } while (0)
-+#endif
+@@ -186,4 +194,10 @@ do { \
#endif
@@ -1255,10 +1252,10 @@
+extern void map_vsyscall(void);
+
#endif
-Index: linux-exec-shield-curr.q/include/asm-i386/mmu.h
+Index: linux/include/asm-i386/mmu.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/mmu.h
-+++ linux-exec-shield-curr.q/include/asm-i386/mmu.h
+--- linux.orig/include/asm-i386/mmu.h
++++ linux/include/asm-i386/mmu.h
@@ -7,11 +7,17 @@
* we put the segment information here.
*
@@ -1277,26 +1274,10 @@
} mm_context_t;
#endif
-Index: linux-exec-shield-curr.q/include/asm-i386/page.h
-===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/page.h
-+++ linux-exec-shield-curr.q/include/asm-i386/page.h
-@@ -118,6 +118,11 @@ extern int page_is_ram(unsigned long pag
- #endif
- #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
-
-+/*
-+ * Under exec-shield we don't use the generic fixmap gate area.
-+ * The vDSO ("gate area") has a normal vma found the normal ways.
-+ */
-+#define __HAVE_ARCH_GATE_AREA 1
-
- #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
- #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-Index: linux-exec-shield-curr.q/include/asm-i386/pgalloc.h
+Index: linux/include/asm-i386/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-i386/pgalloc.h
+--- linux.orig/include/asm-i386/pgalloc.h
++++ linux/include/asm-i386/pgalloc.h
@@ -3,6 +3,7 @@
#include <linux/config.h>
@@ -1305,10 +1286,10 @@
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
-Index: linux-exec-shield-curr.q/include/asm-i386/processor.h
+Index: linux/include/asm-i386/processor.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/processor.h
-+++ linux-exec-shield-curr.q/include/asm-i386/processor.h
+--- linux.orig/include/asm-i386/processor.h
++++ linux/include/asm-i386/processor.h
@@ -319,7 +319,10 @@ extern int bootloader_type;
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
@@ -1331,10 +1312,10 @@
} while (0)
/*
-Index: linux-exec-shield-curr.q/include/asm-i386/thread_info.h
+Index: linux/include/asm-i386/thread_info.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-i386/thread_info.h
-+++ linux-exec-shield-curr.q/include/asm-i386/thread_info.h
+--- linux.orig/include/asm-i386/thread_info.h
++++ linux/include/asm-i386/thread_info.h
@@ -38,6 +38,7 @@ struct thread_info {
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
@@ -1343,10 +1324,10 @@
struct restart_block restart_block;
unsigned long previous_esp; /* ESP of the previous stack in case
-Index: linux-exec-shield-curr.q/include/asm-ia64/pgalloc.h
+Index: linux/include/asm-ia64/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-ia64/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-ia64/pgalloc.h
+--- linux.orig/include/asm-ia64/pgalloc.h
++++ linux/include/asm-ia64/pgalloc.h
@@ -1,6 +1,10 @@
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H
@@ -1358,10 +1339,10 @@
/*
* This file contains the functions and defines necessary to allocate
* page tables.
-Index: linux-exec-shield-curr.q/include/asm-powerpc/pgalloc.h
+Index: linux/include/asm-powerpc/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-powerpc/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-powerpc/pgalloc.h
+--- linux.orig/include/asm-powerpc/pgalloc.h
++++ linux/include/asm-powerpc/pgalloc.h
@@ -23,6 +23,11 @@ extern kmem_cache_t *pgtable_cache[];
#define PGD_CACHE_NUM 0
#endif
@@ -1374,10 +1355,10 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
-Index: linux-exec-shield-curr.q/include/asm-ppc/pgalloc.h
+Index: linux/include/asm-ppc/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-ppc/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-ppc/pgalloc.h
+--- linux.orig/include/asm-ppc/pgalloc.h
++++ linux/include/asm-ppc/pgalloc.h
@@ -40,5 +40,10 @@ extern void pte_free(struct page *pte);
#define check_pgt_cache() do { } while (0)
@@ -1389,10 +1370,10 @@
+
#endif /* _PPC_PGALLOC_H */
#endif /* __KERNEL__ */
-Index: linux-exec-shield-curr.q/include/asm-s390/pgalloc.h
+Index: linux/include/asm-s390/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-s390/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-s390/pgalloc.h
+--- linux.orig/include/asm-s390/pgalloc.h
++++ linux/include/asm-s390/pgalloc.h
@@ -18,6 +18,10 @@
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -1404,10 +1385,10 @@
#define check_pgt_cache() do {} while (0)
extern void diag10(unsigned long addr);
-Index: linux-exec-shield-curr.q/include/asm-sparc/pgalloc.h
+Index: linux/include/asm-sparc/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-sparc/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-sparc/pgalloc.h
+--- linux.orig/include/asm-sparc/pgalloc.h
++++ linux/include/asm-sparc/pgalloc.h
@@ -66,4 +66,8 @@ BTFIXUPDEF_CALL(void, pte_free, struct p
#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
#define __pte_free_tlb(tlb, pte) pte_free(pte)
@@ -1417,10 +1398,10 @@
+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
+
#endif /* _SPARC_PGALLOC_H */
-Index: linux-exec-shield-curr.q/include/asm-sparc64/pgalloc.h
+Index: linux/include/asm-sparc64/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-sparc64/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-sparc64/pgalloc.h
+--- linux.orig/include/asm-sparc64/pgalloc.h
++++ linux/include/asm-sparc64/pgalloc.h
@@ -181,4 +181,8 @@ static inline void pte_free(struct page
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
@@ -1430,10 +1411,10 @@
+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
+
#endif /* _SPARC64_PGALLOC_H */
-Index: linux-exec-shield-curr.q/include/asm-x86_64/pgalloc.h
+Index: linux/include/asm-x86_64/pgalloc.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-x86_64/pgalloc.h
-+++ linux-exec-shield-curr.q/include/asm-x86_64/pgalloc.h
+--- linux.orig/include/asm-x86_64/pgalloc.h
++++ linux/include/asm-x86_64/pgalloc.h
@@ -6,6 +6,13 @@
#include <linux/threads.h>
#include <linux/mm.h>
@@ -1448,10 +1429,10 @@
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
-Index: linux-exec-shield-curr.q/include/asm-x86_64/pgtable.h
+Index: linux/include/asm-x86_64/pgtable.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-x86_64/pgtable.h
-+++ linux-exec-shield-curr.q/include/asm-x86_64/pgtable.h
+--- linux.orig/include/asm-x86_64/pgtable.h
++++ linux/include/asm-x86_64/pgtable.h
@@ -21,7 +21,7 @@ extern unsigned long __supported_pte_mas
#define swapper_pg_dir init_level4_pgt
@@ -1461,10 +1442,10 @@
extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-Index: linux-exec-shield-curr.q/include/asm-x86_64/processor.h
+Index: linux/include/asm-x86_64/processor.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/asm-x86_64/processor.h
-+++ linux-exec-shield-curr.q/include/asm-x86_64/processor.h
+--- linux.orig/include/asm-x86_64/processor.h
++++ linux/include/asm-x86_64/processor.h
@@ -164,6 +164,11 @@ static inline void clear_in_cr4 (unsigne
*/
#define TASK_SIZE64 (0x800000000000UL - 4096)
@@ -1477,11 +1458,11 @@
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-Index: linux-exec-shield-curr.q/include/linux/mm.h
+Index: linux/include/linux/mm.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/linux/mm.h
-+++ linux-exec-shield-curr.q/include/linux/mm.h
-@@ -914,7 +914,19 @@ extern struct vm_area_struct *copy_vma(s
+--- linux.orig/include/linux/mm.h
++++ linux/include/linux/mm.h
+@@ -914,7 +914,14 @@ extern struct vm_area_struct *copy_vma(s
extern void exit_mmap(struct mm_struct *);
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
@@ -1494,15 +1475,10 @@
+{
+ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
-+
-+extern int install_special_mapping(struct mm_struct *mm,
-+ unsigned long addr, unsigned long len,
-+ unsigned long vm_flags, pgprot_t pgprot,
-+ struct page **pages);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
-@@ -966,7 +978,7 @@ unsigned long page_cache_readahead(struc
+@@ -966,7 +973,7 @@ unsigned long page_cache_readahead(struc
struct file *filp,
pgoff_t offset,
unsigned long size);
@@ -1511,10 +1487,10 @@
struct file_ra_state *ra, pgoff_t offset);
unsigned long max_sane_readahead(unsigned long nr);
-Index: linux-exec-shield-curr.q/include/linux/resource.h
+Index: linux/include/linux/resource.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/linux/resource.h
-+++ linux-exec-shield-curr.q/include/linux/resource.h
+--- linux.orig/include/linux/resource.h
++++ linux/include/linux/resource.h
@@ -52,8 +52,11 @@ struct rlimit {
/*
* Limit the stack by to some sane default: root can always
@@ -1528,10 +1504,10 @@
/*
* GPG wants 32kB of mlocked memory, to make sure pass phrases
-Index: linux-exec-shield-curr.q/include/linux/sched.h
+Index: linux/include/linux/sched.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/linux/sched.h
-+++ linux-exec-shield-curr.q/include/linux/sched.h
+--- linux.orig/include/linux/sched.h
++++ linux/include/linux/sched.h
@@ -39,6 +39,8 @@
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
@@ -1562,10 +1538,10 @@
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
unsigned long mmap_base; /* base of mmap area */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
-Index: linux-exec-shield-curr.q/include/linux/sysctl.h
+Index: linux/include/linux/sysctl.h
===================================================================
---- linux-exec-shield-curr.q.orig/include/linux/sysctl.h
-+++ linux-exec-shield-curr.q/include/linux/sysctl.h
+--- linux.orig/include/linux/sysctl.h
++++ linux/include/linux/sysctl.h
@@ -92,6 +92,9 @@ enum
KERN_CAP_BSET=14, /* int: capability bounding set */
@@ -1576,10 +1552,10 @@
KERN_REALROOTDEV=16, /* real root device to mount after initrd */
KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
-Index: linux-exec-shield-curr.q/kernel/signal.c
+Index: linux/kernel/signal.c
===================================================================
---- linux-exec-shield-curr.q.orig/kernel/signal.c
-+++ linux-exec-shield-curr.q/kernel/signal.c
+--- linux.orig/kernel/signal.c
++++ linux/kernel/signal.c
@@ -868,6 +868,37 @@ out_set:
#define LEGACY_QUEUE(sigptr, sig) \
(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
@@ -1639,15 +1615,15 @@
if (sig_kernel_coredump(signr)) {
/*
* If it was able to dump core, this kills all
-Index: linux-exec-shield-curr.q/kernel/sysctl.c
+Index: linux/kernel/sysctl.c
===================================================================
---- linux-exec-shield-curr.q.orig/kernel/sysctl.c
-+++ linux-exec-shield-curr.q/kernel/sysctl.c
+--- linux.orig/kernel/sysctl.c
++++ linux/kernel/sysctl.c
@@ -77,6 +77,19 @@ extern int proc_unknown_nmi_panic(ctl_ta
void __user *, size_t *, loff_t *);
#endif
-+extern unsigned int vdso_enabled, vdso_populate;
++extern unsigned int vdso_enabled;
+
+int exec_shield = 1;
+
@@ -1663,7 +1639,7 @@
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
-@@ -280,6 +293,40 @@ static ctl_table kern_table[] = {
+@@ -280,6 +293,32 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
@@ -1691,23 +1667,15 @@
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
-+ {
-+ .ctl_name = KERN_VDSO,
-+ .procname = "vdso_populate",
-+ .data = &vdso_populate,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec,
-+ },
+#endif
+ {
.ctl_name = KERN_CORE_USES_PID,
.procname = "core_uses_pid",
.data = &core_uses_pid,
-Index: linux-exec-shield-curr.q/mm/fremap.c
+Index: linux/mm/fremap.c
===================================================================
---- linux-exec-shield-curr.q.orig/mm/fremap.c
-+++ linux-exec-shield-curr.q/mm/fremap.c
+--- linux.orig/mm/fremap.c
++++ linux/mm/fremap.c
@@ -67,13 +67,15 @@ int install_page(struct mm_struct *mm, s
* caller about it.
*/
@@ -1731,10 +1699,10 @@
if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
inc_mm_counter(mm, file_rss);
-Index: linux-exec-shield-curr.q/mm/mmap.c
+Index: linux/mm/mmap.c
===================================================================
---- linux-exec-shield-curr.q.orig/mm/mmap.c
-+++ linux-exec-shield-curr.q/mm/mmap.c
+--- linux.orig/mm/mmap.c
++++ linux/mm/mmap.c
@@ -24,6 +24,7 @@
#include <linux/mount.h>
#include <linux/mempolicy.h>
@@ -1926,92 +1894,10 @@
/*
* Walk the list again, actually closing and freeing it,
-@@ -2060,3 +2149,81 @@ int may_expand_vm(struct mm_struct *mm,
- return 0;
- return 1;
- }
-+
-+
-+static struct page *
-+special_mapping_nopage(struct vm_area_struct *vma,
-+ unsigned long address, int *type)
-+{
-+ struct page **pages;
-+
-+ BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-+
-+ address -= vma->vm_start;
-+ for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-+ address -= PAGE_SIZE;
-+
-+ if (*pages) {
-+ get_page(*pages);
-+ return *pages;
-+ }
-+
-+ return NOPAGE_SIGBUS;
-+}
-+
-+static struct vm_operations_struct special_mapping_vmops = {
-+ .nopage = special_mapping_nopage,
-+};
-+
-+unsigned int vdso_populate = 1;
-+
-+/*
-+ * Insert a new vma covering the given region, with the given flags and
-+ * protections. Its pages are supplied by the given null-terminated array.
-+ * The region past the last page supplied will always produce SIGBUS.
-+ * The array pointer and the pages it points to are assumed to stay alive
-+ * for as long as this mapping might exist.
-+ */
-+int install_special_mapping(struct mm_struct *mm,
-+ unsigned long addr, unsigned long len,
-+ unsigned long vm_flags, pgprot_t pgprot,
-+ struct page **pages)
-+{
-+ struct vm_area_struct *vma;
-+ int err;
-+
-+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+ if (unlikely(vma == NULL))
-+ return -ENOMEM;
-+ memset(vma, 0, sizeof(*vma));
-+
-+ vma->vm_mm = mm;
-+ vma->vm_start = addr;
-+ vma->vm_end = addr + len;
-+
-+ vma->vm_flags = vm_flags;
-+ vma->vm_page_prot = pgprot;
-+
-+ vma->vm_ops = &special_mapping_vmops;
-+ vma->vm_private_data = pages;
-+
-+ insert_vm_struct(mm, vma);
-+ mm->total_vm += len >> PAGE_SHIFT;
-+
-+ if (!vdso_populate)
-+ return 0;
-+
-+ err = 0;
-+ while (*pages) {
-+ struct page *page = *pages++;
-+ get_page(page);
-+ err = install_page(mm, vma, addr, page, vma->vm_page_prot);
-+ if (err) {
-+ put_page(page);
-+ break;
-+ }
-+ addr += PAGE_SIZE;
-+ }
-+
-+ return err;
-+}
-Index: linux-exec-shield-curr.q/mm/mprotect.c
+Index: linux/mm/mprotect.c
===================================================================
---- linux-exec-shield-curr.q.orig/mm/mprotect.c
-+++ linux-exec-shield-curr.q/mm/mprotect.c
+--- linux.orig/mm/mprotect.c
++++ linux/mm/mprotect.c
@@ -22,6 +22,7 @@
#include <asm/uaccess.h>
@@ -2038,10 +1924,10 @@
change_protection(vma, start, end, newprot);
vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
vm_stat_account(mm, newflags, vma->vm_file, nrpages);
-Index: linux-exec-shield-curr.q/mm/mremap.c
+Index: linux/mm/mremap.c
===================================================================
---- linux-exec-shield-curr.q.orig/mm/mremap.c
-+++ linux-exec-shield-curr.q/mm/mremap.c
+--- linux.orig/mm/mremap.c
++++ linux/mm/mremap.c
@@ -386,8 +386,8 @@ unsigned long do_mremap(unsigned long ad
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
@@ -2053,15 +1939,3 @@
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out;
-
---- linux-2.6.15/fs/proc/base.c~ 2006-01-09 13:41:33.000000000 -0500
-+++ linux-2.6.15/fs/proc/base.c 2006-01-09 13:41:42.000000000 -0500
-@@ -201,7 +201,7 @@ static struct pid_entry tgid_base_stuff[
- E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
- E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
- #ifdef CONFIG_MMU
-- E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO),
-+ E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUSR),
- #endif
- #ifdef CONFIG_SECURITY
- E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
More information about the fedora-cvs-commits
mailing list