
rpms/kernel/FC-3 linux-2.6.11-exec-shield.patch, NONE, 1.1 kernel-2.6.spec, 1.839, 1.840



Author: davej

Update of /cvs/dist/rpms/kernel/FC-3
In directory cvs.devel.redhat.com:/tmp/cvs-serv21025

Modified Files:
	kernel-2.6.spec 
Added Files:
	linux-2.6.11-exec-shield.patch 
Log Message:
exec-shield improvements.


linux-2.6.11-exec-shield.patch:
 arch/i386/kernel/asm-offsets.c       |    1 
 arch/i386/kernel/cpu/common.c        |    6 +
 arch/i386/kernel/entry.S             |    8 +
 arch/i386/kernel/process.c           |   69 +++++++++++++++
 arch/i386/kernel/signal.c            |    5 -
 arch/i386/kernel/smp.c               |    3 
 arch/i386/kernel/sysenter.c          |   50 +++++++++++
 arch/i386/kernel/traps.c             |   44 ++++++++++
 arch/i386/kernel/vsyscall-sysenter.S |    6 -
 arch/i386/kernel/vsyscall.lds.S      |    4 
 arch/i386/mm/init.c                  |    6 +
 arch/i386/mm/mmap.c                  |   13 ++
 arch/ia64/ia32/binfmt_elf32.c        |    2 
 arch/x86_64/ia32/ia32_binfmt.c       |    2 
 arch/x86_64/mm/Makefile              |    2 
 arch/x86_64/mm/mmap.c                |   95 +++++++++++++++++++++
 drivers/char/random.c                |   33 +++++++
 fs/binfmt_elf.c                      |  152 ++++++++++++++++++++++++++---------
 fs/exec.c                            |    6 +
 fs/proc/array.c                      |    8 +
 fs/proc/base.c                       |    4 
 fs/proc/task_mmu.c                   |   25 +++++
 include/asm-i386/desc.h              |   14 +++
 include/asm-i386/elf.h               |   42 ++++++---
 include/asm-i386/mmu.h               |    6 +
 include/asm-i386/pgalloc.h           |    1 
 include/asm-i386/processor.h         |    8 +
 include/asm-i386/thread_info.h       |    1 
 include/asm-ia64/pgalloc.h           |    4 
 include/asm-ppc/pgalloc.h            |    5 +
 include/asm-ppc64/pgalloc.h          |    5 +
 include/asm-s390/pgalloc.h           |    4 
 include/asm-sparc/pgalloc.h          |    4 
 include/asm-sparc64/pgalloc.h        |    4 
 include/asm-x86_64/pgalloc.h         |    7 +
 include/asm-x86_64/processor.h       |    9 ++
 include/linux/mm.h                   |    9 +-
 include/linux/random.h               |    3 
 include/linux/resource.h             |    5 -
 include/linux/sched.h                |   11 ++
 include/linux/sysctl.h               |    4 
 kernel/signal.c                      |   38 ++++++++
 kernel/sysctl.c                      |   57 +++++++++++++
 mm/mmap.c                            |  104 ++++++++++++++++++++++-
 mm/mprotect.c                        |    5 -
 mm/mremap.c                          |    4 
 46 files changed, 811 insertions(+), 87 deletions(-)

--- NEW FILE linux-2.6.11-exec-shield.patch ---
--- linux/fs/binfmt_elf.c.orig
+++ linux/fs/binfmt_elf.c
@@ -46,7 +46,7 @@
 
 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
 static int load_elf_library(struct file*);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
+static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
 
 #ifndef elf_addr_t
@@ -165,20 +165,8 @@ create_elf_tables(struct linux_binprm *b
 	if (k_platform) {
 		size_t len = strlen(k_platform) + 1;
 
-#ifdef CONFIG_X86_HT
-		/*
-		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
-		 * evictions by the processes running on the same package. One
-		 * thing we can do is to shuffle the initial stack for them.
-		 *
-		 * The conditionals here are unneeded, but kept in to make the
-		 * code behaviour the same as pre change unless we have
-		 * hyperthreaded processors. This should be cleaned up
-		 * before 2.6
-		 */
-	 
-		if (smp_num_siblings > 1)
-			STACK_ALLOC(p, ((current->pid % 64) << 7));
+#ifdef __HAVE_ARCH_ALIGN_STACK
+		p = (unsigned long)arch_align_stack((unsigned long)p);
 #endif
 		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
 		if (__copy_to_user(u_platform, k_platform, len))
@@ -291,20 +279,59 @@ create_elf_tables(struct linux_binprm *b
 #ifndef elf_map
 
 static unsigned long elf_map(struct file *filep, unsigned long addr,
-			struct elf_phdr *eppnt, int prot, int type)
+			     struct elf_phdr *eppnt, int prot, int type,
+			     unsigned long total_size)
 {
 	unsigned long map_addr;
+	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
+	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
+
+	addr = ELF_PAGESTART(addr);
+	size = ELF_PAGEALIGN(size);
 
 	down_write(&current->mm->mmap_sem);
-	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
-			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
-			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+
+	/*
+	 * total_size is the size of the ELF (interpreter) image.
+	 * The _first_ mmap needs to know the full size, otherwise
+	 * randomization might put this image into an overlapping
+	 * position with the ELF binary image. (since size < total_size)
+	 * So we first map the 'big' image - and unmap the remainder at
+	 * the end (this unmap is needed for ELF images with holes).
+	 */
+	if (total_size) {
+		total_size = ELF_PAGEALIGN(total_size);
+		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+		if (!BAD_ADDR(map_addr))
+			do_munmap(current->mm, map_addr+size, total_size-size);
+	} else
+		map_addr = do_mmap(filep, addr, size, prot, type, off);
+		
 	up_write(&current->mm->mmap_sem);
-	return(map_addr);
+
+	return map_addr;
 }
 
 #endif /* !elf_map */
 
+static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+{
+	int i, first_idx = -1, last_idx = -1;
+
+	for (i = 0; i < nr; i++)
+		if (cmds[i].p_type == PT_LOAD) {
+			last_idx = i;
+			if (first_idx == -1)
+				first_idx = i;
+		}
+
+	if (first_idx == -1)
+		return 0;
+
+	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
+				ELF_PAGESTART(cmds[first_idx].p_vaddr);
+}
+
 /* This is much more generalized than the library routine read function,
    so we keep this separate.  Technically the library read function
    is only provided so that we can read a.out libraries that have
@@ -312,7 +339,8 @@ static unsigned long elf_map(struct file
 
 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 				     struct file * interpreter,
-				     unsigned long *interp_load_addr)
+				     unsigned long *interp_load_addr,
+				     unsigned long no_base)
 {
 	struct elf_phdr *elf_phdata;
 	struct elf_phdr *eppnt;
@@ -320,6 +348,7 @@ static unsigned long load_elf_interp(str
 	int load_addr_set = 0;
 	unsigned long last_bss = 0, elf_bss = 0;
 	unsigned long error = ~0UL;
+	unsigned long total_size;
 	int retval, i, size;
 
 	/* First of all, some simple consistency checks */
@@ -358,6 +387,10 @@ static unsigned long load_elf_interp(str
 		goto out_close;
 	}
 
+	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+	if (!total_size)
+		goto out_close;
+
 	eppnt = elf_phdata;
 	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
 	  if (eppnt->p_type == PT_LOAD) {
@@ -372,8 +405,11 @@ static unsigned long load_elf_interp(str
 	    vaddr = eppnt->p_vaddr;
 	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
 	    	elf_type |= MAP_FIXED;
+	    else if (no_base && interp_elf_ex->e_type == ET_DYN)
+		load_addr = -vaddr;
 
-	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
+	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
+	    total_size = 0;
 	    error = map_addr;
 	    if (BAD_ADDR(map_addr))
 	    	goto out_close;
@@ -520,7 +556,7 @@ static int load_elf_binary(struct linux_
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
 	struct files_struct *files;
-	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+	int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
 	unsigned long def_flags = 0;
 	struct {
 		struct elfhdr elf_ex;
@@ -676,6 +712,8 @@ static int load_elf_binary(struct linux_
 	}
 
 	elf_ppnt = elf_phdata;
+	executable_stack = EXSTACK_DEFAULT;
+
 	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
 		if (elf_ppnt->p_type == PT_GNU_STACK) {
 			if (elf_ppnt->p_flags & PF_X)
@@ -686,6 +724,24 @@ static int load_elf_binary(struct linux_
 		}
 	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
+	relocexec = 0;
+
+	if (current->personality == PER_LINUX)
+	switch (exec_shield) {
+	case 1:
+		if (executable_stack == EXSTACK_DISABLE_X) {
+			current->flags |= PF_RELOCEXEC;
+			relocexec = PF_RELOCEXEC;
+		}
+		break;
+
+	case 2:
+		executable_stack = EXSTACK_DISABLE_X;
+		current->flags |= PF_RELOCEXEC;
+		relocexec = PF_RELOCEXEC;
+		break;
+	}
+
 	/* Some simple consistency checks for the interpreter */
 	if (elf_interpreter) {
 		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
@@ -738,6 +794,16 @@ static int load_elf_binary(struct linux_
 	retval = flush_old_exec(bprm);
 	if (retval)
 		goto out_free_dentry;
+	current->flags |= relocexec;
+
+#ifdef __i386__
+	/*
+	 * Turn off the CS limit completely if exec-shield disabled or
+	 * NX active:
+	 */
+	if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
+		arch_add_exec_range(current->mm, -1);
+#endif
 
 	/* Discard our unneeded old files struct */
 	if (files) {
@@ -757,7 +823,8 @@ static int load_elf_binary(struct linux_
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
 	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
-	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+	if (exec_shield != 2 &&
+			elf_read_implies_exec(loc->elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
 	arch_pick_mmap_layout(current->mm);
@@ -774,10 +841,10 @@ static int load_elf_binary(struct linux_
 	
 	current->mm->start_stack = bprm->p;
 
+
 	/* Now we do a little grungy work by mmaping the ELF image into
-	   the correct location in memory.  At this point, we assume that
-	   the image should be loaded at fixed address, not at a variable
-	   address. */
+	   the correct location in memory.
+	 */
 
 	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
 		int elf_prot = 0, elf_flags;
@@ -821,16 +888,16 @@ static int load_elf_binary(struct linux_
 		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
 
 		vaddr = elf_ppnt->p_vaddr;
-		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
 			elf_flags |= MAP_FIXED;
-		} else if (loc->elf_ex.e_type == ET_DYN) {
-			/* Try and get dynamic programs out of the way of the default mmap
-			   base, as well as whatever program they might try to exec.  This
-			   is because the brk will follow the loader, and is not movable.  */
+		else if (loc->elf_ex.e_type == ET_DYN)
+#ifdef __i386__
+			load_bias = 0;
+#else
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-		}
+#endif
 
-		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
 		if (BAD_ADDR(error)) {
 			send_sig(SIGKILL, current, 0);
 			goto out_free_dentry;
@@ -907,7 +974,8 @@ static int load_elf_binary(struct linux_
 		else
 			elf_entry = load_elf_interp(&loc->interp_elf_ex,
 						    interpreter,
-						    &interp_load_addr);
+						    &interp_load_addr,
+						    load_bias);
 		if (BAD_ADDR(elf_entry)) {
 			printk(KERN_ERR "Unable to load interpreter %.128s\n",
 				elf_interpreter);
@@ -931,6 +999,14 @@ static int load_elf_binary(struct linux_
 
 	set_binfmt(&elf_format);
 
+	/*
+	 * Map the vsyscall trampoline. This address is then passed via
+	 * AT_SYSINFO.
+	 */
+#ifdef __HAVE_ARCH_VSYSCALL
+	map_vsyscall();
+#endif
+
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
@@ -944,6 +1020,10 @@ static int load_elf_binary(struct linux_
 	current->mm->end_data = end_data;
 	current->mm->start_stack = bprm->p;
 
+#ifdef __HAVE_ARCH_RANDOMIZE_BRK
+	if (current->flags & PF_RELOCEXEC)
+		randomize_brk(elf_brk);
+#endif
 	if (current->personality & MMAP_PAGE_ZERO) {
 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
 		   and some applications "depend" upon this behavior.
@@ -999,6 +1079,8 @@ out_free_fh:
 	}
 out_free_ph:
 	kfree(elf_phdata);
+	current->flags &= ~PF_RELOCEXEC;
+	current->flags |= old_relocexec;
 	goto out;
 }
 
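
For context, the PT_GNU_STACK handling above is what the two exec-shield modes key off: with exec-shield=1 only binaries whose PT_GNU_STACK header asks for a non-executable stack get PF_RELOCEXEC, while exec-shield=2 forces a non-executable stack for every PER_LINUX process. A minimal user-space sketch (not part of the patch; assumes glibc, which provides the ElfW() macro in <link.h>) that prints the marking the loader sees:

#include <elf.h>
#include <link.h>
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/exe", "rb");
	ElfW(Ehdr) eh;
	ElfW(Phdr) ph;
	int i;

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
		perror("ELF header");
		return 1;
	}
	if (fseek(f, eh.e_phoff, SEEK_SET) != 0)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type == PT_GNU_STACK) {
			/* PF_X here is the ELF flag, not the kernel task flag */
			printf("PT_GNU_STACK: %s stack requested\n",
			       (ph.p_flags & PF_X) ? "executable" : "non-executable");
			fclose(f);
			return 0;
		}
	}
	printf("no PT_GNU_STACK header (loader uses EXSTACK_DEFAULT)\n");
	fclose(f);
	return 0;
}
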
--- linux/fs/proc/base.c.orig
+++ linux/fs/proc/base.c
@@ -128,7 +128,7 @@ static struct pid_entry tgid_base_stuff[
 	E(PROC_TGID_CMDLINE,   "cmdline", S_IFREG|S_IRUGO),
 	E(PROC_TGID_STAT,      "stat",    S_IFREG|S_IRUGO),
 	E(PROC_TGID_STATM,     "statm",   S_IFREG|S_IRUGO),
-	E(PROC_TGID_MAPS,      "maps",    S_IFREG|S_IRUGO),
+	E(PROC_TGID_MAPS,      "maps",    S_IFREG|S_IRUSR),
 	E(PROC_TGID_MEM,       "mem",     S_IFREG|S_IRUSR|S_IWUSR),
 	E(PROC_TGID_CWD,       "cwd",     S_IFLNK|S_IRWXUGO),
 	E(PROC_TGID_ROOT,      "root",    S_IFLNK|S_IRWXUGO),
@@ -158,7 +158,7 @@ static struct pid_entry tid_base_stuff[]
 	E(PROC_TID_CMDLINE,    "cmdline", S_IFREG|S_IRUGO),
 	E(PROC_TID_STAT,       "stat",    S_IFREG|S_IRUGO),
 	E(PROC_TID_STATM,      "statm",   S_IFREG|S_IRUGO),
-	E(PROC_TID_MAPS,       "maps",    S_IFREG|S_IRUGO),
+	E(PROC_TID_MAPS,       "maps",    S_IFREG|S_IRUSR),
 	E(PROC_TID_MEM,        "mem",     S_IFREG|S_IRUSR|S_IWUSR),
 	E(PROC_TID_CWD,        "cwd",     S_IFLNK|S_IRWXUGO),
 	E(PROC_TID_ROOT,       "root",    S_IFLNK|S_IRWXUGO),
--- linux/fs/proc/array.c.orig
+++ linux/fs/proc/array.c
@@ -376,8 +376,12 @@ static int do_task_stat(struct task_stru
 	ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
 	read_unlock(&tasklist_lock);
 
-	if (!whole || num_threads<2)
-		wchan = get_wchan(task);
+	if (!whole || num_threads<2) {
+		wchan = 0;
+		if (current->uid == task->uid || current->euid == task->uid ||
+				capable(CAP_SYS_NICE))
+			wchan = get_wchan(task);
+	}
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
--- linux/fs/proc/task_mmu.c.orig
+++ linux/fs/proc/task_mmu.c
@@ -21,13 +21,23 @@ char *task_mem(struct mm_struct *mm, cha
 		"VmStk:\t%8lu kB\n"
 		"VmExe:\t%8lu kB\n"
 		"VmLib:\t%8lu kB\n"
-		"VmPTE:\t%8lu kB\n",
+		"VmPTE:\t%8lu kB\n"
+		"StaBrk:\t%08lx kB\n"
+		"Brk:\t%08lx kB\n"
+		"StaStk:\t%08lx kB\n"
+		,
 		(mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
 		mm->locked_vm << (PAGE_SHIFT-10),
 		mm->rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
+		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+		mm->start_brk, mm->brk, mm->start_stack);
+#if __i386__
+	if (!nx_enabled)
+		buffer += sprintf(buffer,
+				"ExecLim:\t%08lx\n", mm->context.exec_limit);
+#endif
 	return buffer;
 }
 
@@ -79,6 +89,9 @@ out:
 
 static int show_map(struct seq_file *m, void *v)
 {
+#ifdef __i386__
+	struct task_struct *task = m->private;
+#endif
 	struct vm_area_struct *map = v;
 	struct file *file = map->vm_file;
 	int flags = map->vm_flags;
@@ -97,7 +110,13 @@ static int show_map(struct seq_file *m, 
 			map->vm_end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
-			flags & VM_EXEC ? 'x' : '-',
+			(flags & VM_EXEC
+#ifdef __i386__
+				|| (!nx_enabled &&
+				(map->vm_start < task->mm->context.exec_limit))
+#endif
+			)
+				? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
 			map->vm_pgoff << PAGE_SHIFT,
 			MAJOR(dev), MINOR(dev), ino, &len);
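
A small sketch (not part of the patch) to see the new fields the task_mem() change adds to /proc/<pid>/status; ExecLim is only emitted by i386 kernels running without NX:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("/proc/self/status");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "StaBrk:", 7) || !strncmp(line, "Brk:", 4) ||
		    !strncmp(line, "StaStk:", 7) || !strncmp(line, "ExecLim:", 8))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
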
--- linux/fs/exec.c.orig
+++ linux/fs/exec.c
@@ -400,7 +400,12 @@ int setup_arg_pages(struct linux_binprm 
 	while (i < MAX_ARG_PAGES)
 		bprm->page[i++] = NULL;
 #else
+#ifdef __HAVE_ARCH_ALIGN_STACK
+	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
+	stack_base = PAGE_ALIGN(stack_base);
+#else
 	stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
+#endif
 	bprm->p += stack_base;
 	mm->arg_start = bprm->p;
 	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
@@ -877,6 +882,7 @@ int flush_old_exec(struct linux_binprm *
 	tcomm[i] = '\0';
 	set_task_comm(current, tcomm);
 
+	current->flags &= ~PF_RELOCEXEC;
 	flush_thread();
 
 	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid || 
--- linux/mm/mprotect.c.orig
+++ linux/mm/mprotect.c
@@ -22,6 +22,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
@@ -145,7 +146,7 @@ mprotect_fixup(struct vm_area_struct *vm
 	struct mm_struct * mm = vma->vm_mm;
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
-	unsigned long charged = 0;
+	unsigned long charged = 0, old_end = vma->vm_end;
 	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
@@ -209,6 +210,8 @@ success:
 	 */
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = newprot;
+	if (oldflags & VM_EXEC)
+		arch_remove_exec_range(current->mm, old_end);
 	change_protection(vma, start, end, newprot);
 	__vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	__vm_stat_account(mm, newflags, vma->vm_file, nrpages);
--- linux/mm/mremap.c.orig
+++ linux/mm/mremap.c
@@ -412,8 +412,8 @@ unsigned long do_mremap(unsigned long ad
 			if (vma->vm_flags & VM_MAYSHARE)
 				map_flags |= MAP_SHARED;
 
-			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-						vma->vm_pgoff, map_flags);
+			new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len, 
+				vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
 			ret = new_addr;
 			if (new_addr & ~PAGE_MASK)
 				goto out;
--- linux/mm/mmap.c.orig
+++ linux/mm/mmap.c
@@ -25,6 +25,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -335,6 +336,8 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	if (vma->vm_flags & VM_EXEC)
+		arch_add_exec_range(mm, vma->vm_end);
 	if (prev) {
 		vma->vm_next = prev->vm_next;
 		prev->vm_next = vma;
@@ -439,6 +442,8 @@ __vma_unlink(struct mm_struct *mm, struc
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
+	if (vma->vm_flags & VM_EXEC)
+		arch_remove_exec_range(mm, vma->vm_end);
 }
 
 /*
@@ -744,6 +749,8 @@ struct vm_area_struct *vma_merge(struct 
 		} else					/* cases 2, 5, 7 */
 			vma_adjust(prev, prev->vm_start,
 				end, prev->vm_pgoff, NULL);
+		if (prev->vm_flags & VM_EXEC)
+			arch_add_exec_range(mm, prev->vm_end);
 		return prev;
 	}
 
@@ -915,7 +922,7 @@ unsigned long do_mmap_pgoff(struct file 
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
-	addr = get_unmapped_area(file, addr, len, pgoff, flags);
+	addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
 	if (addr & ~PAGE_MASK)
 		return addr;
 
@@ -1311,9 +1318,10 @@ void arch_unmap_area_topdown(struct vm_a
 		area->vm_mm->free_area_cache = area->vm_end;
 }
 
+
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags, int exec)
 {
 	if (flags & MAP_FIXED) {
 		unsigned long ret;
@@ -1328,7 +1327,10 @@ get_unmapped_area_prot(struct file *file
 	if (!(flags & MAP_FIXED)) {
 		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-		get_area = current->mm->get_unmapped_area;
+		if (exec && current->mm->get_unmapped_exec_area)
+			get_area = current->mm->get_unmapped_exec_area;
+		else
+			get_area = current->mm->get_unmapped_area;
 		if (file && file->f_op && file->f_op->get_unmapped_area)
 			get_area = file->f_op->get_unmapped_area;
 		addr = get_area(file, addr, len, pgoff, flags);
@@ -1359,7 +1361,71 @@ get_unmapped_area_prot(struct file *file
 	return addr;
 }
 
-EXPORT_SYMBOL(get_unmapped_area);
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+#define SHLIB_BASE             0x00111000
+
+unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+		unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+	unsigned long addr = addr0, len = len0;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long tmp;
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+		
+	if (!addr && !(flags & MAP_FIXED))
+		addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start)) {
+			return addr;
+		}
+	}
+
+	addr = SHLIB_BASE;
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (TASK_SIZE - len < addr) {
+			return -ENOMEM;
+		}
+		if (!vma || addr + len <= vma->vm_start) {
+			/*
+			 * Must not let a PROT_EXEC mapping get into the
+			 * brk area:
+			 */
+			if (addr + len > mm->brk)
+				goto failed;
+			
+			/*
+			 * Up until the brk area we randomize addresses
+			 * as much as possible:
+			 */
+			if (addr >= 0x01000000) {
+				tmp = randomize_range(0x01000000, mm->brk, len);
+				vma = find_vma(mm, tmp);
+				if (TASK_SIZE - len >= tmp &&
+				    (!vma || tmp + len <= vma->vm_start))
+					return tmp;
+			}
+			/*
+			 * Ok, randomization didn't work out - return
+			 * the result of the linear search:
+			 */
+			return addr;
+		}
+		addr = vma->vm_end;
+	}
+	
+failed:
+	return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
@@ -1423,6 +1501,14 @@ out:
 	return prev ? prev->vm_next : vma;
 }
 
+static int over_stack_limit(unsigned long sz)
+{
+	if (sz < EXEC_STACK_BIAS)
+		return 0;
+	return (sz - EXEC_STACK_BIAS) >
+			current->signal->rlim[RLIMIT_STACK].rlim_cur;
+}
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -1438,7 +1524,7 @@ static int acct_stack_growth(struct vm_a
 		return -ENOMEM;
 
 	/* Stack limit test */
-	if (size > rlim[RLIMIT_STACK].rlim_cur)
+	if (over_stack_limit(size))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -1780,10 +1866,14 @@ int split_vma(struct mm_struct * mm, str
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
-	if (new_below)
+	if (new_below) {
+		unsigned long old_end = vma->vm_end;
+
 		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
 			((addr - new->vm_start) >> PAGE_SHIFT), new);
-	else
+		if (vma->vm_flags & VM_EXEC)
+			arch_remove_exec_range(mm, old_end);
+	} else
 		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
 	return 0;
@@ -2003,6 +2093,7 @@ void exit_mmap(struct mm_struct *mm)
 	mm->rss = 0;
 	mm->total_vm = 0;
 	mm->locked_vm = 0;
+	arch_flush_exec_range(mm);
 
 	spin_unlock(&mm->page_table_lock);
 
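
A user-space sketch (not part of the patch) of what the get_unmapped_area_prot() split means in practice: for a process with PF_RELOCEXEC set, an anonymous PROT_EXEC mapping should be placed by arch_get_unmapped_exec_area() in the low, ascending region starting near SHLIB_BASE (0x00111000) and kept below the brk, while a plain read-write mapping follows the normal mmap base:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *rx = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("brk:         %p\n", sbrk(0));
	printf("rw- mapping: %p\n", rw);
	printf("r-x mapping: %p\n", rx);
	return 0;
}
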
--- linux/kernel/signal.c.orig
+++ linux/kernel/signal.c
@@ -1176,6 +1176,37 @@ kill_proc_info(int sig, struct siginfo *
 	return error;
 }
 
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+	printk("%s/%d: potentially unexpected fatal signal %d.\n",
+		current->comm, current->pid, signr);
+		
+#ifdef __i386__
+	printk("code at %08lx: ", regs->eip);
+	{
+		int i;
+		for (i = 0; i < 16; i++) {
+			unsigned char insn;
+
+			__get_user(insn, (unsigned char *)(regs->eip + i));
+			printk("%02x ", insn);
+		}
+	}
+#endif	
+	printk("\n");
+	show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+	get_option (&str, &print_fatal_signals);
+
+	return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1827,6 +1858,11 @@ relock:
 		if (!signr)
 			break; /* will return 0 */
 
+		if ((signr == SIGSEGV) && print_fatal_signals) {
+			spin_unlock_irq(&current->sighand->siglock);
+			print_fatal_signal(regs, signr);
+			spin_lock_irq(&current->sighand->siglock);
+		}
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			ptrace_signal_deliver(regs, cookie);
 
@@ -1922,6 +1958,8 @@ relock:
 		 * Anything else is fatal, maybe with a core dump.
 		 */
 		current->flags |= PF_SIGNALED;
+		if (print_fatal_signals)
+			print_fatal_signal(regs, signr);
 		if (sig_kernel_coredump(signr)) {
 			/*
 			 * If it was able to dump core, this kills all
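
A quick sketch (not part of the patch) to exercise print_fatal_signal(): enable the new sysctl (root needed) and fault on purpose, then look for the "potentially unexpected fatal signal" dump in the kernel log:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/print-fatal-signals", "w");

	if (f) {
		fputs("1\n", f);
		fclose(f);
	} else {
		perror("print-fatal-signals (not root?)");
	}

	/* deliberate SIGSEGV; check dmesg afterwards */
	*(volatile int *)0 = 0;
	return 0;
}
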
--- linux/kernel/sysctl.c.orig
+++ linux/kernel/sysctl.c
@@ -72,6 +72,29 @@ extern int proc_unknown_nmi_panic(ctl_ta
 				  void __user *, size_t *, loff_t *);
 #endif
 
+extern unsigned int vdso_enabled;
+
+int exec_shield = 2;
+int exec_shield_randomize = 1;
+
+static int __init setup_exec_shield(char *str)
+{
+        get_option (&str, &exec_shield);
+
+        return 1;
+}
+
+__setup("exec-shield=", setup_exec_shield);
+
+static int __init setup_exec_shield_randomize(char *str)
+{
+        get_option (&str, &exec_shield_randomize);
+
+        return 1;
+}
+
+__setup("exec-shield-randomize=", setup_exec_shield_randomize);
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -273,6 +296,40 @@ static ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= KERN_EXEC_SHIELD,
+		.procname	= "exec-shield",
+		.data		= &exec_shield,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= KERN_EXEC_SHIELD_RAND,
+		.procname	= "exec-shield-randomize",
+		.data		= &exec_shield_randomize,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= KERN_PRINT_FATAL,
+		.procname	= "print-fatal-signals",
+		.data		= &print_fatal_signals,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#if __i386__
+	{
+		.ctl_name	= KERN_VDSO,
+		.procname	= "vdso",
+		.data		= &vdso_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+	{
 		.ctl_name	= KERN_CORE_USES_PID,
 		.procname	= "core_uses_pid",
 		.data		= &core_uses_pid,
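
The four knobs this table change exports all live under /proc/sys/kernel ("vdso" only on i386 kernels). A sketch (not part of the patch) that reads them back:

#include <stdio.h>

static void show(const char *name)
{
	char path[128], val[32];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
	f = fopen(path, "r");
	if (f && fgets(val, sizeof(val), f))
		printf("%-24s %s", name, val);
	else
		printf("%-24s <not available>\n", name);
	if (f)
		fclose(f);
}

int main(void)
{
	show("exec-shield");
	show("exec-shield-randomize");
	show("print-fatal-signals");
	show("vdso");
	return 0;
}
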
--- linux/arch/x86_64/mm/mmap.c.orig
+++ linux/arch/x86_64/mm/mmap.c
@@ -0,0 +1,95 @@
+/*
+ *  linux/arch/x86-64/mm/mmap.c
+ *
+ *  flexible mmap layout support
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ * Started by Ingo Molnar <mingo elte hu>
+ */
+
+#include <linux/personality.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Leave at least a ~128 MB hole.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+static inline unsigned long mmap_base(void)
+{
+	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return TASK_SIZE - (gap & PAGE_MASK);
+}
+
+static inline int mmap_is_legacy(void)
+{
+	/*
+	 * Force standard allocation for 64 bit programs.
+	 */
+	if (!test_thread_flag(TIF_IA32))
+		return 1;
+		
+	if (current->personality & ADDR_COMPAT_LAYOUT) 
+		return 1;
+	
+	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
+		return 1;
+		
+	return sysctl_legacy_va_layout;
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	/*
+	 * Fall back to the standard layout if the personality
+	 * bit is set, or if the expected stack growth is unlimited:
+	 */
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base();
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (current->flags & PF_RELOCEXEC)
+		sp -= ((get_random_int() % 65536) << 4);
+	return sp & ~0xf;
+}
+
--- linux/arch/x86_64/mm/Makefile.orig
+++ linux/arch/x86_64/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux x86_64-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o
+obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o mmap.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
 obj-$(CONFIG_K8_NUMA) += k8topology.o
--- linux/arch/x86_64/ia32/ia32_binfmt.c.orig
+++ linux/arch/x86_64/ia32/ia32_binfmt.c
@@ -395,7 +395,7 @@ int setup_arg_pages(struct linux_binprm 
 }
 
 static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused)
 {
 	unsigned long map_addr;
 	struct task_struct *me = current; 
--- linux/arch/ia64/ia32/binfmt_elf32.c.orig
+++ linux/arch/ia64/ia32/binfmt_elf32.c
@@ -272,7 +272,7 @@ elf32_set_personality (void)
 }
 
 static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused)
 {
 	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
 
--- linux/arch/i386/mm/mmap.c.orig
+++ linux/arch/i386/mm/mmap.c
@@ -26,6 +26,7 @@
 
 #include <linux/personality.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 
 /*
  * Top of mmap area (just below the process stack).
@@ -38,13 +39,17 @@
 static inline unsigned long mmap_base(struct mm_struct *mm)
 {
 	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+	unsigned long random_factor = 0;
+
+	if (current->flags & PF_RELOCEXEC)
+		random_factor = get_random_int() % (1024*1024);
 
 	if (gap < MIN_GAP)
 		gap = MIN_GAP;
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return TASK_SIZE - (gap & PAGE_MASK);
+	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
 }
 
 /*
@@ -57,15 +62,17 @@ void arch_pick_mmap_layout(struct mm_str
 	 * Fall back to the standard layout if the personality
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
-	if (sysctl_legacy_va_layout ||
+	if ((exec_shield != 2) && (sysctl_legacy_va_layout ||
 			(current->personality & ADDR_COMPAT_LAYOUT) ||
-			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(mm);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		if (current->flags & PF_RELOCEXEC)
+			mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
--- linux/arch/i386/mm/init.c.orig
+++ linux/arch/i386/mm/init.c
@@ -428,7 +428,7 @@ u64 __supported_pte_mask = ~_PAGE_NX;
  * Control non executable mappings.
  *
  * on      Enable
- * off     Disable
+ * off     Disable (disables exec-shield too)
  */
 void __init noexec_setup(const char *str)
 {
@@ -438,6 +438,7 @@ void __init noexec_setup(const char *str
 	} else if (!strncmp(str,"off",3)) {
 		disable_nx = 1;
 		__supported_pte_mask &= ~_PAGE_NX;
+		exec_shield = 0;
 	}
 }
 
@@ -502,7 +503,10 @@ void __init paging_init(void)
 	set_nx();
 	if (nx_enabled)
 		printk("NX (Execute Disable) protection: active\n");
+	else
 #endif
+	if (exec_shield)
+		printk("Using x86 segment limits to approximate NX protection\n");
 
 	pagetable_init();
 
--- linux/arch/i386/kernel/vsyscall.lds.S.orig
+++ linux/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
 
 SECTIONS
 {
-  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+  . = SIZEOF_HEADERS;
 
   .hash           : { *(.hash) }		:text
   .dynsym         : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
      For the layouts to match, we need to skip more than enough
      space for the dynamic symbol table et al.  If this amount
      is insufficient, ld -shared will barf.  Just increase it here.  */
-  . = VSYSCALL_BASE + 0x400;
+  . = 0x400;
 
   .text           : { *(.text) }		:text =0x90909090
 
--- linux/arch/i386/kernel/asm-offsets.c.orig
+++ linux/arch/i386/kernel/asm-offsets.c
@@ -52,6 +52,7 @@ void foo(void)
 	OFFSET(TI_preempt_count, thread_info, preempt_count);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 	OFFSET(TI_restart_block, thread_info, restart_block);
+	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
 	BLANK();
 
 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
--- linux/arch/i386/kernel/cpu/common.c.orig
+++ linux/arch/i386/kernel/cpu/common.c
@@ -390,6 +390,12 @@ void __init identify_cpu(struct cpuinfo_
 	if (disable_pse)
 		clear_bit(X86_FEATURE_PSE, c->x86_capability);
 
+	/* hack: disable SEP for non-NX cpus; SEP breaks Execshield. */
+	#ifdef CONFIG_HIGHMEM64G
+	if (!test_bit(X86_FEATURE_NX, c->x86_capability)) 
+	#endif
+		clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
 	/* If the model name is still unset, do table lookup. */
 	if ( !c->x86_model_id[0] ) {
 		char *p;
--- linux/arch/i386/kernel/signal.c.orig
+++ linux/arch/i386/kernel/signal.c
@@ -380,7 +380,7 @@ static void setup_frame(int sig, struct 
 			goto give_sigsegv;
 	}
 
-	restorer = &__kernel_sigreturn;
+	restorer = current->mm->context.vdso + (long)&__kernel_sigreturn;
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
 
@@ -475,9 +475,10 @@ static void setup_rt_frame(int sig, stru
 		goto give_sigsegv;
 
 	/* Set up to return from userspace.  */
-	restorer = &__kernel_rt_sigreturn;
+	restorer = current->mm->context.vdso + (long)&__kernel_rt_sigreturn;
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
+
 	err |= __put_user(restorer, &frame->pretcode);
 	 
 	/*
--- linux/arch/i386/kernel/entry.S.orig
+++ linux/arch/i386/kernel/entry.S
@@ -201,8 +201,12 @@ sysenter_past_esp:
 	pushl %ebp
 	pushfl
 	pushl $(__USER_CS)
-	pushl $SYSENTER_RETURN
-
+	/*
+	 * Push current_thread_info()->sysenter_return to the stack.
+	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+	 * pushed above, and the word being pushed now:
+	 */
+	pushl (TI_sysenter_return-THREAD_SIZE+4*4)(%esp)
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
--- linux/arch/i386/kernel/process.c.orig
+++ linux/arch/i386/kernel/process.c
@@ -36,6 +36,8 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -581,6 +583,8 @@ struct task_struct fastcall * __switch_t
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
 	__unlazy_fpu(prev_p);
+	if (next_p->mm)
+		load_user_cs_desc(cpu, next_p->mm);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -828,3 +832,68 @@ asmlinkage int sys_get_thread_area(struc
 	return 0;
 }
 
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (current->flags & PF_RELOCEXEC)
+		sp -= ((get_random_int() % 65536) << 4);
+	return sp & ~0xf;
+}
+
+
+void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
+{
+	if (limit > mm->context.exec_limit) {
+		mm->context.exec_limit = limit;
+		set_user_cs(&mm->context.user_cs, limit);
+		if (mm == current->mm) {
+			preempt_disable();
+			load_user_cs_desc(smp_processor_id(), mm);
+			preempt_enable();
+		}
+	}
+}
+
+void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
+{
+	struct vm_area_struct *vma;
+	unsigned long limit = PAGE_SIZE;
+
+	if (old_end == mm->context.exec_limit) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+				limit = vma->vm_end;
+
+		mm->context.exec_limit = limit;
+		set_user_cs(&mm->context.user_cs, limit);
+		if (mm == current->mm) {
+			preempt_disable();
+			load_user_cs_desc(smp_processor_id(), mm);
+			preempt_enable();
+		}
+	}
+}
+
+void arch_flush_exec_range(struct mm_struct *mm)
+{
+	mm->context.exec_limit = 0;
+	set_user_cs(&mm->context.user_cs, 0);
+}
+
+/*
+ * Generate a random brk address between 128MB and 160MB (if the
+ * layout allows it).
+ */
+void randomize_brk(unsigned long old_brk)
+{
+	unsigned long new_brk, range_start, range_end;
+
+	range_start = 0x08000000;
+	if (current->mm->brk >= range_start)
+		range_start = current->mm->brk;
+	range_end = range_start + 0x02000000;
+	new_brk = randomize_range(range_start, range_end, 0);
+	if (new_brk)
+		current->mm->brk = new_brk;
+}
+
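
The effect of arch_align_stack() and randomize_brk() above is easiest to see from user space. A sketch (not part of the patch): run it several times and, with PF_RELOCEXEC set, both addresses change from exec to exec:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int on_stack;

	printf("stack ~ %p   brk ~ %p\n", (void *)&on_stack, sbrk(0));
	return 0;
}
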
--- linux/arch/i386/kernel/traps.c.orig
+++ linux/arch/i386/kernel/traps.c
@@ -453,6 +453,10 @@ DO_ERROR(11, SIGBUS,  "segment not prese
 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 
+/*
+ * the original non-exec stack patch was written by
+ * Solar Designer <solar at openwall.com>. Thanks!
+ */
 fastcall void do_general_protection(struct pt_regs * regs, long error_code)
 {
 	int cpu = get_cpu();
@@ -491,6 +495,46 @@ fastcall void do_general_protection(stru
 	if (!(regs->xcs & 3))
 		goto gp_in_kernel;
 
+	/*
+	 * lazy-check for CS validity on exec-shield binaries:
+	 */
+	if (current->mm) {
+		int cpu = smp_processor_id();
+		struct desc_struct *desc1, *desc2;
+		struct vm_area_struct *vma;
+		unsigned long limit = PAGE_SIZE;
+		
+		spin_lock(&current->mm->page_table_lock);
+		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
+			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
+				limit = vma->vm_end;
+		spin_unlock(&current->mm->page_table_lock);
+
+		current->mm->context.exec_limit = limit;
+		set_user_cs(&current->mm->context.user_cs, limit);
+
+		desc1 = &current->mm->context.user_cs;
+		desc2 = per_cpu(cpu_gdt_table, cpu) + GDT_ENTRY_DEFAULT_USER_CS;
+
+		/*
+		 * The CS was not in sync - reload it and retry the
+		 * instruction. If the instruction still faults then
+		 * we won't hit this branch next time around.
+		 */
+		if (desc1->a != desc2->a || desc1->b != desc2->b) {
+			if (print_fatal_signals >= 2) {
+				printk("#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
+				printk(" exec_limit: %08lx, user_cs: %08lx/%08lx, CPU_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, desc1->a, desc1->b, desc2->a, desc2->b);
+			}
+			load_user_cs_desc(cpu, current->mm);
+			return;
+		}
+	}
+	if (print_fatal_signals) {
+		printk("#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", error_code, error_code/8, regs->eip, smp_processor_id());
+		printk(" exec_limit: %08lx, user_cs: %08lx/%08lx.\n", current->mm->context.exec_limit, current->mm->context.user_cs.a, current->mm->context.user_cs.b);
+	}
+
 	current->thread.error_code = error_code;
 	current->thread.trap_no = 13;
 	force_sig(SIGSEGV, current);
--- linux/arch/i386/kernel/smp.c.orig
+++ linux/arch/i386/kernel/smp.c
@@ -22,6 +22,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
+#include <asm/desc.h>
 #include <mach_apic.h>
 
 /*
@@ -313,6 +314,8 @@ fastcall void smp_invalidate_interrupt(s
 	unsigned long cpu;
 
 	cpu = get_cpu();
+	if (current->active_mm)
+		load_user_cs_desc(cpu, current->active_mm);
 
 	if (!cpu_isset(cpu, flush_cpumask))
 		goto out;
--- linux/arch/i386/kernel/vsyscall-sysenter.S.orig
+++ linux/arch/i386/kernel/vsyscall-sysenter.S
@@ -24,11 +24,11 @@ __kernel_vsyscall:
 	/* 7: align return point with nop's to make disassembly easier */
 	.space 7,0x90
 
-	/* 14: System call restart point is here! (SYSENTER_RETURN - 2) */
+	/* 14: System call restart point is here! (SYSENTER_RETURN_OFFSET-2) */
 	jmp .Lenter_kernel
 	/* 16: System call normal return point is here! */
-	.globl SYSENTER_RETURN	/* Symbol used by entry.S.  */
-SYSENTER_RETURN:
+	.globl SYSENTER_RETURN_OFFSET	/* Symbol used by sysenter.c  */
+SYSENTER_RETURN_OFFSET:
 	pop %ebp
 .Lpop_ebp:
 	pop %edx
--- linux/arch/i386/kernel/sysenter.c.orig
+++ linux/arch/i386/kernel/sysenter.c
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <linux/mman.h>
 
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -41,11 +42,14 @@ void enable_sep_cpu(void *info)
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
 
+struct page *sysenter_page;
+
 static int __init sysenter_setup(void)
 {
 	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
 
-	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_KERNEL_RO);
+	sysenter_page = virt_to_page(page);
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
 		memcpy(page,
@@ -59,7 +63,51 @@ static int __init sysenter_setup(void)
 	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
 	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
+
 	return 0;
 }
 
 __initcall(sysenter_setup);
+
+extern void SYSENTER_RETURN_OFFSET;
+
+unsigned int vdso_enabled = 0;
+
+void map_vsyscall(void)
+{
+	struct thread_info *ti = current_thread_info();
+	struct vm_area_struct *vma;
+	unsigned long addr;
+
+	if (1 || unlikely(!vdso_enabled)) {
+		current->mm->context.vdso = NULL;
+		return;
+	}
+
+	/*
+	 * Map the vDSO (it will be randomized):
+	 */
+	down_write(&current->mm->mmap_sem);
+	addr = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, 0);
+	current->mm->context.vdso = (void *)addr;
+	ti->sysenter_return = (void *)addr + (long)&SYSENTER_RETURN_OFFSET;
+	if (addr != -1) {
+		vma = find_vma(current->mm, addr);
+		if (vma) {
+			pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+			get_page(sysenter_page);
+			install_page(current->mm, vma, addr,
+					sysenter_page, vma->vm_page_prot);
+			
+		}
+	}
+	up_write(&current->mm->mmap_sem);
+}
+
+static int __init vdso_setup(char *str)
+{
+        vdso_enabled = simple_strtoul(str, NULL, 0);
+        return 1;
+}
+__setup("vdso=", vdso_setup);
+
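
The vDSO address chosen here (or NULL while the 'if (1 || ...)' above keeps the mapping disabled) is what create_elf_tables() hands to user space in the aux vector. A sketch (not part of the patch; getauxval() is a later glibc facility, and AT_SYSINFO exists only on i386) that prints what this process received:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
#ifdef AT_SYSINFO
	printf("AT_SYSINFO      = %#lx\n", getauxval(AT_SYSINFO));
#endif
	printf("AT_SYSINFO_EHDR = %#lx\n", getauxval(AT_SYSINFO_EHDR));
	return 0;
}
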
--- linux/drivers/char/random.c.orig
+++ linux/drivers/char/random.c
@@ -2383,3 +2383,36 @@ __u32 check_tcp_syn_cookie(__u32 cookie,
 }
 #endif
 #endif /* CONFIG_INET */
+
+/*
+ * Get a random word:
+ */
+unsigned int get_random_int(void)
+{
+	unsigned int val = 0;
+
+	if (!exec_shield_randomize)
+		return 0;
+
+#ifdef CONFIG_X86_HAS_TSC
+	rdtscl(val);
+#endif
+	val += current->pid + jiffies + (int)val;
+
+	/*
+	 * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
+	 * every second, from the entropy pool (and thus creates a limited
+	 * drain on it), and uses halfMD4Transform within the second. We
+	 * also spice it with the TSC (if available), jiffies, PID and the
+	 * stack address:
+	 */
+	return secure_ip_id(val);
+}
+
+unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+	unsigned long range = end - len - start;
+	if (end <= start + len)
+		return 0;
+	return PAGE_ALIGN(get_random_int() % range + start);
+}
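
A stand-alone mock (not part of the patch) of the randomize_range() arithmetic above, to show the contract: a page-aligned value in [start, end - len), or 0 when the window cannot hold len. rand() stands in for get_random_int(), and the window is the same SHLIB_BASE..16MB range the exec-area allocator passes in:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mock_randomize_range(unsigned long start,
					  unsigned long end, unsigned long len)
{
	unsigned long range = end - len - start;

	if (end <= start + len)
		return 0;
	return PAGE_ALIGN((unsigned long)rand() % range + start);
}

int main(void)
{
	int i;

	srand(time(NULL));
	for (i = 0; i < 4; i++)
		printf("0x%08lx\n",
		       mock_randomize_range(0x00111000UL, 0x01000000UL, PAGE_SIZE));
	return 0;
}
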
--- linux/include/asm-ia64/pgalloc.h.orig
+++ linux/include/asm-ia64/pgalloc.h
@@ -23,6 +23,10 @@
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 
+#define arch_add_exec_range(mm, limit)		do { ; } while (0)
+#define arch_flush_exec_range(mm)		do { ; } while (0)
+#define arch_remove_exec_range(mm, limit)	do { ; } while (0)
+
 /*
  * Very stupidly, we used to get new pgd's and pmd's, init their contents
  * to point to the NULL versions of the next level page table, later on
--- linux/include/asm-sparc/pgalloc.h.orig
+++ linux/include/asm-sparc/pgalloc.h
@@ -66,4 +66,8 @@ BTFIXUPDEF_CALL(void, pte_free, struct p
 #define pte_free(pte)		BTFIXUP_CALL(pte_free)(pte)
 #define __pte_free_tlb(tlb, pte)	pte_free(pte)
 
+#define arch_add_exec_range(mm, limit)		do { ; } while (0)
+#define arch_flush_exec_range(mm)		do { ; } while (0)
+#define arch_remove_exec_range(mm, limit)	do { ; } while (0)
+
 #endif /* _SPARC_PGALLOC_H */
--- linux/include/asm-s390/pgalloc.h.orig
+++ linux/include/asm-s390/pgalloc.h
@@ -19,6 +19,10 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
+#define arch_add_exec_range(mm, limit) do { ; } while (0)
+#define arch_flush_exec_range(mm)      do { ; } while (0)
+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
+
 #define check_pgt_cache()	do {} while (0)
 
 extern void diag10(unsigned long addr);
--- linux/include/asm-ppc/pgalloc.h.orig
+++ linux/include/asm-ppc/pgalloc.h
@@ -40,5 +40,10 @@ extern void pte_free(struct page *pte);
 
 #define check_pgt_cache()	do { } while (0)
 
+#define arch_add_exec_range(mm, limit)         do { ; } while (0)
+#define arch_flush_exec_range(mm)              do { ; } while (0)
+#define arch_remove_exec_range(mm, limit)      do { ; } while (0)
+
+
 #endif /* _PPC_PGALLOC_H */
 #endif /* __KERNEL__ */
--- linux/include/linux/random.h.orig
+++ linux/include/linux/random.h
@@ -70,6 +70,9 @@ extern __u32 secure_tcpv6_sequence_numbe
 extern struct file_operations random_fops, urandom_fops;
 #endif
 
+unsigned int get_random_int(void);
+unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+
 #endif /* __KERNEL___ */
 
 #endif /* _LINUX_RANDOM_H */
--- linux/include/linux/resource.h.orig
+++ linux/include/linux/resource.h
@@ -52,8 +52,11 @@ struct rlimit {
 /*
  * Limit the stack by to some sane default: root can always
  * increase this limit if needed..  8MB seems reasonable.
+ *
+ * (2MB more to cover randomization effects.)
  */
-#define _STK_LIM	(8*1024*1024)
+#define _STK_LIM	(10*1024*1024)
+#define EXEC_STACK_BIAS	(2*1024*1024)
 
 /*
  * GPG wants 32kB of mlocked memory, to make sure pass phrases
--- linux/include/linux/sysctl.h.orig
+++ linux/include/linux/sysctl.h
@@ -84,6 +84,10 @@ enum
 
 	KERN_CAP_BSET=14,	/* int: capability bounding set */
 	KERN_PANIC=15,		/* int: panic timeout */
+	KERN_EXEC_SHIELD=1000,	/* int: exec-shield enabled (0/1/2) */
+	KERN_EXEC_SHIELD_RAND=1001, /* int: exec-shield randomize (0/1) */
+	KERN_PRINT_FATAL=1002,	/* int: print fatal signals (0/1/2) */
+	KERN_VDSO=1003,		/* int: VDSO enabled (0/1) */
 	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
 
 	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
--- linux/include/linux/sched.h.orig
+++ linux/include/linux/sched.h
@@ -34,6 +34,9 @@
 #include <linux/topology.h>
 
 struct exec_domain;
+extern int exec_shield;
+extern int exec_shield_randomize;
+extern int print_fatal_signals;
 
 /*
  * cloning flags:
@@ -196,6 +199,10 @@ extern int sysctl_max_map_count;
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+		       unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
@@ -211,6 +218,9 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
+	unsigned long (*get_unmapped_exec_area) (struct file *filp,
+				unsigned long addr, unsigned long len,
+				unsigned long pgoff, unsigned long flags);
 	void (*unmap_area) (struct vm_area_struct *area);
 	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long free_area_cache;		/* first hole */
@@ -735,6 +745,7 @@ do { if (atomic_dec_and_test(&(tsk)->usa
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
 #define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
+#define PF_RELOCEXEC	0x00800000	/* relocate shared libraries */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
--- linux/include/linux/mm.h.orig
+++ linux/include/linux/mm.h
@@ -728,7 +728,14 @@ extern struct vm_area_struct *copy_vma(s
 	unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+
+static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr, 
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);	
+}
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
--- linux/include/asm-i386/mmu.h.orig
+++ linux/include/asm-i386/mmu.h
@@ -7,11 +7,17 @@
  * we put the segment information here.
  *
  * cpu_vm_mask is used to optimize ldt flushing.
+ *
+ * exec_limit is used to track the range PROT_EXEC
+ * mappings span.
  */
 typedef struct { 
 	int size;
 	struct semaphore sem;
 	void *ldt;
+	struct desc_struct user_cs;
+	unsigned long exec_limit;
+	void *vdso;
 } mm_context_t;
 
 #endif
--- linux/include/asm-i386/processor.h.orig
+++ linux/include/asm-i386/processor.h
@@ -304,7 +304,10 @@ extern int bootloader_type;
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
+
+#define __HAVE_ARCH_ALIGN_STACK
+extern unsigned long arch_align_stack(unsigned long sp);
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
@@ -486,6 +489,9 @@ static inline void load_esp0(struct tss_
 	regs->xcs = __USER_CS;					\
 	regs->eip = new_eip;					\
 	regs->esp = new_esp;					\
+	preempt_disable();					\
+	load_user_cs_desc(smp_processor_id(), current->mm);	\
+	preempt_enable();					\
 } while (0)
 
 /* Forward declaration, a strange C thing */
--- linux/include/asm-i386/desc.h.orig
+++ linux/include/asm-i386/desc.h
@@ -135,6 +135,20 @@ static inline unsigned long get_desc_bas
 	return base;
 }
 
+static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
+{
+	limit = (limit - 1) / PAGE_SIZE;
+	desc->a = limit & 0xffff;
+	desc->b = (limit & 0xf0000) | 0x00c0fb00;
+}
+
+#define load_user_cs_desc(cpu, mm) \
+    	per_cpu(cpu_gdt_table, (cpu))[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs
+
+extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
+extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
+extern void arch_flush_exec_range(struct mm_struct *mm);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
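
For reference, a stand-alone version (not part of the patch) of the set_user_cs() encoding above: the byte limit becomes a page-granular 20-bit limit split across the two words of the GDT entry, with 0x00c0fb00 supplying the fixed type/DPL/granularity bits of the flat user code segment:

#include <stdio.h>

struct desc_words { unsigned int a, b; };

static void mock_set_user_cs(struct desc_words *desc, unsigned long limit)
{
	limit = (limit - 1) / 4096;	/* PAGE_SIZE on i386 */
	desc->a = limit & 0xffff;
	desc->b = (limit & 0xf0000) | 0x00c0fb00;
}

int main(void)
{
	struct desc_words d;

	mock_set_user_cs(&d, 0x01000000);	/* 16 MB exec limit */
	printf("a=%08x b=%08x\n", d.a, d.b);
	return 0;
}
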
--- linux/include/asm-i386/thread_info.h.orig
+++ linux/include/asm-i386/thread_info.h
@@ -38,6 +38,7 @@ struct thread_info {
 					 	   0-0xBFFFFFFF for user-thead
 						   0-0xFFFFFFFF for kernel-thread
 						*/
+	void			*sysenter_return;
 	struct restart_block    restart_block;
 
 	unsigned long           previous_esp;   /* ESP of the previous stack in case
--- linux/include/asm-i386/pgalloc.h.orig
+++ linux/include/asm-i386/pgalloc.h
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/desc.h>
 #include <linux/threads.h>
 #include <linux/mm.h>		/* for struct page */
 
--- linux/include/asm-i386/elf.h.orig
+++ linux/include/asm-i386/elf.h
@@ -9,6 +9,7 @@
 #include <asm/user.h>
 #include <asm/processor.h>
 #include <asm/system.h>		/* for savesegment */
+#include <asm/desc.h>
 
 #include <linux/utsname.h>
 
@@ -133,15 +134,22 @@ extern int dump_task_extended_fpu (struc
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
 
-#define VSYSCALL_BASE	(__fix_to_virt(FIX_VSYSCALL))
-#define VSYSCALL_EHDR	((const struct elfhdr *) VSYSCALL_BASE)
-#define VSYSCALL_ENTRY	((unsigned long) &__kernel_vsyscall)
 extern void __kernel_vsyscall;
+#define VSYSCALL_BASE	((unsigned long)current->mm->context.vdso)
+#define VSYSCALL_EHDR	((const struct elfhdr *) VSYSCALL_BASE)
+#define VSYSCALL_OFFSET	((unsigned long) &__kernel_vsyscall)
+#define VSYSCALL_ENTRY	(VSYSCALL_BASE + VSYSCALL_OFFSET)
 
-#define ARCH_DLINFO						\
-do {								\
-		NEW_AUX_ENT(AT_SYSINFO,	VSYSCALL_ENTRY);	\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE);	\
+/* kernel-internal fixmap address: */
+#define __VSYSCALL_BASE	(__fix_to_virt(FIX_VSYSCALL))
+#define __VSYSCALL_EHDR	((const struct elfhdr *) __VSYSCALL_BASE)
+
+#define ARCH_DLINFO							\
+do {									\
+	if (VSYSCALL_BASE) {						\
+		NEW_AUX_ENT(AT_SYSINFO,	VSYSCALL_ENTRY);		\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE);		\
+	}								\
 } while (0)
 
 /*
@@ -152,15 +160,15 @@ do {								\
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the vsyscall DSO was being used.
  */
-#define ELF_CORE_EXTRA_PHDRS		(VSYSCALL_EHDR->e_phnum)
+#define ELF_CORE_EXTRA_PHDRS		(__VSYSCALL_EHDR->e_phnum)
 #define ELF_CORE_WRITE_EXTRA_PHDRS					      \
 do {									      \
 	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VSYSCALL_BASE		      \
-					   + VSYSCALL_EHDR->e_phoff);	      \
+		(const struct elf_phdr *) (__VSYSCALL_BASE		      \
+					   + __VSYSCALL_EHDR->e_phoff);	      \
 	int i;								      \
 	Elf32_Off ofs = 0;						      \
-	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+	for (i = 0; i < __VSYSCALL_EHDR->e_phnum; ++i) {		      \
 		struct elf_phdr phdr = vsyscall_phdrs[i];		      \
 		if (phdr.p_type == PT_LOAD) {				      \
 			BUG_ON(ofs != 0);				      \
@@ -178,10 +186,10 @@ do {									      \
 #define ELF_CORE_WRITE_EXTRA_DATA					      \
 do {									      \
 	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VSYSCALL_BASE		      \
-					   + VSYSCALL_EHDR->e_phoff);	      \
+		(const struct elf_phdr *) (__VSYSCALL_BASE		      \
+					   + __VSYSCALL_EHDR->e_phoff);	      \
 	int i;								      \
-	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+	for (i = 0; i < __VSYSCALL_EHDR->e_phnum; ++i) {		      \
 		if (vsyscall_phdrs[i].p_type == PT_LOAD)		      \
 			DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr,	      \
 				   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));    \
@@ -190,4 +198,10 @@ do {									      \
 
 #endif
 
+#define __HAVE_ARCH_RANDOMIZE_BRK
+extern void randomize_brk(unsigned long old_brk);
+
+#define __HAVE_ARCH_VSYSCALL
+extern void map_vsyscall(void);
+
 #endif
--- linux/include/asm-ppc64/pgalloc.h.orig
+++ linux/include/asm-ppc64/pgalloc.h
@@ -11,6 +11,11 @@
 
 extern kmem_cache_t *zero_cache;
 
+/* Dummy functions since we don't support execshield on ppc */
+#define arch_add_exec_range(mm, limit) do { ; } while (0)
+#define arch_flush_exec_range(mm)      do { ; } while (0)
+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
+
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
--- linux/include/asm-sparc64/pgalloc.h.orig
+++ linux/include/asm-sparc64/pgalloc.h
@@ -260,4 +260,8 @@ static inline void pte_free(struct page 
 #define pgd_free(pgd)		free_pgd_fast(pgd)
 #define pgd_alloc(mm)		get_pgd_fast()
 
+#define arch_add_exec_range(mm, limit)		do { ; } while (0)
+#define arch_flush_exec_range(mm)		do { ; } while (0)
+#define arch_remove_exec_range(mm, limit)	do { ; } while (0)
+
 #endif /* _SPARC64_PGALLOC_H */
--- linux/include/asm-x86_64/processor.h.orig
+++ linux/include/asm-x86_64/processor.h
@@ -164,6 +164,15 @@ static inline void clear_in_cr4 (unsigne
  */
 #define TASK_SIZE	(0x800000000000UL)
 
+#define TASK_SIZE_64	(0x800000000000)
+
+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE_64)
+
+#define __HAVE_ARCH_ALIGN_STACK
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+  
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
--- linux/include/asm-x86_64/pgalloc.h.orig
+++ linux/include/asm-x86_64/pgalloc.h
@@ -7,6 +7,13 @@
 #include <linux/threads.h>
 #include <linux/mm.h>
 
+#define arch_add_exec_range(mm, limit) \
+		do { (void)(mm), (void)(limit); } while (0)
+#define arch_flush_exec_range(mm) \
+		do { (void)(mm); } while (0)
+#define arch_remove_exec_range(mm, limit) \
+		do { (void)(mm), (void)(limit); } while (0)
+
 #define pmd_populate_kernel(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
 #define pud_populate(mm, pud, pmd) \


Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-3/kernel-2.6.spec,v
retrieving revision 1.839
retrieving revision 1.840
diff -u -r1.839 -r1.840
--- kernel-2.6.spec	31 May 2005 06:47:14 -0000	1.839
+++ kernel-2.6.spec	1 Jun 2005 05:49:43 -0000	1.840
@@ -229,10 +229,8 @@
 # and patches related to how RPMs are built
 #
 Patch500: linux-2.4.0-nonintconfig.patch
-Patch511: linux-2.6.0-exec-shield.patch
-Patch512: linux-2.6.8-print-fatal-signals.patch
-Patch513: linux-2.6.8-execshield-vaspace.patch
-Patch515: linux-2.6.10-x86_64-read-implies-exec32.patch
+Patch511: linux-2.6.11-exec-shield.patch
+Patch512: linux-2.6.10-x86_64-read-implies-exec32.patch
 
 Patch530: linux-2.6.0-must_check.patch
 # Module signing infrastructure.
@@ -457,17 +455,10 @@
 #
 # The execshield patch series, broken into smaller pieces
 #
-# 1) Exec shield core
+# Exec shield core
 %patch511 -p1
-
-# 2) Option to printk fatal signals, useful for debugging
-%patch512 -p1
-
-# 3) The Execshield VA rearrangements
-%patch513 -p1
-
-# 4) Revert x86-64 read-implies-exec on 32 bit processes.
-%patch515 -p1 -R
+# Revert x86-64 read-implies-exec on 32 bit processes.
+%patch512 -p1 -R
 
 #
 # Patch that adds a __must_check attribute for functions for which checking
@@ -903,6 +894,9 @@
 %endif
 
 %changelog
+* Wed Jun  1 2005 Dave Jones <davej redhat com>
+- Exec-shield improvements (Should fix #154759)
+
 * Tue May 31 2005 Dave Jones <davej redhat com>
 - Rebase to 2.6.11.11
 

