rpms/kernel/devel unifdef-rename-getline-symbol.patch, NONE, 1.1 kernel.spec, 1.1411, 1.1412 linux-2.6-execshield.patch, 1.102, 1.103
Kyle McMartin
kyle at fedoraproject.org
Wed Mar 11 16:05:23 UTC 2009
Author: kyle
Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv21490
Modified Files:
kernel.spec linux-2.6-execshield.patch
Added Files:
unifdef-rename-getline-symbol.patch
Log Message:
* Wed Mar 11 2009 Kyle McMartin <kyle at redhat.com> 2.6.29-0.232.rc7.git4
- linux-2.6-execshield.patch:
Fix from H.J. Lu, we were doing 32-bit randomization on 64-bit vaddr
- unifdef-rename-getline-symbol.patch:
glibc 2.9.90-10 changes what symbols are exposed in stdio.h, causing
getline collision. rename the unifdef symbol to parseline.
unifdef-rename-getline-symbol.patch:
--- NEW FILE unifdef-rename-getline-symbol.patch ---
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 552025e..977e682 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -206,7 +206,7 @@ static void done(void);
static void error(const char *);
static int findsym(const char *);
static void flushline(bool);
-static Linetype getline(void);
+static Linetype parseline(void);
static Linetype ifeval(const char **);
static void ignoreoff(void);
static void ignoreon(void);
@@ -512,7 +512,7 @@ process(void)
for (;;) {
linenum++;
- lineval = getline();
+ lineval = parseline();
trans_table[ifstate[depth]][lineval]();
debug("process %s -> %s depth %d",
linetype_name[lineval],
@@ -526,7 +526,7 @@ process(void)
* help from skipcomment().
*/
static Linetype
-getline(void)
+parseline(void)
{
const char *cp;
int cursym;
Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.1411
retrieving revision 1.1412
diff -u -r1.1411 -r1.1412
--- kernel.spec 11 Mar 2009 13:44:06 -0000 1.1411
+++ kernel.spec 11 Mar 2009 16:04:52 -0000 1.1412
@@ -582,6 +582,9 @@
# ppc64 goes kerspalt with .eh_frame
Patch06: build-with-fno-dwarf2-cfi-asm.patch
+# glibc-headers 2.9.90-10 changes default _POSIX_C_SOURCE
+Patch07: unifdef-rename-getline-symbol.patch
+
%if !%{nopatches}
# revert upstream patches we get via other methods
@@ -1038,6 +1041,8 @@
ApplyPatch build-with-fno-dwarf2-cfi-asm.patch
+ApplyPatch unifdef-rename-getline-symbol.patch
+
#
# misc small stuff to make things compile
#
@@ -1819,6 +1824,13 @@
# and build.
%changelog
+* Wed Mar 11 2009 Kyle McMartin <kyle at redhat.com> 2.6.29-0.232.rc7.git4
+- linux-2.6-execshield.patch:
+ Fix from H.J. Lu, we were doing 32-bit randomization on 64-bit vaddr
+- unifdef-rename-getline-symbol.patch:
+ glibc 2.9.90-10 changes what symbols are exposed in stdio.h, causing
+ getline collision. rename the unifdef symbol to parseline.
+
* Wed Mar 11 2009 Dave Jones <davej at redhat.com>
- 2.6.29-rc7-git4
linux-2.6-execshield.patch:
Index: linux-2.6-execshield.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-execshield.patch,v
retrieving revision 1.102
retrieving revision 1.103
diff -u -r1.102 -r1.103
--- linux-2.6-execshield.patch 19 Jan 2009 06:23:28 -0000 1.102
+++ linux-2.6-execshield.patch 11 Mar 2009 16:04:53 -0000 1.103
@@ -73,7 +73,7 @@
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index ba3e2ff..42a65f4 100644
+index e299287..aaa8a35 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -113,6 +113,9 @@ struct pv_cpu_ops {
@@ -100,7 +100,7 @@
{
PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 091cd88..b1a6f6d 100644
+index 3bfd523..99c8119 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -158,6 +158,9 @@ static inline int hlt_works(int cpu)
@@ -140,10 +140,10 @@
if (!c->x86_model_id[0]) {
char *p;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index e4c8fb6..30f7508 100644
+index c6520a4..2066aa1 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
-@@ -326,6 +326,9 @@ struct pv_cpu_ops pv_cpu_ops = {
+@@ -352,6 +352,9 @@ struct pv_cpu_ops pv_cpu_ops = {
.read_tscp = native_read_tscp,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
@@ -154,10 +154,10 @@
.load_idt = native_load_idt,
.store_gdt = native_store_gdt,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index a546f55..f180caf 100644
+index bd4da2a..60823d4 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
-@@ -346,6 +346,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+@@ -343,6 +343,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
@@ -166,7 +166,7 @@
__asm__("movl %0, %%gs" : : "r"(0));
regs->fs = 0;
set_fs(USER_DS);
-@@ -355,6 +357,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+@@ -352,6 +354,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
regs->cs = __USER_CS;
regs->ip = new_ip;
regs->sp = new_sp;
@@ -178,7 +178,7 @@
/*
* Free the old FP and other extended state
*/
-@@ -522,7 +529,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -519,7 +526,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
__unlazy_fpu(prev_p);
@@ -188,7 +188,7 @@
/* we're going to use this soon, after a few expensive things */
if (next_p->fpu_counter > 5)
-@@ -695,3 +703,41 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+@@ -692,3 +700,41 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
unsigned long range_end = mm->brk + 0x02000000;
return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
@@ -252,10 +252,10 @@
if (!cpu_isset(cpu, flush_cpumask))
goto out;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 98c2d05..db483ea 100644
+index a9e7548..af0f8f0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
-@@ -154,6 +154,76 @@ static int lazy_iobitmap_copy(void)
+@@ -160,6 +160,76 @@ static int lazy_iobitmap_copy(void)
return 0;
}
@@ -332,7 +332,7 @@
#endif
static void __kprobes
-@@ -317,6 +387,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -323,6 +393,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
if (!user_mode(regs))
goto gp_in_kernel;
@@ -362,7 +362,7 @@
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 13;
-@@ -923,19 +1016,37 @@ do_device_not_available(struct pt_regs *regs, long error)
+@@ -934,19 +1027,37 @@ dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
}
#ifdef CONFIG_X86_32
@@ -410,10 +410,10 @@
#endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 88f1b10..1e31b5e 100644
+index 2cef050..a18ae07 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
-@@ -575,7 +575,7 @@ static int disable_nx __initdata;
+@@ -617,7 +617,7 @@ static int disable_nx __initdata;
* Control non executable mappings.
*
* on Enable
@@ -422,7 +422,7 @@
*/
static int __init noexec_setup(char *str)
{
-@@ -584,14 +584,12 @@ static int __init noexec_setup(char *str)
+@@ -626,14 +626,12 @@ static int __init noexec_setup(char *str)
__supported_pte_mask |= _PAGE_NX;
disable_nx = 0;
}
@@ -443,7 +443,7 @@
return 0;
}
-@@ -850,7 +848,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+@@ -892,7 +890,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
set_nx();
if (nx_enabled)
printk(KERN_INFO "NX (Execute Disable) protection: active\n");
@@ -456,10 +456,10 @@
/* Enable PSE if available */
if (cpu_has_pse)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 56fe712..ec932ae 100644
+index 56fe712..30d2be7 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
-@@ -111,13 +111,15 @@ static unsigned long mmap_legacy_base(void)
+@@ -111,13 +111,16 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
@@ -471,7 +471,8 @@
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-+ if (!(current->personality & READ_IMPLIES_EXEC))
++ if (!(current->personality & READ_IMPLIES_EXEC) &&
++ mmap_is_ia32())
+ mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
mm->unmap_area = arch_unmap_area_topdown;
}
@@ -490,7 +491,7 @@
ret = addr;
goto up_fail;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index bea2152..d750141 100644
+index b58e963..cdc83ce 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -316,6 +316,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
@@ -529,7 +530,7 @@
.load_idt = xen_load_idt,
.load_tls = xen_load_tls,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index e3ff2b9..d46dcf2 100644
+index 33b7235..ce1f044 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -80,7 +80,7 @@ static struct linux_binfmt elf_format = {
@@ -612,10 +613,10 @@
min_flt = task->min_flt;
maj_flt = task->maj_flt;
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index e8ddc98..bf87f4a 100644
+index 065cdf8..aa94aa9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
-@@ -1122,7 +1122,13 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1135,7 +1135,13 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
@@ -662,7 +663,7 @@
/*
* GPG2 wants 64kB of mlocked memory, to make sure pass phrases
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 4cae9b8..71fe593 100644
+index 8c216e0..79eca33 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -98,6 +98,9 @@ struct robust_list_head;
@@ -675,7 +676,7 @@
/*
* List of flags we want to share for kernel threads,
* if only because they are not used by them anyway.
-@@ -343,6 +346,10 @@ extern int sysctl_max_map_count;
+@@ -346,6 +349,10 @@ extern int sysctl_max_map_count;
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -687,7 +688,7 @@
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 368d163..2e4ab66 100644
+index c5ef44f..f7abce4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -85,6 +85,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
@@ -717,7 +718,7 @@
#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable;
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
-@@ -378,6 +398,14 @@ static struct ctl_table kern_table[] = {
+@@ -379,6 +399,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
@@ -733,7 +734,7 @@
.procname = "core_uses_pid",
.data = &core_uses_pid,
diff --git a/mm/mmap.c b/mm/mmap.c
-index 8d95902..c84ff1f 100644
+index 00ced3e..931bc3b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -27,6 +27,7 @@
@@ -781,7 +782,7 @@
}
/*
-@@ -799,6 +816,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -802,6 +819,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
} else /* cases 2, 5, 7 */
vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL);
@@ -790,7 +791,7 @@
return prev;
}
-@@ -954,7 +973,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -956,7 +975,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -800,7 +801,7 @@
if (addr & ~PAGE_MASK)
return addr;
-@@ -1439,13 +1459,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1436,13 +1456,17 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
}
unsigned long
@@ -821,7 +822,7 @@
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
addr = get_area(file, addr, len, pgoff, flags);
-@@ -1459,8 +1483,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+@@ -1456,8 +1480,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
return arch_rebalance_pgtables(addr, len);
}
@@ -899,7 +900,7 @@
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-@@ -1535,6 +1627,14 @@ out:
+@@ -1532,6 +1624,14 @@ out:
return prev ? prev->vm_next : vma;
}
@@ -914,7 +915,7 @@
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -1551,7 +1651,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1548,7 +1648,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -923,7 +924,7 @@
return -ENOMEM;
/* mlock limit tests */
-@@ -1861,10 +1961,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1858,10 +1958,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
@@ -940,7 +941,7 @@
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
return 0;
-@@ -2111,6 +2215,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2110,6 +2214,7 @@ void exit_mmap(struct mm_struct *mm)
vm_unacct_memory(nr_accounted);
free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
@@ -949,7 +950,7 @@
/*
* Walk the list again, actually closing and freeing it,
diff --git a/mm/mprotect.c b/mm/mprotect.c
-index abe2694..eb16148 100644
+index 258197b..9af91a3 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -25,9 +25,14 @@
@@ -976,7 +977,7 @@
pgoff_t pgoff;
int error;
int dirty_accountable = 0;
-@@ -202,6 +207,9 @@ success:
+@@ -203,6 +208,9 @@ success:
dirty_accountable = 1;
}
More information about the fedora-extras-commits
mailing list