rpms/kernel/devel kernel-2.6.spec, 1.2698, 1.2699 linux-2.6-lockdep-fixes.patch, 1.28, 1.29
fedora-cvs-commits at redhat.com
fedora-cvs-commits at redhat.com
Wed Sep 27 03:37:58 UTC 2006
Author: davej
Update of /cvs/dist/rpms/kernel/devel
In directory cvs.devel.redhat.com:/tmp/cvs-serv31725
Modified Files:
kernel-2.6.spec linux-2.6-lockdep-fixes.patch
Log Message:
lockdep fixes
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.2698
retrieving revision 1.2699
diff -u -r1.2698 -r1.2699
--- kernel-2.6.spec 26 Sep 2006 06:16:18 -0000 1.2698
+++ kernel-2.6.spec 27 Sep 2006 03:37:55 -0000 1.2699
@@ -1926,6 +1926,7 @@
%changelog
* Tue Sep 26 2006 Dave Jones <davej at redhat.com>
- Enable serverworks IDE driver for x86-64.
+- More lockdep fixes.
* Mon Sep 25 2006 Dave Jones <davej at redhat.com>
- Disable 31bit s390 kernel builds.
linux-2.6-lockdep-fixes.patch:
a/drivers/net/forcedeth.c | 31 ++++++++------
a/fs/nfsd/nfsproc.c | 2
a/fs/nfsd/vfs.c | 8 +--
a/include/linux/nfsd/nfsfh.h | 11 ++++-
a/kernel/lockdep.c | 13 ++++++
a/net/ipv6/tcp_ipv6.c | 2
linux-2.6.17-mm6/include/net/sock.h | 33 ++++++++++++++-
linux-2.6.17.noarch/drivers/char/rtc.c | 5 +-
linux-2.6.18-rc1/drivers/input/serio/libps2.c | 6 +-
linux-2.6.18-rc1/include/linux/libps2.h | 12 +++++
linux-2.6.18-rc1/net/socket.c | 8 +++
linux-2.6/mm/slab.c | 55 +++++++++++++++++++-------
12 files changed, 141 insertions(+), 45 deletions(-)
Index: linux-2.6-lockdep-fixes.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/linux-2.6-lockdep-fixes.patch,v
retrieving revision 1.28
retrieving revision 1.29
diff -u -r1.28 -r1.29
--- linux-2.6-lockdep-fixes.patch 19 Sep 2006 20:36:35 -0000 1.28
+++ linux-2.6-lockdep-fixes.patch 27 Sep 2006 03:37:55 -0000 1.29
@@ -911,3 +911,138 @@
new-bd_mutex-lockdep-annotation.patch
nfsd-lockdep-annotation.patch
+Date: Wed, 13 Sep 2006 10:56:32 +0200
+From: Peter Zijlstra <pzijlstr at redhat.com>
+Subject: [RHEL5 PATCH] Slab fix alien cache lockdep warnings
+
+https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=203098
+
+This patch was not queued for .18, afaik.
+
+---
+From: Ravikiran G Thirumalai <kiran at scalex86.org>
+
+Place the alien array cache locks of on-slab malloc slab caches on a
+separate lockdep class. This avoids false positives from lockdep.
+
+Signed-off-by: Ravikiran Thirumalai <kiran at scalex86.org>
+Signed-off-by: Shai Fultheim <shai at scalex86.org>
+Cc: Thomas Gleixner <tglx at linutronix.de>
+Acked-by: Arjan van de Ven <arjan at linux.intel.com>
+Cc: Ingo Molnar <mingo at elte.hu>
+Cc: Pekka Enberg <penberg at cs.helsinki.fi>
+Cc: Christoph Lameter <clameter at engr.sgi.com>
+Signed-off-by: Andrew Morton <akpm at osdl.org>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+---
+
+ mm/slab.c | 55 ++++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 42 insertions(+), 13 deletions(-)
+
+Index: linux-2.6/mm/slab.c
+===================================================================
+--- linux-2.6.orig/mm/slab.c
++++ linux-2.6/mm/slab.c
+@@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = {
+ #endif
+ };
+
++#define BAD_ALIEN_MAGIC 0x01020304ul
++
+ #ifdef CONFIG_LOCKDEP
+
+ /*
+@@ -682,29 +684,53 @@ static struct kmem_cache cache_cache = {
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
++ *
++ * We set lock class for alien array caches which are up during init.
++ * The lock annotation will be lost if all cpus of a node goes down and
++ * then comes back up during hotplug
+ */
+-static struct lock_class_key on_slab_key;
++static struct lock_class_key on_slab_l3_key;
++static struct lock_class_key on_slab_alc_key;
++
++static inline void init_lock_keys(void)
+
+-static inline void init_lock_keys(struct cache_sizes *s)
+ {
+ int q;
++ struct cache_sizes *s = malloc_sizes;
+
+- for (q = 0; q < MAX_NUMNODES; q++) {
+- if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+- continue;
+- lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+- &on_slab_key);
++ while (s->cs_size != ULONG_MAX) {
++ for_each_node(q) {
++ struct array_cache **alc;
++ int r;
++ struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
++ if (!l3 || OFF_SLAB(s->cs_cachep))
++ continue;
++ lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
++ alc = l3->alien;
++ /*
++ * FIXME: This check for BAD_ALIEN_MAGIC
++ * should go away when common slab code is taught to
++ * work even without alien caches.
++ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
++ * for alloc_alien_cache,
++ */
++ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
++ continue;
++ for_each_node(r) {
++ if (alc[r])
++ lockdep_set_class(&alc[r]->lock,
++ &on_slab_alc_key);
++ }
++ }
++ s++;
+ }
+ }
+-
+ #else
+-static inline void init_lock_keys(struct cache_sizes *s)
++static inline void init_lock_keys(void)
+ {
+ }
+ #endif
+
+-
+-
+ /* Guard access to the cache-chain. */
+ static DEFINE_MUTEX(cache_chain_mutex);
+ static struct list_head cache_chain;
+@@ -1092,7 +1118,7 @@ static inline int cache_free_alien(struc
+
+ static inline struct array_cache **alloc_alien_cache(int node, int limit)
+ {
+- return (struct array_cache **) 0x01020304ul;
++ return (struct array_cache **)BAD_ALIEN_MAGIC;
+ }
+
+ static inline void free_alien_cache(struct array_cache **ac_ptr)
+@@ -1422,7 +1448,6 @@ void __init kmem_cache_init(void)
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
+ NULL, NULL);
+ }
+- init_lock_keys(sizes);
+
+ sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+ sizes->cs_size,
+@@ -1495,6 +1520,10 @@ void __init kmem_cache_init(void)
+ mutex_unlock(&cache_chain_mutex);
+ }
+
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
++
+ /* Done! */
+ g_cpucache_up = FULL;
+
+
More information about the fedora-cvs-commits
mailing list