rpms/kernel/devel linux-2.6-debug-periodic-slab-check.patch, 1.2, 1.3 kernel-2.6.spec, 1.1896, 1.1897

fedora-cvs-commits at redhat.com fedora-cvs-commits at redhat.com
Thu Feb 2 23:36:27 UTC 2006


Author: davej

Update of /cvs/dist/rpms/kernel/devel
In directory cvs.devel.redhat.com:/tmp/cvs-serv17052

Modified Files:
	kernel-2.6.spec 
Added Files:
	linux-2.6-debug-periodic-slab-check.patch 
Log Message:
Enable periodic slab checking again.


linux-2.6-debug-periodic-slab-check.patch:
 slab.c |  134 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 131 insertions(+), 3 deletions(-)

Index: linux-2.6-debug-periodic-slab-check.patch
===================================================================
RCS file: linux-2.6-debug-periodic-slab-check.patch
diff -N linux-2.6-debug-periodic-slab-check.patch
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ linux-2.6-debug-periodic-slab-check.patch	2 Feb 2006 23:36:23 -0000	1.3
@@ -0,0 +1,199 @@
+
+ mm/slab.c |  134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 131 insertions(+), 3 deletions(-)
+
+Index: 2.6-git/mm/slab.c
+===================================================================
+--- 2.6-git.orig/mm/slab.c
++++ 2.6-git/mm/slab.c
+@@ -202,7 +202,7 @@
+ 
+ typedef unsigned int kmem_bufctl_t;
+ #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
+-#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
++#define BUFCTL_ALLOC	(((kmem_bufctl_t)(~0U))-1)
+ #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
+ 
+ /* Max number of objs-per-slab for caches which use off-slab slabs.
+@@ -430,6 +430,11 @@ struct kmem_cache {
+ 	 */
+ 	int obj_offset;
+ 	int obj_size;
++
++	/*
++	 * Time for next cache verification.
++	 */
++	unsigned long next_verify;
+ #endif
+ };
+ 
+@@ -445,6 +450,7 @@ struct kmem_cache {
+  */
+ #define REAPTIMEOUT_CPUC	(2*HZ)
+ #define REAPTIMEOUT_LIST3	(4*HZ)
++#define REDZONETIMEOUT		(300*HZ)
+ 
+ #if STATS
+ #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
+@@ -1923,6 +1929,11 @@ kmem_cache_create (const char *name, siz
+ 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
+ 	}
+ 
++#if DEBUG
++	cachep->next_verify = jiffies + REDZONETIMEOUT +
++		((unsigned long)cachep/L1_CACHE_BYTES)%REDZONETIMEOUT;
++#endif
++
+ 	/* cache setup completed, link it into the list */
+ 	list_add(&cachep->next, &cache_chain);
+ 	unlock_cpu_hotplug();
+@@ -2251,7 +2262,7 @@ static void *slab_get_obj(struct kmem_ca
+ 	slabp->inuse++;
+ 	next = slab_bufctl(slabp)[slabp->free];
+ #if DEBUG
+-	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
++	slab_bufctl(slabp)[slabp->free] = BUFCTL_ALLOC;
+ 	WARN_ON(slabp->nodeid != nodeid);
+ #endif
+ 	slabp->free = next;
+@@ -2268,7 +2279,7 @@ static void slab_put_obj(struct kmem_cac
+ 	/* Verify that the slab belongs to the intended node */
+ 	WARN_ON(slabp->nodeid != nodeid);
+ 
+-	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
++	if (slab_bufctl(slabp)[objnr] != BUFCTL_ALLOC) {
+ 		printk(KERN_ERR "slab: double free detected in cache "
+ 		       "'%s', objp %p\n", cachep->name, objp);
+ 		BUG();
+@@ -3266,6 +3277,116 @@ static int alloc_kmemlist(struct kmem_ca
+ 	return err;
+ }
+ 
++#if DEBUG
++
++static void verify_slab_redzone(struct kmem_cache *cache, struct slab *slab)
++{
++	int i;
++
++	if (!(cache->flags & SLAB_RED_ZONE))
++		return;
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	/* Page alloc debugging is on for this cache: mapping and unmapping
++	 * happen without any locking, so checking in parallel is unsafe.
++	 */
++	if ((cache->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cache))
++		return;
++#endif
++
++	for (i = 0; i < cache->num; i++) {
++		unsigned long red1, red2;
++		void *obj = slab->s_mem + cache->buffer_size * i;
++
++		red1 = *dbg_redzone1(cache, obj);
++		red2 = *dbg_redzone2(cache, obj);
++
++		/* Simplest case: redzones marked as inactive.  */
++		if (red1 == RED_INACTIVE && red2 == RED_INACTIVE)
++			continue;
++
++		/*
++		 * Tricky case: if the bufctl value is BUFCTL_ALLOC, then
++		 * the object is either allocated or somewhere in a cpu
++		 * cache. The cpu caches are lockless and there might be
++		 * a concurrent alloc/free call, thus we must accept random
++		 * combinations of RED_ACTIVE and _INACTIVE
++		 */
++		if (slab_bufctl(slab)[i] == BUFCTL_ALLOC &&
++				(red1 == RED_INACTIVE || red1 == RED_ACTIVE) &&
++				(red2 == RED_INACTIVE || red2 == RED_ACTIVE))
++			continue;
++
++		printk(KERN_ERR "slab %s: redzone mismatch in slab %p,"
++	       " obj %p, bufctl 0x%x\n", cache->name, slab, obj,
++		       slab_bufctl(slab)[i]);
++
++		print_objinfo(cache, obj, 2);
++	}
++}
++
++static void print_invalid_slab(const char *list_name, struct kmem_cache *cache,
++			       struct slab *slab)
++{
++	printk(KERN_ERR "slab %s: invalid slab found in %s list at %p (%d/%d).\n",
++	       cache->name, list_name, slab, slab->inuse, cache->num);
++}
++
++static void verify_nodelists(struct kmem_cache *cache,
++			     struct kmem_list3 *lists)
++{
++	struct list_head *q;
++	struct slab *slab;
++
++	list_for_each(q, &lists->slabs_full) {
++		slab = list_entry(q, struct slab, list);
++
++		if (slab->inuse != cache->num)
++			print_invalid_slab("full", cache, slab);
++
++		check_slabp(cache, slab);
++		verify_slab_redzone(cache, slab);
++	}
++	list_for_each(q, &lists->slabs_partial) {
++		slab = list_entry(q, struct slab, list);
++
++		if (slab->inuse == cache->num || slab->inuse == 0)
++			print_invalid_slab("partial", cache, slab);
++
++		check_slabp(cache, slab);
++		verify_slab_redzone(cache, slab);
++	}
++	list_for_each(q, &lists->slabs_free) {
++		slab = list_entry(q, struct slab, list);
++
++		if (slab->inuse != 0)
++			print_invalid_slab("free", cache, slab);
++
++		check_slabp(cache, slab);
++		verify_slab_redzone(cache, slab);
++	}
++}
++
++/*
++ * Perform a self test on all slabs from a cache
++ */
++static void verify_cache(struct kmem_cache *cache)
++{
++	int node;
++
++	check_spinlock_acquired(cache);
++
++	for_each_online_node(node) {
++		struct kmem_list3 *lists = cache->nodelists[node];
++
++		if (!lists)
++			continue;
++		verify_nodelists(cache, lists);
++	}
++}
++
++#endif
++
+ struct ccupdate_struct {
+ 	struct kmem_cache *cachep;
+ 	struct array_cache *new[NR_CPUS];
+@@ -3446,6 +3567,13 @@ static void cache_reap(void *unused)
+ 		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
+ 				   numa_node_id());
+ 
++#if DEBUG
++		if (time_before(searchp->next_verify, jiffies)) {
++			searchp->next_verify = jiffies + REDZONETIMEOUT;
++			verify_cache(searchp);
++		}
++#endif
++
+ 		if (time_after(l3->next_reap, jiffies))
+ 			goto next_unlock;
+ 
+
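
A note on the scheduling above: kmem_cache_create() seeds each cache's
next_verify with the 300-second REDZONETIMEOUT plus an offset derived
from the cache's own address, so the per-cache checks are staggered
rather than all coming due in the same cache_reap() pass; once a check
fires, cache_reap() re-arms it a flat REDZONETIMEOUT later.  A minimal
userspace sketch of the seeding arithmetic (the HZ value, the jiffies
snapshot and the cache addresses below are made up for illustration):

/* Sketch of the next_verify seeding done in kmem_cache_create().
 * Userspace stand-ins, not kernel code. */
#include <stdio.h>

#define HZ		1000UL			/* assumed tick rate */
#define REDZONETIMEOUT	(300 * HZ)
#define L1_CACHE_BYTES	64UL

static unsigned long jiffies = 100000UL;	/* pretend "now" */

static unsigned long first_verify(unsigned long cache_addr)
{
	/* Base period plus an address-derived offset, so different
	 * caches come due at different times. */
	return jiffies + REDZONETIMEOUT +
		(cache_addr / L1_CACHE_BYTES) % REDZONETIMEOUT;
}

int main(void)
{
	unsigned long a = 0x12345600UL;	/* two hypothetical caches, */
	unsigned long b = a + 4096UL;	/* one page apart */

	printf("cache a first verifies at jiffy %lu\n", first_verify(a));
	printf("cache b first verifies at jiffy %lu\n", first_verify(b));
	return 0;
}

The modulo keeps the offset under one full period, so a freshly
created cache always gets its first check within two periods.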

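The BUFCTL_FREE -> BUFCTL_ALLOC rename in the first hunk reflects what
the sentinel actually marks: slab_get_obj() stores it into an object's
bufctl entry on allocation, and slab_put_obj() treats any other value
there as proof that the object is already back on the freelist.  A toy
single-slab model of that detection (userspace, no locking; the toy_*
names are invented, and the real code threads the indices through
slab_bufctl()):

#include <assert.h>
#include <stdio.h>

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_ALLOC	(((kmem_bufctl_t)(~0U))-1)

#define NUM_OBJS	4

static kmem_bufctl_t bufctl[NUM_OBJS];	/* per-object freelist links */
static kmem_bufctl_t free_head;		/* first free obj, or BUFCTL_END */

static void toy_slab_init(void)
{
	kmem_bufctl_t i;

	for (i = 0; i < NUM_OBJS - 1; i++)
		bufctl[i] = i + 1;		/* chain the free objects */
	bufctl[NUM_OBJS - 1] = BUFCTL_END;
	free_head = 0;
}

static kmem_bufctl_t toy_get_obj(void)
{
	kmem_bufctl_t objnr = free_head;

	assert(objnr != BUFCTL_END);	/* slab must not be full */
	free_head = bufctl[objnr];
	bufctl[objnr] = BUFCTL_ALLOC;	/* mark the slot allocated */
	return objnr;
}

static void toy_put_obj(kmem_bufctl_t objnr)
{
	if (bufctl[objnr] != BUFCTL_ALLOC) {
		printf("double free detected, obj %u\n", objnr);
		return;
	}
	bufctl[objnr] = free_head;	/* push back on the freelist */
	free_head = objnr;
}

int main(void)
{
	kmem_bufctl_t obj;

	toy_slab_init();
	obj = toy_get_obj();
	toy_put_obj(obj);		/* fine */
	toy_put_obj(obj);		/* caught: entry is not BUFCTL_ALLOC */
	return 0;
}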

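verify_slab_redzone() also has to tolerate one benign race: an object
whose bufctl entry is BUFCTL_ALLOC may be sitting in a lockless
per-cpu array and can be allocated or freed while the walk runs, so
its two redzone words may legitimately hold any mix of RED_ACTIVE and
RED_INACTIVE; only values outside that set get reported.  The
acceptance rule restated standalone (userspace sketch; the RED_*
patterns are the ones mm/slab.c used at the time):

#include <stdio.h>

#define RED_INACTIVE	0x5A2CF071UL	/* object is free */
#define RED_ACTIVE	0x170FC2A5UL	/* object is in use */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_ALLOC	(((kmem_bufctl_t)(~0U))-1)

/* Return 1 if the (red1, red2) pair is acceptable for an object whose
 * bufctl entry is bufctl, per the rules in verify_slab_redzone(). */
static int redzones_ok(kmem_bufctl_t bufctl, unsigned long red1,
		       unsigned long red2)
{
	/* Object on the slab freelist: both zones must be inactive. */
	if (red1 == RED_INACTIVE && red2 == RED_INACTIVE)
		return 1;
	/* Object possibly mid-flight in a per-cpu array: accept any
	 * combination of the two valid patterns. */
	if (bufctl == BUFCTL_ALLOC &&
	    (red1 == RED_INACTIVE || red1 == RED_ACTIVE) &&
	    (red2 == RED_INACTIVE || red2 == RED_ACTIVE))
		return 1;
	return 0;			/* anything else is corruption */
}

int main(void)
{
	/* Allocated object caught between redzone updates: accepted. */
	printf("%d\n", redzones_ok(BUFCTL_ALLOC, RED_ACTIVE, RED_INACTIVE));
	/* Free object with a scribbled-over front zone: reported. */
	printf("%d\n", redzones_ok(2, 0xdeadbeefUL, RED_INACTIVE));
	return 0;
}

The same reasoning is behind the CONFIG_DEBUG_PAGEALLOC bail-out:
those objects are mapped and unmapped with no locking at all, so even
reading a concurrently freed object's redzones could fault.
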
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.1896
retrieving revision 1.1897
diff -u -r1.1896 -r1.1897
--- kernel-2.6.spec	2 Feb 2006 21:20:36 -0000	1.1896
+++ kernel-2.6.spec	2 Feb 2006 23:36:22 -0000	1.1897
@@ -295,6 +295,7 @@
 Patch1028: linux-2.6-debug-oops-pause.patch
 Patch1029: linux-2.6-debug-account-kmalloc.patch
 Patch1030: linux-2.6-debug-latency-tracing.patch
+Patch1031: linux-2.6-debug-periodic-slab-check.patch
 
 # Restrict /dev/mem usage.
 Patch1050: linux-2.6-devmem.patch
@@ -738,6 +739,7 @@
 %patch1028 -p1
 #%patch1029 -p1
 #%patch1030 -p1
+%patch1031 -p1
 
 #
 # Make /dev/mem a need-to-know function 
@@ -1383,6 +1385,7 @@
 %changelog
 * Thu Feb  2 2006 Dave Jones <davej at redhat.com>
 - 2.6.16rc1-git6
+- enable the periodic slab debugger again.
 
 * Wed Feb  1 2006 Dave Jones <davej at redhat.com>
 - Woo, 2.6.16rc1-git5 (at last)



