rpms/kernel/FC-5 linux-2.6.16-cachefiles.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-afs.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-filp.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-fsmisc.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-misc.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-mkwrite.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-nfs.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-nspace.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-radix-tree.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs-relpage.patch, NONE, 1.1.4.1 linux-2.6.16-cachefs.patch, NONE, 1.1.4.1 linux-2.6.16-fscache.patch, NONE, 1.1.4.1 kernel-2.6.spec, 1.2114, 1.2114.2.1

fedora-cvs-commits at redhat.com
Wed May 10 19:54:16 UTC 2006


Author: steved

Update of /cvs/dist/rpms/kernel/FC-5
In directory cvs.devel.redhat.com:/tmp/cvs-serv13284

Modified Files:
      Tag: kernel-2_6_16-1_2114_FC5_cachefs
	kernel-2.6.spec 
Added Files:
      Tag: kernel-2_6_16-1_2114_FC5_cachefs
	linux-2.6.16-cachefiles.patch linux-2.6.16-cachefs-afs.patch 
	linux-2.6.16-cachefs-filp.patch 
	linux-2.6.16-cachefs-fsmisc.patch 
	linux-2.6.16-cachefs-misc.patch 
	linux-2.6.16-cachefs-mkwrite.patch 
	linux-2.6.16-cachefs-nfs.patch 
	linux-2.6.16-cachefs-nspace.patch 
	linux-2.6.16-cachefs-radix-tree.patch 
	linux-2.6.16-cachefs-relpage.patch linux-2.6.16-cachefs.patch 
	linux-2.6.16-fscache.patch 
Log Message:
- Added cachefs bits to private branch


linux-2.6.16-cachefiles.patch:
 Kconfig                   |   12 
 Makefile                  |    1 
 cachefiles/Makefile       |   16 
 cachefiles/cf-bind.c      |  360 +++++++++++++
 cachefiles/cf-interface.c | 1242 ++++++++++++++++++++++++++++++++++++++++++++++
 cachefiles/cf-key.c       |  160 +++++
 cachefiles/cf-main.c      |  167 ++++++
 cachefiles/cf-namei.c     |  817 ++++++++++++++++++++++++++++++
 cachefiles/cf-proc.c      |  510 ++++++++++++++++++
 cachefiles/cf-xattr.c     |  284 ++++++++++
 cachefiles/internal.h     |  273 ++++++++++
 11 files changed, 3840 insertions(+), 2 deletions(-)

--- NEW FILE linux-2.6.16-cachefiles.patch ---
--- linux-2.6.16.noarch/fs/Kconfig.cachefiles	2006-04-11 21:48:34.000000000 -0400
+++ linux-2.6.16.noarch/fs/Kconfig	2006-04-11 21:49:39.000000000 -0400
@@ -526,8 +526,8 @@ config CACHEFS
 	depends on FSCACHE
 	help
 	  This filesystem acts as a cache for other filesystems - primarily
-	  networking filesystems - rather than thus allowing fast local disc to
-	  enhance the speed of slower devices.
+	  networking filesystems - thus allowing fast local disk to enhance the
+	  speed of slower devices.
 
 	  It is a filesystem so that raw block devices can be made use of more
 	  efficiently, without suffering any overhead from intermediary
@@ -540,6 +540,14 @@ config CACHEFS
 
 	  See Documentation/filesystems/caching/cachefs.txt for more information.
 
+config CACHEFILES
+	tristate "Filesystem caching on files"
+	depends on FSCACHE
+	help
+	  This permits use of a mounted filesystem as a cache for other
+	  filesystems - primarily networking filesystems - thus allowing fast
+	  local disk to enhance the speed of slower devices.
+
 endmenu
 
 menu "CD-ROM/DVD Filesystems"
--- linux-2.6.16.noarch/fs/Makefile.cachefiles	2006-04-11 21:48:34.000000000 -0400
+++ linux-2.6.16.noarch/fs/Makefile	2006-04-11 21:49:39.000000000 -0400
@@ -103,6 +103,7 @@ obj-$(CONFIG_BEFS_FS)		+= befs/
 obj-$(CONFIG_HOSTFS)		+= hostfs/
 obj-$(CONFIG_HPPFS)		+= hppfs/
 obj-$(CONFIG_CACHEFS)		+= cachefs/
+obj-$(CONFIG_CACHEFILES)	+= cachefiles/
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
 obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
--- /dev/null	2006-04-02 15:08:25.450456288 -0400
+++ linux-2.6.16.noarch/fs/cachefiles/Makefile	2006-04-11 21:49:52.000000000 -0400
@@ -0,0 +1,16 @@
+#
+# Makefile for caching in a mounted filesystem
+#
+
+#CFLAGS += -finstrument-functions
+
+cachefiles-objs := \
+	cf-bind.o \
+	cf-interface.o \
+	cf-key.o \
+	cf-main.o \
+	cf-namei.o \
+	cf-proc.o \
+	cf-xattr.o
+
+obj-$(CONFIG_CACHEFILES) := cachefiles.o
--- /dev/null	2006-04-02 15:08:25.450456288 -0400
+++ linux-2.6.16.noarch/fs/cachefiles/cf-interface.c	2006-04-11 21:49:51.000000000 -0400
@@ -0,0 +1,1242 @@
+/* cf-interface.c: CacheFiles to FS-Cache interface
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/statfs.h>
+#include <linux/buffer_head.h>
+#include "internal.h"
+
+#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+#define log2(n) ffz(~(n))
+
+/*****************************************************************************/
+/*
+ * look up the nominated node in this cache, creating it if necessary
+ */
+static struct fscache_object *cachefiles_lookup_object(
+	struct fscache_cache *_cache,
+	struct fscache_object *_parent,
+	struct fscache_cookie *cookie)
+{
+	struct cachefiles_object *parent, *object;
+	struct cachefiles_cache *cache;
+	struct cachefiles_xattr *auxdata;
+	unsigned keylen, auxlen;
+	void *buffer;
+	char *key;
+	int ret;
+
+	ASSERT(_parent);
+
+	cache = container_of(_cache, struct cachefiles_cache, cache);
+	parent = container_of(_parent, struct cachefiles_object, fscache);
+
+	//printk("\n");
+	_enter("{%s},%p,%p", cache->cache.identifier, parent, cookie);
+
+	/* create a new object record and a temporary leaf image */
+	object = kmem_cache_alloc(cachefiles_object_jar, SLAB_KERNEL);
+	if (!object)
+		goto nomem_object;
+
+	atomic_set(&object->usage, 1);
+	atomic_set(&object->fscache_usage, 1);
+
+	fscache_object_init(&object->fscache);
+	object->fscache.cookie = cookie;
+	object->fscache.cache = parent->fscache.cache;
+
+	object->type = cookie->def->type;
+
+	/* get hold of the raw key
+	 * - stick the length on the front and leave space on the back for the
+	 *   encoder
+	 */
+	buffer = kmalloc((2 + 512) + 3, GFP_KERNEL);
+	if (!buffer)
+		goto nomem_buffer;
+
+	keylen = cookie->def->get_key(cookie->netfs_data, buffer + 2, 512);
+	ASSERTCMP(keylen, <, 512);
+
+	*(uint16_t *)buffer = keylen;
+	((char *)buffer)[keylen + 2] = 0;
+	((char *)buffer)[keylen + 3] = 0;
+	((char *)buffer)[keylen + 4] = 0;
+
+	/* turn the raw key into something that can work with as a filename */
+	key = cachefiles_cook_key(buffer, keylen + 2, object->type);
+	if (!key)
+		goto nomem_key;
+
+	/* get hold of the auxiliary data and prepend the object type */
+	auxdata = buffer;
+	auxlen = 0;
+	if (cookie->def->get_aux) {
+		auxlen = cookie->def->get_aux(cookie->netfs_data,
+					      auxdata->data, 511);
+		ASSERTCMP(auxlen, <, 511);
+	}
+
+	auxdata->len = auxlen + 1;
+	auxdata->type = cookie->def->type;
+
+	/* look up the key, creating any missing bits */
+	ret = cachefiles_walk_to_object(parent, object, key, auxdata);
+	if (ret < 0)
+		goto lookup_failed;
+
+	kfree(buffer);
+	kfree(key);
+	_leave(" = %p", &object->fscache);
+	return &object->fscache;
+
+lookup_failed:
+	kmem_cache_free(cachefiles_object_jar, object);
+	kfree(buffer);
+	kfree(key);
+	kleave(" = %d", ret);
+	return ERR_PTR(ret);
+
+nomem_key:
+	kfree(buffer);
+nomem_buffer:
+	kmem_cache_free(cachefiles_object_jar, object);
+nomem_object:
+	kleave(" = -ENOMEM");
+	return ERR_PTR(-ENOMEM);
+
+} /* end cachefiles_lookup_object() */
+
+/*****************************************************************************/
+/*
+ * increment the usage count on an inode object (may fail if unmounting)
+ */
+static struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
+{
+	struct cachefiles_object *object;
+
+	_enter("%p", _object);
+
+	object = container_of(_object, struct cachefiles_object, fscache);
+
+#ifdef CACHEFILES_DEBUG_SLAB
+	ASSERT((atomic_read(&object->fscache_usage) & 0xffff0000) != 0x6b6b0000);
+#endif
+
+	atomic_inc(&object->fscache_usage);
[...3495 lines suppressed...]
+	dput(root);
+error_open_root:
+	kmem_cache_free(cachefiles_object_jar, fsdef);
+error_root_object:
+	kerror("Failed to register: %d", ret);
+	return ret;
+
+} /* end cachefiles_proc_add_cache() */
+
+/*****************************************************************************/
+/*
+ * unbind a cache on fd release
+ */
+void cachefiles_proc_unbind(struct cachefiles_cache *cache)
+{
+	_enter("");
+
+	if (test_bit(CACHEFILES_READY, &cache->flags)) {
+		printk(KERN_INFO "CacheFiles:"
+		       " File cache on %s unregistering\n",
+		       cache->cache.identifier);
+
+		fscache_withdraw_cache(&cache->cache);
+	}
+
+	if (cache->cache.fsdef)
+		cache->cache.ops->put_object(cache->cache.fsdef);
+
+	dput(cache->graveyard);
+	mntput(cache->mnt);
+
+	kfree(cache->rootdirname);
+	kfree(cache->tag);
+
+	_leave("");
+
+} /* end cachefiles_proc_unbind() */
--- /dev/null	2006-04-02 15:08:25.450456288 -0400
+++ linux-2.6.16.noarch/fs/cachefiles/cf-key.c	2006-04-11 21:49:51.000000000 -0400
@@ -0,0 +1,160 @@
+/* cf-key.c: Key to pathname encoder
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include "internal.h"
+
+static const char cachefiles_charmap[64] =
+	"0123456789"			/* 0 - 9 */
+	"abcdefghijklmnopqrstuvwxyz"	/* 10 - 35 */
+	"ABCDEFGHIJKLMNOPQRSTUVWXYZ"	/* 36 - 61 */
+	"_-"				/* 62 - 63 */
+	;
+
+static const char cachefiles_filecharmap[256] = {
+	/* we skip space and tab and control chars */
+	[ 33 ... 46 ] = 1,		/* '!' -> '.' */
+	/* we skip '/' as it's significant to pathwalk */
+	[ 48 ... 127 ] = 1,		/* '0' -> '~' */
+};
+
+/*****************************************************************************/
+/*
+ * turn the raw key into something cooked
+ * - the raw key should include the length in the two bytes at the front
+ * - the key may be up to 514 bytes in length (including the length word)
+ *   - "base64" encode the strange keys, mapping 3 bytes of raw to four of
+ *     cooked
+ *   - need to cut the cooked key into 252 char lengths (189 raw bytes)
+ */
+char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
+{
+	unsigned char csum, ch;
+	unsigned int acc;
+	char *key;
+	int loop, len, max, seg, mark, print;
+
+	_enter(",%d", keylen);
+
+	BUG_ON(keylen < 2 || keylen > 514);
+
+	csum = raw[0] + raw[1];
+	print = 1;
+	for (loop = 2; loop < keylen; loop++) {
+		ch = raw[loop];
+		csum += ch;
+		print &= cachefiles_filecharmap[ch];
+	}
+
+	if (print) {
+		/* if the path is usable ASCII, then we render it directly */
+		max = keylen - 2;
+		max += 2;	/* two base64'd length chars on the front */
+		max += 5;	/* @checksum/M */
+		max += 3 * 2;	/* maximum number of segment dividers (".../M")
+				 * is ((514 + 251) / 252) = 3
+				 */
+		max += 1;	/* NUL on end */
+	}
+	else {
+		/* calculate the maximum length of the cooked key */
+		keylen = (keylen + 2) / 3;
+
+		max = keylen * 4;
+		max += 5;	/* @checksum/M */
+		max += 3 * 2;	/* maximum number of segment dividers (".../M")
+				 * is ((514 + 188) / 189) = 3
+				 */
+		max += 1;	/* NUL on end */
+	}
+
+	_debug("max: %d", max);
+
+	key = kmalloc(max, GFP_KERNEL);
+	if (!key)
+		return NULL;
+
+	len = 0;
+
+	/* build the cooked key */
+	sprintf(key, "@%02x/+", (unsigned) csum);
+	len = 5;
+	mark = len - 1;
+
+	if (print) {
+		acc = *(uint16_t *) raw;
+		raw += 2;
+
+		key[len + 1] = cachefiles_charmap[acc & 63];
+		acc >>= 6;
+		key[len] = cachefiles_charmap[acc & 63];
+		len += 2;
+
+		seg = 250;
+		for (loop = keylen; loop > 0; loop--) {
+			if (seg <= 0) {
+				key[len++] = '/';
+				mark = len;
+				key[len++] = '+';
+				seg = 252;
+			}
+
+			key[len++] = *raw++;
+			ASSERT(len < max);
+		}
+
+		switch (type) {
+		case FSCACHE_COOKIE_TYPE_INDEX:		type = 'I';	break;
+		case FSCACHE_COOKIE_TYPE_DATAFILE:	type = 'D';	break;
+		default:				type = 'S';	break;
+		}
+	}
+	else {
+		seg = 252;
+		for (loop = keylen; loop > 0; loop--) {
+			if (seg <= 0) {
+				key[len++] = '/';
+				mark = len;
+				key[len++] = '+';
+				seg = 252;
+			}
+
+			acc = *raw++;
+			acc |= *raw++ << 8;
+			acc |= *raw++ << 16;
+
+			_debug("acc: %06x", acc);
+
+			key[len++] = cachefiles_charmap[acc & 63];
+			acc >>= 6;
+			key[len++] = cachefiles_charmap[acc & 63];
+			acc >>= 6;
+			key[len++] = cachefiles_charmap[acc & 63];
+			acc >>= 6;
+			key[len++] = cachefiles_charmap[acc & 63];
+
+			ASSERT(len < max);
+		}
+
+		switch (type) {
+		case FSCACHE_COOKIE_TYPE_INDEX:		type = 'J';	break;
+		case FSCACHE_COOKIE_TYPE_DATAFILE:	type = 'E';	break;
+		default:				type = 'T';	break;
+		}
+	}
+
+	key[mark] = type;
+	key[len] = 0;
+
+	_leave(" = %p %d:[%s]", key, len, key);
+	return key;
+
+} /* end cachefiles_cook_key() */
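
For reference, cachefiles_cook_key() above packs each group of 3 raw key bytes
into 4 characters drawn from the 64-entry filename-safe alphabet, low six bits
first. The following standalone userspace sketch reproduces just that packing
step (the sample key bytes are hypothetical, and the checksum prefix, type
character and 252-character segment splitting described in the comment block
are omitted):

#include <stdio.h>
#include <stdint.h>

static const char charmap[64] =
	"0123456789"
	"abcdefghijklmnopqrstuvwxyz"
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	"_-";

int main(void)
{
	const uint8_t raw[6] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02 };	/* hypothetical key bytes */
	char out[sizeof(raw) / 3 * 4 + 1];
	unsigned int acc, i, len = 0;

	/* pack each group of 3 raw bytes into 4 filename-safe chars, low bits first */
	for (i = 0; i < sizeof(raw); i += 3) {
		acc = raw[i] | raw[i + 1] << 8 | raw[i + 2] << 16;
		out[len++] = charmap[acc & 63]; acc >>= 6;
		out[len++] = charmap[acc & 63]; acc >>= 6;
		out[len++] = charmap[acc & 63]; acc >>= 6;
		out[len++] = charmap[acc & 63];
	}
	out[len] = 0;
	printf("%s\n", out);	/* 8 encoded characters for 6 raw bytes */
	return 0;
}

In the kernel code proper, the cooked key additionally gains a checksum prefix
("@%02x/") and a type marker character ('I', 'D' or 'S' for directly rendered
keys; 'J', 'E' or 'T' for base64-encoded ones), as implemented above.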

linux-2.6.16-cachefs-afs.patch:
 b/fs/Kconfig         |    7 +
 b/fs/afs/cell.c      |  109 +++++++++++++---------
 b/fs/afs/cell.h      |   16 ---
 b/fs/afs/cmservice.c |    2 
 b/fs/afs/dir.c       |   15 +--
 b/fs/afs/file.c      |  224 +++++++++++++++++++++++++++++++--------------
 b/fs/afs/fsclient.c  |    4 
 b/fs/afs/inode.c     |   43 ++++++--
 b/fs/afs/internal.h  |   24 +---
 b/fs/afs/main.c      |   24 ++--
 b/fs/afs/mntpt.c     |   12 +-
 b/fs/afs/proc.c      |    1 
 b/fs/afs/server.c    |    3 
 b/fs/afs/vlocation.c |  185 +++++++++++++++++++++++--------------
 b/fs/afs/vnode.c     |  249 +++++++++++++++++++++++++++++++++++++++++----------
 b/fs/afs/vnode.h     |   10 +-
 b/fs/afs/volume.c    |   78 +++++----------
 b/fs/afs/volume.h    |   28 +----
 fs/afs/cache.h       |   27 -----
 19 files changed, 655 insertions(+), 406 deletions(-)

--- NEW FILE linux-2.6.16-cachefs-afs.patch ---
[PATCH] FS-Cache: Make kAFS use FS-Cache

From: David Howells <dhowells at redhat.com>

The attached patch makes the kAFS filesystem in fs/afs/ use FS-Cache and,
through it, any attached caches (a condensed sketch of the API usage follows
the diffstat below).

Signed-Off-By: David Howells <dhowells at redhat.com>
---

 fs/Kconfig         |    7 +
 fs/afs/cache.h     |   27 ------
 fs/afs/cell.c      |  109 ++++++++++++++---------
 fs/afs/cell.h      |   16 +--
 fs/afs/cmservice.c |    2 
 fs/afs/dir.c       |   15 +--
 fs/afs/file.c      |  224 ++++++++++++++++++++++++++++++++---------------
 fs/afs/fsclient.c  |    4 +
 fs/afs/inode.c     |   43 ++++++---
 fs/afs/internal.h  |   24 ++---
 fs/afs/main.c      |   24 ++---
 fs/afs/mntpt.c     |   12 +--
 fs/afs/proc.c      |    1 
 fs/afs/server.c    |    3 -
 fs/afs/vlocation.c |  185 ++++++++++++++++++++++++---------------
 fs/afs/vnode.c     |  249 +++++++++++++++++++++++++++++++++++++++++++---------
 fs/afs/vnode.h     |   10 +-
 fs/afs/volume.c    |   78 ++++++----------
 fs/afs/volume.h    |   28 +-----
 19 files changed, 655 insertions(+), 406 deletions(-)
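
For orientation before the individual hunks: the conversion below follows the
FS-Cache netfs pattern - register a struct fscache_netfs once at module init,
acquire cookies for indices and data files beneath its primary index, and
relinquish them on teardown. A condensed, hypothetical sketch of that shape
(names such as "examplefs" are illustrative only; it assumes the
<linux/fscache.h> API introduced by linux-2.6.16-fscache.patch and is not
buildable on its own):

#include <linux/init.h>
#include <linux/fscache.h>

struct examplefs_inode {
	struct fscache_cookie	*cache;
	/* ... netfs-private state ... */
};

static struct fscache_netfs_operations examplefs_cache_ops = {
};

static struct fscache_netfs examplefs_cache_netfs = {
	.name		= "examplefs",
	.version	= 0,
	.ops		= &examplefs_cache_ops,
};

/* hypothetical cookie definition for one cached data file; a real definition
 * also supplies get_key(), get_aux(), check_aux() etc., as in the hunks below */
static struct fscache_cookie_def examplefs_file_cache_def = {
	.name		= "examplefs.file",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
};

static int __init examplefs_init(void)
{
	/* make the netfs known to FS-Cache (cf. afs_init() below) */
	return fscache_register_netfs(&examplefs_cache_netfs);
}

static void examplefs_open_file(struct examplefs_inode *ei)
{
	/* attach a cache cookie to the file (cf. afs_inode_fetch_status()) */
	ei->cache = fscache_acquire_cookie(examplefs_cache_netfs.primary_index,
					   &examplefs_file_cache_def, ei);
}

static void examplefs_close_file(struct examplefs_inode *ei)
{
	/* drop the cookie, keeping the cached data (cf. afs_clear_inode()) */
	fscache_relinquish_cookie(ei->cache, 0);
	ei->cache = NULL;
}

Page I/O then goes through fscache_read_or_alloc_page(), fscache_write_page()
and fscache_uncache_page(), as the fs/afs/file.c hunks below show.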

diff --git a/fs/Kconfig b/fs/Kconfig
index 23c0d74..f2193b3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1820,6 +1820,13 @@ config AFS_FS
 
 	  If unsure, say N.
 
+config AFS_FSCACHE
+	bool "Provide AFS client caching support"
+	depends on AFS_FS && FSCACHE && EXPERIMENTAL
+	help
+	  Say Y here if you want AFS data to be cached locally on disk through the
+	  generic filesystem cache manager
+
 config RXRPC
 	tristate
 
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
deleted file mode 100644
index 9eb7722..0000000
--- a/fs/afs/cache.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* cache.h: AFS local cache management interface
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells at redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_CACHE_H
-#define _LINUX_AFS_CACHE_H
-
-#undef AFS_CACHING_SUPPORT
-
-#include <linux/mm.h>
-#ifdef AFS_CACHING_SUPPORT
-#include <linux/cachefs.h>
-#endif
-#include "types.h"
-
-#ifdef __KERNEL__
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_CACHE_H */
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 009a9ae..93a0846 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -31,17 +31,21 @@ static DEFINE_RWLOCK(afs_cells_lock);
 static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
 static struct afs_cell *afs_cell_root;
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
-						const void *entry);
-static void afs_cell_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_cache_cell_index_def = {
-	.name			= "cell_ix",
-	.data_size		= sizeof(struct afs_cache_cell),
-	.keys[0]		= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
-	.match			= afs_cell_cache_match,
-	.update			= afs_cell_cache_update,
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
+				       void *buffer, uint16_t buflen);
+static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
+				       void *buffer, uint16_t buflen);
+static fscache_checkaux_t afs_cell_cache_check_aux(void *cookie_netfs_data,
+						   const void *buffer,
+						   uint16_t buflen);
+
+static struct fscache_cookie_def afs_cell_cache_index_def = {
+	.name		= "AFS cell",
+	.type		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= afs_cell_cache_get_key,
+	.get_aux	= afs_cell_cache_get_aux,
+	.check_aux	= afs_cell_cache_check_aux,
 };
 #endif
 
@@ -115,12 +119,11 @@ int afs_cell_create(const char *name, ch
 	if (ret < 0)
 		goto error;
 
-#ifdef AFS_CACHING_SUPPORT
-	/* put it up for caching */
-	cachefs_acquire_cookie(afs_cache_netfs.primary_index,
-			       &afs_vlocation_cache_index_def,
-			       cell,
-			       &cell->cache);
+#ifdef CONFIG_AFS_FSCACHE
+	/* put it up for caching (this never returns an error) */
+	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
+					     &afs_cell_cache_index_def,
+					     cell);
 #endif
 
 	/* add to the cell lists */
@@ -345,8 +348,8 @@ static void afs_cell_destroy(struct afs_
 	list_del_init(&cell->proc_link);
 	up_write(&afs_proc_cells_sem);
 
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_relinquish_cookie(cell->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_relinquish_cookie(cell->cache, 0);
 #endif
 
 	up_write(&afs_cells_sem);
@@ -526,44 +529,62 @@ void afs_cell_purge(void)
 
 /*****************************************************************************/
 /*
- * match a cell record obtained from the cache
+ * set the key for the index entry
  */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
-						const void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
+				       void *buffer, uint16_t bufmax)
 {
-	const struct afs_cache_cell *ccell = entry;
-	struct afs_cell *cell = target;
+	const struct afs_cell *cell = cookie_netfs_data;
+	uint16_t klen;
 
-	_enter("{%s},{%s}", ccell->name, cell->name);
+	_enter("%p,%p,%u", cell, buffer, bufmax);
 
-	if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
-		_leave(" = SUCCESS");
-		return CACHEFS_MATCH_SUCCESS;
-	}
+	klen = strlen(cell->name);
+	if (klen > bufmax)
+		return 0;
+
+	memcpy(buffer, cell->name, klen);
+	return klen;
 
-	_leave(" = FAILED");
-	return CACHEFS_MATCH_FAILED;
-} /* end afs_cell_cache_match() */
+} /* end afs_cell_cache_get_key() */
 #endif
 
 /*****************************************************************************/
 /*
- * update a cell record in the cache
+ * provide new auxilliary cache data
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_cell_cache_update(void *source, void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
+				       void *buffer, uint16_t bufmax)
 {
-	struct afs_cache_cell *ccell = entry;
-	struct afs_cell *cell = source;
+	const struct afs_cell *cell = cookie_netfs_data;
+	uint16_t dlen;
 
-	_enter("%p,%p", source, entry);
+	_enter("%p,%p,%u", cell, buffer, bufmax);
 
-	strncpy(ccell->name, cell->name, sizeof(ccell->name));
+	dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]);
+	dlen = min(dlen, bufmax);
+	dlen &= ~(sizeof(cell->vl_addrs[0]) - 1);
 
-	memcpy(ccell->vl_servers,
-	       cell->vl_addrs,
-	       min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
+	memcpy(buffer, cell->vl_addrs, dlen);
+
+	return dlen;
+
+} /* end afs_cell_cache_get_aux() */
+#endif
+
+/*****************************************************************************/
+/*
+ * check that the auxilliary data indicates that the entry is still valid
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static fscache_checkaux_t afs_cell_cache_check_aux(void *cookie_netfs_data,
+						   const void *buffer,
+						   uint16_t buflen)
+{
+	_leave(" = OKAY");
+	return FSCACHE_CHECKAUX_OKAY;
 
-} /* end afs_cell_cache_update() */
+} /* end afs_cell_cache_check_aux() */
 #endif
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
index 4834910..d670502 100644
--- a/fs/afs/cell.h
+++ b/fs/afs/cell.h
@@ -13,7 +13,7 @@
 #define _LINUX_AFS_CELL_H
 
 #include "types.h"
-#include "cache.h"
+#include <linux/fscache.h>
 
 #define AFS_CELL_MAX_ADDRS 15
 
@@ -21,16 +21,6 @@ extern volatile int afs_cells_being_purg
 
 /*****************************************************************************/
 /*
- * entry in the cached cell catalogue
- */
-struct afs_cache_cell
-{
-	char			name[64];	/* cell name (padded with NULs) */
-	struct in_addr		vl_servers[15];	/* cached cell VL servers */
-};
-
-/*****************************************************************************/
-/*
  * AFS cell record
  */
 struct afs_cell
@@ -39,8 +29,8 @@ struct afs_cell
 	struct list_head	link;		/* main cell list link */
 	struct list_head	proc_link;	/* /proc cell list link */
 	struct proc_dir_entry	*proc_dir;	/* /proc dir for this cell */
-#ifdef AFS_CACHING_SUPPORT
-	struct cachefs_cookie	*cache;		/* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+	struct fscache_cookie	*cache;		/* caching cookie */
 #endif
 
 	/* server record management */
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 9eef6bf..699e74e 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -24,7 +24,7 @@
 #include "internal.h"
 
 static unsigned afscm_usage;		/* AFS cache manager usage count */
-static struct rw_semaphore afscm_sem;	/* AFS cache manager start/stop semaphore */
+static DECLARE_RWSEM(afscm_sem);	/* AFS cache manager start/stop semaphore */
 
 static int afscm_new_call(struct rxrpc_call *call);
 static void afscm_attention(struct rxrpc_call *call);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 5c61c24..9435565 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -145,7 +145,7 @@ static inline void afs_dir_check_page(st
 	qty /= sizeof(union afs_dir_block);
 
 	/* check them */
-	dbuf = page_address(page);
+	dbuf = kmap_atomic(page, KM_USER0);
 	for (tmp = 0; tmp < qty; tmp++) {
 		if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
 			printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
@@ -154,12 +154,12 @@ static inline void afs_dir_check_page(st
 			goto error;
 		}
 	}
+	kunmap_atomic(dbuf, KM_USER0);
 
-	SetPageChecked(page);
 	return;
 
  error:
-	SetPageChecked(page);
+	kunmap_atomic(dbuf, KM_USER0);
 	SetPageError(page);
 
 } /* end afs_dir_check_page() */
@@ -170,7 +170,6 @@ static inline void afs_dir_check_page(st
  */
 static inline void afs_dir_put_page(struct page *page)
 {
-	kunmap(page);
 	page_cache_release(page);
 
 } /* end afs_dir_put_page() */
@@ -190,11 +189,9 @@ static struct page *afs_dir_get_page(str
 			       NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
-		kmap(page);
 		if (!PageUptodate(page))
 			goto fail;
-		if (!PageChecked(page))
-			afs_dir_check_page(dir, page);
+		afs_dir_check_page(dir, page);
 		if (PageError(page))
 			goto fail;
 	}
@@ -359,7 +356,7 @@ static int afs_dir_iterate(struct inode 
 
 		limit = blkoff & ~(PAGE_SIZE - 1);
 
-		dbuf = page_address(page);
+		dbuf = kmap_atomic(page, KM_USER0);
 
 		/* deal with the individual blocks stashed on this page */
 		do {
@@ -368,6 +365,7 @@ static int afs_dir_iterate(struct inode 
 			ret = afs_dir_iterate_block(fpos, dblock, blkoff,
 						    cookie, filldir);
 			if (ret != 1) {
+				kunmap_atomic(dbuf, KM_USER0);
 				afs_dir_put_page(page);
 				goto out;
 			}
@@ -376,6 +374,7 @@ static int afs_dir_iterate(struct inode 
 
 		} while (*fpos < dir->i_size && blkoff < limit);
 
+		kunmap_atomic(dbuf, KM_USER0);
 		afs_dir_put_page(page);
 		ret = 0;
 	}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 150b192..dbac862 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -16,12 +16,15 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/pagevec.h>
 #include <linux/buffer_head.h>
 #include "volume.h"
 #include "vnode.h"
 #include <rxrpc/call.h>
 #include "internal.h"
 
+#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+
 #if 0
 static int afs_file_open(struct inode *inode, struct file *file);
 static int afs_file_release(struct inode *inode, struct file *file);
@@ -30,30 +33,68 @@ static int afs_file_release(struct inode
 static int afs_file_readpage(struct file *file, struct page *page);
 static int afs_file_invalidatepage(struct page *page, unsigned long offset);
 static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
+static int afs_file_mmap(struct file * file, struct vm_area_struct * vma);
+
+#ifdef CONFIG_AFS_FSCACHE
+static int afs_file_readpages(struct file *filp, struct address_space *mapping,
+			      struct list_head *pages, unsigned nr_pages);
+static int afs_file_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+#endif
 
 struct inode_operations afs_file_inode_operations = {
 	.getattr	= afs_inode_getattr,
 };
 
+struct file_operations afs_file_file_operations = {
+	.read		= generic_file_read,
+	.mmap		= afs_file_mmap,
+};
+
 struct address_space_operations afs_fs_aops = {
 	.readpage	= afs_file_readpage,
+#ifdef CONFIG_AFS_FSCACHE
+	.readpages	= afs_file_readpages,
+#endif
 	.sync_page	= block_sync_page,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
 	.releasepage	= afs_file_releasepage,
 	.invalidatepage	= afs_file_invalidatepage,
 };
 
+static struct vm_operations_struct afs_fs_vm_operations = {
+	.nopage		= filemap_nopage,
+	.populate	= filemap_populate,
+#ifdef CONFIG_AFS_FSCACHE
+	.page_mkwrite	= afs_file_page_mkwrite,
+#endif
+};
+
+/*****************************************************************************/
+/*
+ * set up a memory mapping on an AFS file
+ * - we set our own VMA ops so that we can catch the page becoming writable for
+ *   userspace for shared-writable mmap
+ */
+static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	_enter("");
+
+	file_accessed(file);
+	vma->vm_ops = &afs_fs_vm_operations;
+	return 0;
+
+} /* end afs_file_mmap() */
+
 /*****************************************************************************/
 /*
  * deal with notification that a page was read from the cache
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_file_readpage_read_complete(void *cookie_data,
-					    struct page *page,
+#ifdef CONFIG_AFS_FSCACHE
+static void afs_file_readpage_read_complete(struct page *page,
 					    void *data,
 					    int error)
 {
-	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
+	_enter("%p,%p,%d", page, data, error);
 
 	if (error)
 		SetPageError(page);
@@ -68,15 +109,16 @@ static void afs_file_readpage_read_compl
 /*
  * deal with notification that a page was written to the cache
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_file_readpage_write_complete(void *cookie_data,
-					     struct page *page,
+#ifdef CONFIG_AFS_FSCACHE
+static void afs_file_readpage_write_complete(struct page *page,
 					     void *data,
 					     int error)
 {
-	_enter("%p,%p,%p,%d", cookie_data, page, data, error);
+	_enter("%p,%p,%d", page, data, error);
 
-	unlock_page(page);
+	/* note that the page has been written to the cache and can now be
+	 * modified */
+	end_page_fs_misc(page);
 
 } /* end afs_file_readpage_write_complete() */
 #endif
@@ -88,16 +130,13 @@ static void afs_file_readpage_write_comp
 static int afs_file_readpage(struct file *file, struct page *page)
 {
 	struct afs_rxfs_fetch_descriptor desc;
-#ifdef AFS_CACHING_SUPPORT
-	struct cachefs_page *pageio;
-#endif
 	struct afs_vnode *vnode;
 	struct inode *inode;
 	int ret;
 
 	inode = page->mapping->host;
 
-	_enter("{%lu},{%lu}", inode->i_ino, page->index);
+	_enter("{%lu},%p{%lu}", inode->i_ino, page, page->index);
 
 	vnode = AFS_FS_I(inode);
 
@@ -107,13 +146,9 @@ static int afs_file_readpage(struct file
 	if (vnode->flags & AFS_VNODE_DELETED)
 		goto error;
 
-#ifdef AFS_CACHING_SUPPORT
-	ret = cachefs_page_get_private(page, &pageio, GFP_NOIO);
-	if (ret < 0)
-		goto error;
-
+#ifdef CONFIG_AFS_FSCACHE
 	/* is it cached? */
-	ret = cachefs_read_or_alloc_page(vnode->cache,
+	ret = fscache_read_or_alloc_page(vnode->cache,
 					 page,
 					 afs_file_readpage_read_complete,
 					 NULL,
@@ -123,18 +158,20 @@ static int afs_file_readpage(struct file
 #endif
 
 	switch (ret) {
-		/* read BIO submitted and wb-journal entry found */
-	case 1:
-		BUG(); // TODO - handle wb-journal match
-
 		/* read BIO submitted (page in cache) */
 	case 0:
 		break;
 
-		/* no page available in cache */
-	case -ENOBUFS:
+		/* page not yet cached */
 	case -ENODATA:
+		_debug("cache said ENODATA");
+		goto go_on;
+
+		/* page will not be cached */
+	case -ENOBUFS:
+		_debug("cache said ENOBUFS");
 	default:
+	go_on:
 		desc.fid	= vnode->fid;
 		desc.offset	= page->index << PAGE_CACHE_SHIFT;
 		desc.size	= min((size_t) (inode->i_size - desc.offset),
@@ -148,34 +185,40 @@ static int afs_file_readpage(struct file
 		ret = afs_vnode_fetch_data(vnode, &desc);
 		kunmap(page);
 		if (ret < 0) {
-			if (ret==-ENOENT) {
-				_debug("got NOENT from server"
+			if (ret == -ENOENT) {
+				kdebug("got NOENT from server"
 				       " - marking file deleted and stale");
 				vnode->flags |= AFS_VNODE_DELETED;
 				ret = -ESTALE;
 			}
 
-#ifdef AFS_CACHING_SUPPORT
-			cachefs_uncache_page(vnode->cache, page);
+#ifdef CONFIG_AFS_FSCACHE
+			fscache_uncache_page(vnode->cache, page);
+			ClearPagePrivate(page);
 #endif
 			goto error;
 		}
 
 		SetPageUptodate(page);
 
-#ifdef AFS_CACHING_SUPPORT
-		if (cachefs_write_page(vnode->cache,
-				       page,
-				       afs_file_readpage_write_complete,
-				       NULL,
-				       GFP_KERNEL) != 0
-		    ) {
-			cachefs_uncache_page(vnode->cache, page);
-			unlock_page(page);
+		/* send the page to the cache */
+#ifdef CONFIG_AFS_FSCACHE
+		if (PagePrivate(page)) {
+			if (TestSetPageFsMisc(page))
+				BUG();
+			if (fscache_write_page(vnode->cache,
+					       page,
+					       afs_file_readpage_write_complete,
+					       NULL,
+					       GFP_KERNEL) != 0
+			    ) {
+				fscache_uncache_page(vnode->cache, page);
+				ClearPagePrivate(page);
+				end_page_fs_misc(page);
+			}
 		}
-#else
-		unlock_page(page);
 #endif
+		unlock_page(page);
 	}
 
 	_leave(" = 0");
@@ -192,20 +235,63 @@ static int afs_file_readpage(struct file
 
 /*****************************************************************************/
 /*
- * get a page cookie for the specified page
+ * read a set of pages
  */
-#ifdef AFS_CACHING_SUPPORT
-int afs_cache_get_page_cookie(struct page *page,
-			      struct cachefs_page **_page_cookie)
+#ifdef CONFIG_AFS_FSCACHE
+static int afs_file_readpages(struct file *filp, struct address_space *mapping,
+			      struct list_head *pages, unsigned nr_pages)
 {
-	int ret;
+	struct afs_vnode *vnode;
+#if 0
+	struct pagevec lru_pvec;
+	unsigned page_idx;
+#endif
+	int ret = 0;
 
-	_enter("");
-	ret = cachefs_page_get_private(page,_page_cookie, GFP_NOIO);
+	_enter(",{%lu},,%d", mapping->host->i_ino, nr_pages);
 
-	_leave(" = %d", ret);
+	vnode = AFS_FS_I(mapping->host);
+	if (vnode->flags & AFS_VNODE_DELETED) {
+		_leave(" = -ESTALE");
+		return -ESTALE;
+	}
+
+	/* attempt to read as many of the pages as possible */
+	ret = fscache_read_or_alloc_pages(vnode->cache,
+					  mapping,
+					  pages,
+					  &nr_pages,
+					  afs_file_readpage_read_complete,
+					  NULL,
+					  mapping_gfp_mask(mapping));
+
+	switch (ret) {
+		/* all pages are being read from the cache */
+	case 0:
+		BUG_ON(!list_empty(pages));
+		BUG_ON(nr_pages != 0);
+		_leave(" = 0 [reading all]");
+		return 0;
+
+		/* there were pages that couldn't be read from the cache */
+	case -ENODATA:
+	case -ENOBUFS:
+		break;
+
+		/* other error */
+	default:
+		_leave(" = %d", ret);
+		return ret;
+	}
+
+	/* load the missing pages from the network */
+	ret = read_cache_pages(mapping, pages,
+			       (void *) afs_file_readpage, NULL);
+
+	_leave(" = %d [netting]", ret);
 	return ret;
-} /* end afs_cache_get_page_cookie() */
+
+} /* end afs_file_readpages() */
 #endif
 
 /*****************************************************************************/
@@ -221,19 +307,12 @@ static int afs_file_invalidatepage(struc
 	BUG_ON(!PageLocked(page));
 
 	if (PagePrivate(page)) {
-#ifdef AFS_CACHING_SUPPORT
-		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
-		cachefs_uncache_page(vnode->cache,page);
-#endif
-
 		/* We release buffers only if the entire page is being
 		 * invalidated.
 		 * The get_block cached value has been unconditionally
 		 * invalidated, so real IO is not possible anymore.
 		 */
 		if (offset == 0) {
-			BUG_ON(!PageLocked(page));
-
 			ret = 0;
 			if (!PageWriteback(page))
 				ret = page->mapping->a_ops->releasepage(page,
@@ -243,6 +322,7 @@ static int afs_file_invalidatepage(struc
 
 	_leave(" = %d", ret);
 	return ret;
+
 } /* end afs_file_invalidatepage() */
 
 /*****************************************************************************/
@@ -251,23 +331,29 @@ static int afs_file_invalidatepage(struc
  */
 static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
 {
-	struct cachefs_page *pageio;
-
 	_enter("{%lu},%x", page->index, gfp_flags);
 
-	if (PagePrivate(page)) {
-#ifdef AFS_CACHING_SUPPORT
-		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
-		cachefs_uncache_page(vnode->cache, page);
+#ifdef CONFIG_AFS_FSCACHE
+	wait_on_page_fs_misc(page);
+	fscache_uncache_page(AFS_FS_I(page->mapping->host)->cache, page);
+	ClearPagePrivate(page);
 #endif
 
-		pageio = (struct cachefs_page *) page_private(page);
-		set_page_private(page, 0);
-		ClearPagePrivate(page);
-
-		kfree(pageio);
-	}
-
 	_leave(" = 0");
 	return 0;
+
 } /* end afs_file_releasepage() */
+
+/*****************************************************************************/
+/*
+ * wait for the disc cache to finish writing before permitting modification of
+ * our page in the page cache
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static int afs_file_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	wait_on_page_fs_misc(page);
+	return 0;
+
+} /* end afs_file_page_mkwrite() */
+#endif
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 61bc371..c88c41a 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -398,6 +398,8 @@ int afs_rxfs_fetch_file_status(struct af
 		bp++; /* spare6 */
 	}
 
+	_debug("Data Version %llx\n", vnode->status.version);
+
 	/* success */
 	ret = 0;
 
@@ -408,7 +410,7 @@ int afs_rxfs_fetch_file_status(struct af
  out_put_conn:
 	afs_server_release_callslot(server, &callslot);
  out:
-	_leave("");
+	_leave(" = %d", ret);
 	return ret;
 
  abort:
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4ebb30a..d188380 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -65,6 +65,11 @@ static int afs_inode_map_status(struct a
 		return -EBADMSG;
 	}
 
+#ifdef CONFIG_AFS_FSCACHE
+	if (vnode->status.size != inode->i_size)
+		fscache_set_i_size(vnode->cache, vnode->status.size);
+#endif
+
 	inode->i_nlink		= vnode->status.nlink;
 	inode->i_uid		= vnode->status.owner;
 	inode->i_gid		= 0;
@@ -101,13 +106,33 @@ static int afs_inode_fetch_status(struct
 	struct afs_vnode *vnode;
 	int ret;
 
+	_enter("");
+
 	vnode = AFS_FS_I(inode);
 
 	ret = afs_vnode_fetch_status(vnode);
 
-	if (ret == 0)
+	if (ret == 0) {
+#ifdef CONFIG_AFS_FSCACHE
+		if (vnode->cache == FSCACHE_NEGATIVE_COOKIE) {
+			vnode->cache =
+				fscache_acquire_cookie(vnode->volume->cache,
+						       &afs_vnode_cache_index_def,
+						       vnode);
+			if (!vnode->cache)
+				printk("Negative\n");
+		}
+#endif
 		ret = afs_inode_map_status(vnode);
+#ifdef CONFIG_AFS_FSCACHE
+		if (ret < 0) {
+			fscache_relinquish_cookie(vnode->cache, 0);
+			vnode->cache = FSCACHE_NEGATIVE_COOKIE;
+		}
+#endif
+	}
 
+	_leave(" = %d", ret);
 	return ret;
 
 } /* end afs_inode_fetch_status() */
@@ -122,6 +147,7 @@ static int afs_iget5_test(struct inode *
 
 	return inode->i_ino == data->fid.vnode &&
 		inode->i_version == data->fid.unique;
+
 } /* end afs_iget5_test() */
 
 /*****************************************************************************/
@@ -179,20 +205,11 @@ inline int afs_iget(struct super_block *
 		return ret;
 	}
 
-#ifdef AFS_CACHING_SUPPORT
-	/* set up caching before reading the status, as fetch-status reads the
-	 * first page of symlinks to see if they're really mntpts */
-	cachefs_acquire_cookie(vnode->volume->cache,
-			       NULL,
-			       vnode,
-			       &vnode->cache);
-#endif
-
 	/* okay... it's a new inode */
 	inode->i_flags |= S_NOATIME;
 	vnode->flags |= AFS_VNODE_CHANGED;
 	ret = afs_inode_fetch_status(inode);
-	if (ret<0)
+	if (ret < 0)
 		goto bad_inode;
 
 	/* success */
@@ -278,8 +295,8 @@ void afs_clear_inode(struct inode *inode
 
 	afs_vnode_give_up_callback(vnode);
 
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_relinquish_cookie(vnode->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_relinquish_cookie(vnode->cache, 0);
 	vnode->cache = NULL;
 #endif
 
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index ab8f87c..b98335a 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -16,15 +16,17 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/fscache.h>
 
 /*
  * debug tracing
  */
-#define kenter(FMT, a...)	printk("==> %s("FMT")\n",__FUNCTION__ , ## a)
-#define kleave(FMT, a...)	printk("<== %s()"FMT"\n",__FUNCTION__ , ## a)
-#define kdebug(FMT, a...)	printk(FMT"\n" , ## a)
-#define kproto(FMT, a...)	printk("### "FMT"\n" , ## a)
-#define knet(FMT, a...)		printk(FMT"\n" , ## a)
+#define __kdbg(FMT, a...)	printk("[%05d] "FMT"\n", current->pid , ## a)
+#define kenter(FMT, a...)	__kdbg("==> %s("FMT")", __FUNCTION__ , ## a)
+#define kleave(FMT, a...)	__kdbg("<== %s()"FMT, __FUNCTION__ , ## a)
+#define kdebug(FMT, a...)	__kdbg(FMT , ## a)
+#define kproto(FMT, a...)	__kdbg("### "FMT , ## a)
+#define knet(FMT, a...)		__kdbg(FMT , ## a)
 
 #ifdef __KDEBUG
 #define _enter(FMT, a...)	kenter(FMT , ## a)
@@ -56,9 +58,6 @@ static inline void afs_discard_my_signal
  */
 extern struct rw_semaphore afs_proc_cells_sem;
 extern struct list_head afs_proc_cells;
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_cache_cell_index_def;
-#endif
 
 /*
  * dir.c
@@ -72,11 +71,6 @@ extern struct file_operations afs_dir_fi
 extern struct address_space_operations afs_fs_aops;
 extern struct inode_operations afs_file_inode_operations;
 
-#ifdef AFS_CACHING_SUPPORT
-extern int afs_cache_get_page_cookie(struct page *page,
-				     struct cachefs_page **_page_cookie);
-#endif
-
 /*
  * inode.c
  */
@@ -97,8 +91,8 @@ extern void afs_key_unregister(void);
 /*
  * main.c
  */
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_netfs afs_cache_netfs;
+#ifdef CONFIG_AFS_FSCACHE
+extern struct fscache_netfs afs_cache_netfs;
 #endif
 
 /*
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 913c689..5840bb2 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -1,6 +1,6 @@
 /* main.c: AFS client file system
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002,5 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells at redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -14,11 +14,11 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/fscache.h>
 #include <rxrpc/rxrpc.h>
 #include <rxrpc/transport.h>
 #include <rxrpc/call.h>
 #include <rxrpc/peer.h>
-#include "cache.h"
 #include "cell.h"
 #include "server.h"
 #include "fsclient.h"
@@ -51,12 +51,11 @@ static struct rxrpc_peer_ops afs_peer_op
 struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
 DEFINE_SPINLOCK(afs_cb_hash_lock);
 
-#ifdef AFS_CACHING_SUPPORT
-static struct cachefs_netfs_operations afs_cache_ops = {
-	.get_page_cookie	= afs_cache_get_page_cookie,
+#ifdef CONFIG_AFS_FSCACHE
+static struct fscache_netfs_operations afs_cache_ops = {
 };
 
-struct cachefs_netfs afs_cache_netfs = {
+struct fscache_netfs afs_cache_netfs = {
 	.name			= "afs",
 	.version		= 0,
 	.ops			= &afs_cache_ops,
@@ -83,10 +82,9 @@ static int __init afs_init(void)
 	if (ret < 0)
 		return ret;
 
-#ifdef AFS_CACHING_SUPPORT
+#ifdef CONFIG_AFS_FSCACHE
 	/* we want to be able to cache */
-	ret = cachefs_register_netfs(&afs_cache_netfs,
-				     &afs_cache_cell_index_def);
+	ret = fscache_register_netfs(&afs_cache_netfs);
 	if (ret < 0)
 		goto error;
 #endif
@@ -137,8 +135,8 @@ static int __init afs_init(void)
 	afs_key_unregister();
  error_cache:
 #endif
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_unregister_netfs(&afs_cache_netfs);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_unregister_netfs(&afs_cache_netfs);
  error:
 #endif
 	afs_cell_purge();
@@ -167,8 +165,8 @@ static void __exit afs_exit(void)
 #ifdef CONFIG_KEYS_TURNED_OFF
 	afs_key_unregister();
 #endif
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_unregister_netfs(&afs_cache_netfs);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_unregister_netfs(&afs_cache_netfs);
 #endif
 	afs_proc_cleanup();
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 31ee065..de959ed 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -82,7 +82,7 @@ int afs_mntpt_check_symlink(struct afs_v
 
 	ret = -EIO;
 	wait_on_page_locked(page);
-	buf = kmap(page);
+	buf = kmap_atomic(page, KM_USER0);
 	if (!PageUptodate(page))
 		goto out_free;
 	if (PageError(page))
@@ -105,7 +105,7 @@ int afs_mntpt_check_symlink(struct afs_v
 	ret = 0;
 
  out_free:
-	kunmap(page);
+	kunmap_atomic(buf, KM_USER0);
 	page_cache_release(page);
  out:
 	_leave(" = %d", ret);
@@ -195,9 +195,9 @@ static struct vfsmount *afs_mntpt_do_aut
 	if (!PageUptodate(page) || PageError(page))
 		goto error;
 
-	buf = kmap(page);
+	buf = kmap_atomic(page, KM_USER0);
 	memcpy(devname, buf, size);
-	kunmap(page);
+	kunmap_atomic(buf, KM_USER0);
 	page_cache_release(page);
 	page = NULL;
 
@@ -276,12 +276,12 @@ static void *afs_mntpt_follow_link(struc
  */
 static void afs_mntpt_expiry_timed_out(struct afs_timer *timer)
 {
-	kenter("");
+//	kenter("");
 
 	mark_mounts_for_expiry(&afs_vfsmounts);
 
 	afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
 				afs_mntpt_expiry_timeout * HZ);
 
-	kleave("");
+//	kleave("");
 } /* end afs_mntpt_expiry_timed_out() */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 9c81b8f..7f61853 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -177,6 +177,7 @@ int afs_proc_init(void)
  */
 void afs_proc_cleanup(void)
 {
+	remove_proc_entry("rootcell", proc_afs);
 	remove_proc_entry("cells", proc_afs);
 
 	remove_proc_entry("fs/afs", NULL);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 62b093a..7103e10 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -377,7 +377,6 @@ int afs_server_request_callslot(struct a
 	else if (list_empty(&server->fs_callq)) {
 		/* no one waiting */
 		server->fs_conn_cnt[nconn]++;
-		spin_unlock(&server->fs_lock);
 	}
 	else {
 		/* someone's waiting - dequeue them and wake them up */
@@ -395,9 +394,9 @@ int afs_server_request_callslot(struct a
 		}
 		pcallslot->ready = 1;
 		wake_up_process(pcallslot->task);
-		spin_unlock(&server->fs_lock);
 	}
 
+	spin_unlock(&server->fs_lock);
 	rxrpc_put_connection(callslot->conn);
 	callslot->conn = NULL;
 
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index eced206..cfab969 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -59,17 +59,21 @@ static LIST_HEAD(afs_vlocation_update_pe
 static struct afs_vlocation *afs_vlocation_update;	/* VL currently being updated */
 static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
-						     const void *entry);
-static void afs_vlocation_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vlocation_cache_index_def = {
-	.name		= "vldb",
-	.data_size	= sizeof(struct afs_cache_vlocation),
-	.keys[0]	= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
-	.match		= afs_vlocation_cache_match,
-	.update		= afs_vlocation_cache_update,
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
+					    void *buffer, uint16_t buflen);
+static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
+					    void *buffer, uint16_t buflen);
+static fscache_checkaux_t afs_vlocation_cache_check_aux(void *cookie_netfs_data,
+							const void *buffer,
+							uint16_t buflen);
+
+static struct fscache_cookie_def afs_vlocation_cache_index_def = {
+	.name		= "AFS.vldb",
+	.type		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= afs_vlocation_cache_get_key,
+	.get_aux	= afs_vlocation_cache_get_aux,
+	.check_aux	= afs_vlocation_cache_check_aux,
 };
 #endif
 
@@ -300,13 +304,12 @@ int afs_vlocation_lookup(struct afs_cell
 
 	list_add_tail(&vlocation->link, &cell->vl_list);
 
-#ifdef AFS_CACHING_SUPPORT
+#ifdef CONFIG_AFS_FSCACHE
 	/* we want to store it in the cache, plus it might already be
 	 * encached */
-	cachefs_acquire_cookie(cell->cache,
-			       &afs_volume_cache_index_def,
-			       vlocation,
-			       &vlocation->cache);
+	vlocation->cache = fscache_acquire_cookie(cell->cache,
+						  &afs_vlocation_cache_index_def,
+						  vlocation);
 
 	if (vlocation->valid)
 		goto found_in_cache;
@@ -341,7 +344,7 @@ int afs_vlocation_lookup(struct afs_cell
  active:
 	active = 1;
 
-#ifdef AFS_CACHING_SUPPORT
+#ifdef CONFIG_AFS_FSCACHE
  found_in_cache:
 #endif
 	/* try to look up a cached volume in the cell VL databases by ID */
@@ -423,9 +426,9 @@ int afs_vlocation_lookup(struct afs_cell
 
 	afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ);
 
-#ifdef AFS_CACHING_SUPPORT
+#ifdef CONFIG_AFS_FSCACHE
 	/* update volume entry in local cache */
-	cachefs_update_cookie(vlocation->cache);
+	fscache_update_cookie(vlocation->cache);
 #endif
 
 	*_vlocation = vlocation;
@@ -439,8 +442,8 @@ int afs_vlocation_lookup(struct afs_cell
 		}
 		else {
 			list_del(&vlocation->link);
-#ifdef AFS_CACHING_SUPPORT
-			cachefs_relinquish_cookie(vlocation->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+			fscache_relinquish_cookie(vlocation->cache, 0);
 #endif
 			afs_put_cell(vlocation->cell);
 			kfree(vlocation);
@@ -538,8 +541,8 @@ void afs_vlocation_do_timeout(struct afs
 	}
 
 	/* we can now destroy it properly */
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_relinquish_cookie(vlocation->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_relinquish_cookie(vlocation->cache, 0);
 #endif
 	afs_put_cell(cell);
 
@@ -890,65 +893,103 @@ static void afs_vlocation_update_discard
 
 /*****************************************************************************/
 /*
- * match a VLDB record stored in the cache
- * - may also load target from entry
+ * set the key for the index entry
  */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
-						     const void *entry)
-{
-	const struct afs_cache_vlocation *vldb = entry;
-	struct afs_vlocation *vlocation = target;
-
-	_enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
-
-	if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
-	    ) {
-		if (!vlocation->valid ||
-		    vlocation->vldb.rtime == vldb->rtime
-		    ) {
-			vlocation->vldb = *vldb;
-			vlocation->valid = 1;
-			_leave(" = SUCCESS [c->m]");
-			return CACHEFS_MATCH_SUCCESS;
-		}
-		/* need to update cache if cached info differs */
-		else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
-			/* delete if VIDs for this name differ */
-			if (memcmp(&vlocation->vldb.vid,
-				   &vldb->vid,
-				   sizeof(vldb->vid)) != 0) {
-				_leave(" = DELETE");
-				return CACHEFS_MATCH_SUCCESS_DELETE;
-			}
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
+					    void *buffer, uint16_t bufmax)
+{
+	const struct afs_vlocation *vlocation = cookie_netfs_data;
+	uint16_t klen;
 
-			_leave(" = UPDATE");
-			return CACHEFS_MATCH_SUCCESS_UPDATE;
-		}
-		else {
-			_leave(" = SUCCESS");
-			return CACHEFS_MATCH_SUCCESS;
-		}
-	}
+	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);
+
+	klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name));
+	if (klen > bufmax)
+		return 0;
+
+	memcpy(buffer, vlocation->vldb.name, klen);
+
+	_leave(" = %u", klen);
+	return klen;
 
-	_leave(" = FAILED");
-	return CACHEFS_MATCH_FAILED;
-} /* end afs_vlocation_cache_match() */
+} /* end afs_vlocation_cache_get_key() */
 #endif
 
 /*****************************************************************************/
 /*
- * update a VLDB record stored in the cache
+ * provide new auxilliary cache data
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vlocation_cache_update(void *source, void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
+					    void *buffer, uint16_t bufmax)
 {
-	struct afs_cache_vlocation *vldb = entry;
-	struct afs_vlocation *vlocation = source;
+	const struct afs_vlocation *vlocation = cookie_netfs_data;
+	uint16_t dlen;
+
+	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);
+
+	dlen = sizeof(struct afs_cache_vlocation);
+	dlen -= offsetof(struct afs_cache_vlocation, nservers);
+	if (dlen > bufmax)
+		return 0;
+
+	memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen);
 
-	_enter("");
+	_leave(" = %u", dlen);
+	return dlen;
+
+} /* end afs_vlocation_cache_get_aux() */
+#endif
+
+/*****************************************************************************/
+/*
+ * check that the auxilliary data indicates that the entry is still valid
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static fscache_checkaux_t afs_vlocation_cache_check_aux(void *cookie_netfs_data,
+							const void *buffer,
+							uint16_t buflen)
+{
+	const struct afs_cache_vlocation *cvldb;
+	struct afs_vlocation *vlocation = cookie_netfs_data;
+	uint16_t dlen;
+
+	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen);
+
+	/* check the size of the data is what we're expecting */
+	dlen = sizeof(struct afs_cache_vlocation);
+	dlen -= offsetof(struct afs_cache_vlocation, nservers);
+	if (dlen != buflen)
+		return FSCACHE_CHECKAUX_OBSOLETE;
+
+	cvldb = container_of(buffer, struct afs_cache_vlocation, nservers);
+
+	/* if what's on disk is more valid than what's in memory, then use the
+	 * VL record from the cache */
+	if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) {
+		memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen);
+		vlocation->valid = 1;
+		_leave(" = SUCCESS [c->m]");
+		return FSCACHE_CHECKAUX_OKAY;
+	}
+
+	/* need to update the cache if the cached info differs */
+	if (memcmp(&vlocation->vldb, buffer, dlen) != 0) {
+		/* delete if the volume IDs for this name differ */
+		if (memcmp(&vlocation->vldb.vid, &cvldb->vid,
+			   sizeof(cvldb->vid)) != 0
+		    ) {
+			_leave(" = OBSOLETE");
+			return FSCACHE_CHECKAUX_OBSOLETE;
+		}
+
+		_leave(" = UPDATE");
+		return FSCACHE_CHECKAUX_NEEDS_UPDATE;
+	}
 
-	*vldb = vlocation->vldb;
+	_leave(" = OKAY");
+	return FSCACHE_CHECKAUX_OKAY;
 
-} /* end afs_vlocation_cache_update() */
+} /* end afs_vlocation_cache_check_aux() */
 #endif
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
index 9867fef..7116128 100644
--- a/fs/afs/vnode.c
+++ b/fs/afs/vnode.c
@@ -29,17 +29,30 @@ struct afs_timer_ops afs_vnode_cb_timed_
 	.timed_out	= afs_vnode_cb_timed_out,
 };
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
-						 const void *entry);
-static void afs_vnode_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vnode_cache_index_def = {
-	.name		= "vnode",
-	.data_size	= sizeof(struct afs_cache_vnode),
-	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 4 },
-	.match		= afs_vnode_cache_match,
-	.update		= afs_vnode_cache_update,
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
+					void *buffer, uint16_t buflen);
+static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
+				     uint64_t *size);
+static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
+					void *buffer, uint16_t buflen);
+static fscache_checkaux_t afs_vnode_cache_check_aux(void *cookie_netfs_data,
+						    const void *buffer,
+						    uint16_t buflen);
+static void afs_vnode_cache_mark_pages_cached(void *cookie_netfs_data,
+					      struct address_space *mapping,
+					      struct pagevec *cached_pvec);
+static void afs_vnode_cache_now_uncached(void *cookie_netfs_data);
+
+struct fscache_cookie_def afs_vnode_cache_index_def = {
+	.name			= "AFS.vnode",
+	.type			= FSCACHE_COOKIE_TYPE_DATAFILE,
+	.get_key		= afs_vnode_cache_get_key,
+	.get_attr		= afs_vnode_cache_get_attr,
+	.get_aux		= afs_vnode_cache_get_aux,
+	.check_aux		= afs_vnode_cache_check_aux,
+	.mark_pages_cached	= afs_vnode_cache_mark_pages_cached,
+	.now_uncached		= afs_vnode_cache_now_uncached,
 };
 #endif
 
@@ -189,6 +202,8 @@ int afs_vnode_fetch_status(struct afs_vn
 
 	if (vnode->update_cnt > 0) {
 		/* someone else started a fetch */
+		_debug("conflict");
+
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&vnode->update_waitq, &myself);
 
@@ -220,6 +235,7 @@ int afs_vnode_fetch_status(struct afs_vn
 		spin_unlock(&vnode->lock);
 		set_current_state(TASK_RUNNING);
 
+		_leave(" [conflicted, %d", !!(vnode->flags & AFS_VNODE_DELETED));
 		return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
 	}
 
@@ -342,54 +358,197 @@ int afs_vnode_give_up_callback(struct af
 
 /*****************************************************************************/
 /*
- * match a vnode record stored in the cache
+ * set the key for the index entry
  */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
-						 const void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
+					void *buffer, uint16_t bufmax)
 {
-	const struct afs_cache_vnode *cvnode = entry;
-	struct afs_vnode *vnode = target;
+	const struct afs_vnode *vnode = cookie_netfs_data;
+	uint16_t klen;
 
-	_enter("{%x,%x,%Lx},{%x,%x,%Lx}",
-	       vnode->fid.vnode,
-	       vnode->fid.unique,
-	       vnode->status.version,
-	       cvnode->vnode_id,
-	       cvnode->vnode_unique,
-	       cvnode->data_version);
-
-	if (vnode->fid.vnode != cvnode->vnode_id) {
-		_leave(" = FAILED");
-		return CACHEFS_MATCH_FAILED;
-	}
+	_enter("{%x,%x,%Lx},%p,%u",
+	       vnode->fid.vnode, vnode->fid.unique, vnode->status.version,
+	       buffer, bufmax);
+
+	klen = sizeof(vnode->fid.vnode);
+	if (klen > bufmax)
+		return 0;
+
+	memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode));
+
+	_leave(" = %u", klen);
+	return klen;
+
+} /* end afs_vnode_cache_get_key() */
+#endif
+
+/*****************************************************************************/
+/*
+ * provide an updated file attributes
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
+				     uint64_t *size)
+{
+	const struct afs_vnode *vnode = cookie_netfs_data;
+
+	_enter("{%x,%x,%Lx},",
+	       vnode->fid.vnode, vnode->fid.unique, vnode->status.version);
+
+	*size = i_size_read((struct inode *) &vnode->vfs_inode);
+
+} /* end afs_vnode_cache_get_attr() */
+#endif
+
+/*****************************************************************************/
+/*
+ * provide new auxilliary cache data
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
+					void *buffer, uint16_t bufmax)
+{
+	const struct afs_vnode *vnode = cookie_netfs_data;
+	uint16_t dlen;
+
+	_enter("{%x,%x,%Lx},%p,%u",
+	       vnode->fid.vnode, vnode->fid.unique, vnode->status.version,
+	       buffer, bufmax);
 
-	if (vnode->fid.unique != cvnode->vnode_unique ||
-	    vnode->status.version != cvnode->data_version) {
-		_leave(" = DELETE");
-		return CACHEFS_MATCH_SUCCESS_DELETE;
+	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.version);
+	if (dlen > bufmax)
+		return 0;
+
+	memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique));
+	buffer += sizeof(vnode->fid.unique);
+	memcpy(buffer, &vnode->status.version, sizeof(vnode->status.version));
+
+	_leave(" = %u", dlen);
+	return dlen;
+
+} /* end afs_vnode_cache_get_aux() */
+#endif
+
+/*****************************************************************************/
+/*
+ * check that the auxiliary data indicates that the entry is still valid
+ */
+#ifdef CONFIG_AFS_FSCACHE
+static fscache_checkaux_t afs_vnode_cache_check_aux(void *cookie_netfs_data,
+						    const void *buffer,
+						    uint16_t buflen)
+{
+	struct afs_vnode *vnode = cookie_netfs_data;
+	uint16_t dlen;
+
+	_enter("{%x,%x,%Lx},%p,%u",
+	       vnode->fid.vnode, vnode->fid.unique, vnode->status.version,
+	       buffer, buflen);
+
+	/* check the size of the data is what we're expecting */
+	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.version);
+	if (dlen != buflen) {
+		_leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen);
+		return FSCACHE_CHECKAUX_OBSOLETE;
+	}
+
+	if (memcmp(buffer,
+		   &vnode->fid.unique,
+		   sizeof(vnode->fid.unique)
+		   ) != 0
+	    ) {
+		unsigned unique;
+
+		memcpy(&unique, buffer, sizeof(unique));
+
+		_leave(" = OBSOLETE [uniq %x != %x]",
+		       unique, vnode->fid.unique);
+		return FSCACHE_CHECKAUX_OBSOLETE;
+	}
+
+	if (memcmp(buffer + sizeof(vnode->fid.unique),
+		   &vnode->status.version,
+		   sizeof(vnode->status.version)
+		   ) != 0
+	    ) {
+		afs_dataversion_t version;
+
+		memcpy(&version, buffer + sizeof(vnode->fid.unique),
+		       sizeof(version));
+
+		_leave(" = OBSOLETE [vers %llx != %llx]",
+		       version, vnode->status.version);
+		return FSCACHE_CHECKAUX_OBSOLETE;
 	}
 
 	_leave(" = SUCCESS");
-	return CACHEFS_MATCH_SUCCESS;
-} /* end afs_vnode_cache_match() */
+	return FSCACHE_CHECKAUX_OKAY;
+
+} /* end afs_vnode_cache_check_aux() */
 #endif
 
 /*****************************************************************************/
 /*
- * update a vnode record stored in the cache
+ * indication of pages that now have cache metadata retained
+ * - this function should mark the specified pages as now being cached
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vnode_cache_update(void *source, void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static void afs_vnode_cache_mark_pages_cached(void *cookie_netfs_data,
+					      struct address_space *mapping,
+					      struct pagevec *cached_pvec)
 {
-	struct afs_cache_vnode *cvnode = entry;
-	struct afs_vnode *vnode = source;
+	unsigned long loop;
 
-	_enter("");
+	for (loop = 0; loop < cached_pvec->nr; loop++) {
+		struct page *page = cached_pvec->pages[loop];
 
-	cvnode->vnode_id	= vnode->fid.vnode;
-	cvnode->vnode_unique	= vnode->fid.unique;
-	cvnode->data_version	= vnode->status.version;
+		_debug("- mark %p{%lx}", page, page->index);
 
-} /* end afs_vnode_cache_update() */
+		SetPagePrivate(page);
+	}
+
+} /* end afs_vnode_cache_mark_pages_cached() */
 #endif
+
+/*****************************************************************************/
+/*
+ * indication that the cookie is no longer cached
+ * - this function is called when the backing store currently caching a cookie
+ *   is removed
+ * - the netfs should use this to clean up any markers indicating cached pages
+ * - this is mandatory for any object that may have data
+ */
+static void afs_vnode_cache_now_uncached(void *cookie_netfs_data)
+{
+	struct afs_vnode *vnode = cookie_netfs_data;
+	struct pagevec pvec;
+	pgoff_t first;
+	int loop, nr_pages;
+
+	_enter("{%x,%x,%Lx}",
+	       vnode->fid.vnode, vnode->fid.unique, vnode->status.version);
+
+	pagevec_init(&pvec, 0);
+	first = 0;
+
+	for (;;) {
+		/* grab a bunch of pages to clean */
+		nr_pages = find_get_pages(vnode->vfs_inode.i_mapping, first,
+					  PAGEVEC_SIZE, pvec.pages);
+		if (!nr_pages)
+			break;
+
+		for (loop = 0; loop < nr_pages; loop++)
+			ClearPagePrivate(pvec.pages[loop]);
+
+		first = pvec.pages[nr_pages - 1]->index + 1;
+
+		pvec.nr = nr_pages;
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	_leave("");
+
+} /* end afs_vnode_cache_now_uncached() */
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h
index b86a971..3f0602d 100644
--- a/fs/afs/vnode.h
+++ b/fs/afs/vnode.h
@@ -13,9 +13,9 @@
 #define _LINUX_AFS_VNODE_H
 
 #include <linux/fs.h>
+#include <linux/fscache.h>
 #include "server.h"
 #include "kafstimod.h"
-#include "cache.h"
 
 #ifdef __KERNEL__
 
@@ -32,8 +32,8 @@ struct afs_cache_vnode
 	afs_dataversion_t	data_version;	/* data version */
 };
 
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vnode_cache_index_def;
+#ifdef CONFIG_AFS_FSCACHE
+extern struct fscache_cookie_def afs_vnode_cache_index_def;
 #endif
 
 /*****************************************************************************/
@@ -47,8 +47,8 @@ struct afs_vnode
 	struct afs_volume	*volume;	/* volume on which vnode resides */
 	struct afs_fid		fid;		/* the file identifier for this inode */
 	struct afs_file_status	status;		/* AFS status info for this file */
-#ifdef AFS_CACHING_SUPPORT
-	struct cachefs_cookie	*cache;		/* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+	struct fscache_cookie	*cache;		/* caching cookie */
 #endif
 
 	wait_queue_head_t	update_waitq;	/* status fetch waitqueue */
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 0ff4b86..0bd5578 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -15,10 +15,10 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/fscache.h>
 #include "volume.h"
 #include "vnode.h"
 #include "cell.h"
-#include "cache.h"
 #include "cmservice.h"
 #include "fsclient.h"
 #include "vlclient.h"
@@ -28,18 +28,14 @@
 static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
 #endif
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
-						  const void *entry);
-static void afs_volume_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_volume_cache_index_def = {
-	.name		= "volume",
-	.data_size	= sizeof(struct afs_cache_vhash),
-	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
-	.keys[1]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
-	.match		= afs_volume_cache_match,
-	.update		= afs_volume_cache_update,
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
+					 void *buffer, uint16_t buflen);
+
+static struct fscache_cookie_def afs_volume_cache_index_def = {
+	.name		= "AFS.volume",
+	.type		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= afs_volume_cache_get_key,
 };
 #endif
 
@@ -214,11 +210,10 @@ int afs_volume_lookup(const char *name, 
 	}
 
 	/* attach the cache and volume location */
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_acquire_cookie(vlocation->cache,
-			       &afs_vnode_cache_index_def,
-			       volume,
-			       &volume->cache);
+#ifdef CONFIG_AFS_FSCACHE
+	volume->cache = fscache_acquire_cookie(vlocation->cache,
+					       &afs_volume_cache_index_def,
+					       volume);
 #endif
 
 	afs_get_vlocation(vlocation);
@@ -286,8 +281,8 @@ void afs_put_volume(struct afs_volume *v
 	up_write(&vlocation->cell->vl_sem);
 
 	/* finish cleaning up the volume */
-#ifdef AFS_CACHING_SUPPORT
-	cachefs_relinquish_cookie(volume->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+	fscache_relinquish_cookie(volume->cache, 0);
 #endif
 	afs_put_vlocation(vlocation);
 
@@ -481,40 +476,25 @@ int afs_volume_release_fileserver(struct
 
 /*****************************************************************************/
 /*
- * match a volume hash record stored in the cache
+ * set the key for the index entry
  */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
-						  const void *entry)
+#ifdef CONFIG_AFS_FSCACHE
+static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
+					void *buffer, uint16_t bufmax)
 {
-	const struct afs_cache_vhash *vhash = entry;
-	struct afs_volume *volume = target;
+	const struct afs_volume *volume = cookie_netfs_data;
+	uint16_t klen;
 
-	_enter("{%u},{%u}", volume->type, vhash->vtype);
+	_enter("{%u},%p,%u", volume->type, buffer, bufmax);
 
-	if (volume->type == vhash->vtype) {
-		_leave(" = SUCCESS");
-		return CACHEFS_MATCH_SUCCESS;
-	}
-
-	_leave(" = FAILED");
-	return CACHEFS_MATCH_FAILED;
-} /* end afs_volume_cache_match() */
-#endif
-
-/*****************************************************************************/
-/*
- * update a volume hash record stored in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_volume_cache_update(void *source, void *entry)
-{
-	struct afs_cache_vhash *vhash = entry;
-	struct afs_volume *volume = source;
+	klen = sizeof(volume->type);
+	if (klen > bufmax)
+		return 0;
 
-	_enter("");
+	memcpy(buffer, &volume->type, sizeof(volume->type));
 
-	vhash->vtype = volume->type;
+	_leave(" = %u", klen);
+	return klen;
 
-} /* end afs_volume_cache_update() */
+} /* end afs_volume_cache_get_key() */
 #endif
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
index bfdcf19..fc9895a 100644
--- a/fs/afs/volume.h
+++ b/fs/afs/volume.h
@@ -12,11 +12,11 @@
 #ifndef _LINUX_AFS_VOLUME_H
 #define _LINUX_AFS_VOLUME_H
 
+#include <linux/fscache.h>
 #include "types.h"
 #include "fsclient.h"
 #include "kafstimod.h"
 #include "kafsasyncd.h"
-#include "cache.h"
 
 typedef enum {
 	AFS_VLUPD_SLEEP,		/* sleeping waiting for update timer to fire */
@@ -45,24 +45,6 @@ struct afs_cache_vlocation
 	time_t			rtime;		/* last retrieval time */
 };
 
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vlocation_cache_index_def;
-#endif
-
-/*****************************************************************************/
-/*
- * volume -> vnode hash table entry
- */
-struct afs_cache_vhash
-{
-	afs_voltype_t		vtype;		/* which volume variation */
-	uint8_t			hash_bucket;	/* which hash bucket this represents */
-} __attribute__((packed));
-
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_volume_cache_index_def;
-#endif
-
 /*****************************************************************************/
 /*
  * AFS volume location record
@@ -73,8 +55,8 @@ struct afs_vlocation
 	struct list_head	link;		/* link in cell volume location list */
 	struct afs_timer	timeout;	/* decaching timer */
 	struct afs_cell		*cell;		/* cell to which volume belongs */
-#ifdef AFS_CACHING_SUPPORT
-	struct cachefs_cookie	*cache;		/* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+	struct fscache_cookie	*cache;		/* caching cookie */
 #endif
 	struct afs_cache_vlocation vldb;	/* volume information DB record */
 	struct afs_volume	*vols[3];	/* volume access record pointer (index by type) */
@@ -109,8 +91,8 @@ struct afs_volume
 	atomic_t		usage;
 	struct afs_cell		*cell;		/* cell to which belongs (unrefd ptr) */
 	struct afs_vlocation	*vlocation;	/* volume location */
-#ifdef AFS_CACHING_SUPPORT
-	struct cachefs_cookie	*cache;		/* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+	struct fscache_cookie	*cache;		/* caching cookie */
 #endif
 	afs_volid_t		vid;		/* volume ID */
 	afs_voltype_t		type;		/* type of volume */

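The AFS hunks above replace the old cachefs callbacks with FS-Cache cookie definitions: keys and auxiliary data are produced on demand by get_key()/get_aux(), validated by check_aux(), and cookies are obtained and dropped with fscache_acquire_cookie()/fscache_relinquish_cookie(). A minimal sketch of that pattern for a hypothetical netfs object follows; everything named example_* is illustrative, and only the fscache_* calls and cookie-definition fields are taken from the patch itself.

/* Sketch only: how a netfs might wire an object up to FS-Cache, following the
 * pattern used for afs_volume above.  All example_* names are hypothetical. */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fscache.h>

struct example_object {
	uint32_t		id;	/* value used to key the cache entry */
	struct fscache_cookie	*cache;	/* caching cookie */
};

static uint16_t example_get_key(const void *cookie_netfs_data,
				void *buffer, uint16_t bufmax)
{
	const struct example_object *obj = cookie_netfs_data;

	if (sizeof(obj->id) > bufmax)
		return 0;		/* no room: supply no key */
	memcpy(buffer, &obj->id, sizeof(obj->id));
	return sizeof(obj->id);
}

static struct fscache_cookie_def example_index_def = {
	.name		= "example.object",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= example_get_key,
};

static void example_attach_cache(struct example_object *obj,
				 struct fscache_cookie *parent)
{
	/* parent index cookie -> per-object cookie, as in afs_volume_lookup() */
	obj->cache = fscache_acquire_cookie(parent, &example_index_def, obj);
}

static void example_detach_cache(struct example_object *obj)
{
	/* pass 0 as the AFS code does; the flag presumably retires the data */
	fscache_relinquish_cookie(obj->cache, 0);
	obj->cache = NULL;
}
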
linux-2.6.16-cachefs-filp.patch:
 arch/ia64/kernel/perfmon.c            |    2 -
 drivers/infiniband/core/uverbs_main.c |    2 -
 fs/eventpoll.c                        |    2 -
 fs/file_table.c                       |   52 ++++++++++++++++++++++++----------
 fs/hugetlbfs/inode.c                  |    2 -
 fs/inotify.c                          |    2 -
 fs/namei.c                            |    2 -
 fs/open.c                             |   22 +++++++++++++-
 fs/pipe.c                             |    4 +-
 include/linux/fs.h                    |    7 +++-
 kernel/futex.c                        |    2 -
 kernel/sysctl.c                       |    2 -
 mm/shmem.c                            |    2 -
 mm/tiny-shmem.c                       |    2 -
 net/socket.c                          |    2 -
 15 files changed, 78 insertions(+), 29 deletions(-)

--- NEW FILE linux-2.6.16-cachefs-filp.patch ---
--- linux-2.6.16.noarch/arch/ia64/kernel/perfmon.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/arch/ia64/kernel/perfmon.c	2006-04-11 21:56:24.000000000 -0400
@@ -2162,7 +2162,7 @@ pfm_alloc_fd(struct file **cfile)
 
 	ret = -ENFILE;
 
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 	if (!file) goto out;
 
 	/*
--- linux-2.6.16.noarch/drivers/infiniband/core/uverbs_main.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/drivers/infiniband/core/uverbs_main.c	2006-04-11 21:56:24.000000000 -0400
@@ -523,7 +523,7 @@ struct file *ib_uverbs_alloc_event_file(
 		goto err;
 	}
 
-	filp = get_empty_filp();
+	filp = get_empty_filp(0);
 	if (!filp) {
 		ret = -ENFILE;
 		goto err_fd;
--- linux-2.6.16.noarch/fs/hugetlbfs/inode.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/hugetlbfs/inode.c	2006-04-11 21:56:24.000000000 -0400
@@ -821,7 +821,7 @@ struct file *hugetlb_zero_setup(size_t s
 		goto out_shm_unlock;
 
 	error = -ENFILE;
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 	if (!file)
 		goto out_dentry;
 
--- linux-2.6.16.noarch/fs/eventpoll.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/eventpoll.c	2006-04-11 21:56:24.000000000 -0400
@@ -722,7 +722,7 @@ static int ep_getfd(int *efd, struct ino
 
 	/* Get an ready to use file */
 	error = -ENFILE;
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 	if (!file)
 		goto eexit_1;
 
--- linux-2.6.16.noarch/fs/file_table.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/file_table.c	2006-04-11 21:59:24.000000000 -0400
@@ -35,10 +35,22 @@ __cacheline_aligned_in_smp DEFINE_SPINLO
 
 static struct percpu_counter nr_files __cacheline_aligned_in_smp;
 
+static DEFINE_SPINLOCK(filp_count_lock);
+
 static inline void file_free_rcu(struct rcu_head *head)
 {
 	struct file *f =  container_of(head, struct file, f_u.fu_rcuhead);
+	unsigned short fkflags = f->f_kernel_flags;
+	unsigned long flags;
+
 	kmem_cache_free(filp_cachep, f);
+
+	spin_lock_irqsave(&filp_count_lock, flags);
+	if (!(fkflags & FKFLAGS_KERNEL))
+		files_stat.nr_files--;
+	else
+		files_stat.nr_kernel_files--;
+	spin_unlock_irqrestore(&filp_count_lock, flags);
 }
 
 static inline void file_free(struct file *f)
@@ -86,28 +98,30 @@ int proc_nr_files(ctl_table *table, int 
  * Returns NULL, if there are no more free file structures or
  * we run out of memory.
  */
-struct file *get_empty_filp(void)
+struct file *get_empty_filp(int kernel)
 {
 	static int old_max;
 	struct file * f;
+	int nr;
 
 	/*
 	 * Privileged users can go above max_files
 	 */
-	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
-		/*
-		 * percpu_counters are inaccurate.  Do an expensive check before
-		 * we go and fail.
-		 */
-		if (percpu_counter_sum(&nr_files) >= files_stat.max_files)
+	spin_lock_irq(&filp_count_lock);
+	if (!kernel) {
+		if (files_stat.nr_files >= files_stat.max_files &&
+		    !capable(CAP_SYS_ADMIN))
 			goto over;
+		files_stat.nr_files++;
+	} else {
+		files_stat.nr_kernel_files++;
 	}
+	spin_unlock_irq(&filp_count_lock);
 
 	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
 	if (f == NULL)
 		goto fail;
 
-	percpu_counter_inc(&nr_files);
 	memset(f, 0, sizeof(*f));
 	if (security_file_alloc(f))
 		goto fail_sec;
@@ -117,22 +131,32 @@ struct file *get_empty_filp(void)
 	f->f_uid = current->fsuid;
 	f->f_gid = current->fsgid;
 	rwlock_init(&f->f_owner.lock);
+	f->f_kernel_flags = kernel ? FKFLAGS_KERNEL : 0;
 	/* f->f_version: 0 */
 	INIT_LIST_HEAD(&f->f_u.fu_list);
 	return f;
 
 over:
 	/* Ran out of filps - report that */
-	if (get_nr_files() > old_max) {
-		printk(KERN_INFO "VFS: file-max limit %d reached\n",
-					get_max_files());
-		old_max = get_nr_files();
+	nr = files_stat.nr_files;
+	spin_unlock_irq(&filp_count_lock);
+	if (nr > old_max) {
+		printk(KERN_INFO "VFS: file-max limit %d reached\n", nr);
+		old_max = nr;
 	}
-	goto fail;
+	return NULL;
+
+fail:
+	spin_lock_irq(&filp_count_lock);
+	if (!kernel)
+		files_stat.nr_files--;
+	else
+		files_stat.nr_kernel_files--;
+	spin_unlock_irq(&filp_count_lock);
+	return NULL;
 
 fail_sec:
 	file_free(f);
-fail:
 	return NULL;
 }
 
--- linux-2.6.16.noarch/fs/inotify.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/inotify.c	2006-04-11 21:56:24.000000000 -0400
@@ -874,7 +874,7 @@ asmlinkage long sys_inotify_init(void)
 	if (fd < 0)
 		return fd;
 
-	filp = get_empty_filp();
+	filp = get_empty_filp(0);
 	if (!filp) {
 		ret = -ENFILE;
 		goto out_put_fd;
--- linux-2.6.16.noarch/fs/namei.c.filp	2006-04-11 21:55:39.000000000 -0400
+++ linux-2.6.16.noarch/fs/namei.c	2006-04-11 21:56:24.000000000 -0400
@@ -1149,7 +1149,7 @@ static int __path_lookup_intent_open(int
 		unsigned int lookup_flags, struct nameidata *nd,
 		int open_flags, int create_mode)
 {
-	struct file *filp = get_empty_filp();
+	struct file *filp = get_empty_filp(0);
 	int err;
 
 	if (filp == NULL)
--- linux-2.6.16.noarch/fs/open.c.filp	2006-04-11 21:55:40.000000000 -0400
+++ linux-2.6.16.noarch/fs/open.c	2006-04-11 21:56:24.000000000 -0400
@@ -950,7 +950,7 @@ struct file *dentry_open(struct dentry *
 	struct file *f;
 
 	error = -ENFILE;
-	f = get_empty_filp();
+	f = get_empty_filp(0);
 	if (f == NULL) {
 		dput(dentry);
 		mntput(mnt);
@@ -962,6 +962,26 @@ struct file *dentry_open(struct dentry *
 EXPORT_SYMBOL(dentry_open);
 
 /*
+ * open a file specifically for in-kernel use
+ */
+struct file *dentry_open_kernel(struct dentry *dentry, struct vfsmount *mnt, int flags)
+{
+	int error;
+	struct file *f;
+
+	error = -ENFILE;
+	f = get_empty_filp(1);
+	if (f == NULL) {
+		dput(dentry);
+		mntput(mnt);
+		return ERR_PTR(error);
+	}
+
+	return __dentry_open(dentry, mnt, flags, f, NULL);
+}
+EXPORT_SYMBOL(dentry_open_kernel);
+
+/*
  * Find an empty file descriptor entry, and mark it busy.
  */
 int get_unused_fd(void)
--- linux-2.6.16.noarch/fs/pipe.c.filp	2006-04-11 21:55:39.000000000 -0400
+++ linux-2.6.16.noarch/fs/pipe.c	2006-04-11 21:56:24.000000000 -0400
@@ -728,11 +728,11 @@ int do_pipe(int *fd)
 	int i,j;
 
 	error = -ENFILE;
-	f1 = get_empty_filp();
+	f1 = get_empty_filp(0);
 	if (!f1)
 		goto no_files;
 
-	f2 = get_empty_filp();
+	f2 = get_empty_filp(0);
 	if (!f2)
 		goto close_f1;
 
--- linux-2.6.16.noarch/include/linux/fs.h.filp	2006-04-11 21:55:39.000000000 -0400
+++ linux-2.6.16.noarch/include/linux/fs.h	2006-04-11 21:56:24.000000000 -0400
@@ -33,6 +33,7 @@ struct files_stat_struct {
 	int nr_files;		/* read only */
 	int nr_free_files;	/* read only */
 	int max_files;		/* tunable */
+	int nr_kernel_files;	/* read only */
 };
 extern struct files_stat_struct files_stat;
 extern int get_max_files(void);
@@ -65,6 +66,8 @@ extern int dir_notify_enable;
 #define FMODE_PREAD	8
 #define FMODE_PWRITE	FMODE_PREAD	/* These go hand in hand */
 
+#define FKFLAGS_KERNEL	1		/* kernel internal file (not accounted) */
+
 #define RW_MASK		1
 #define RWA_MASK	2
 #define READ 0
@@ -629,6 +632,7 @@ struct file {
 	atomic_t		f_count;
 	unsigned int 		f_flags;
 	mode_t			f_mode;
+	unsigned short		f_kernel_flags;
 	loff_t			f_pos;
 	struct fown_struct	f_owner;
 	unsigned int		f_uid, f_gid;
@@ -1347,6 +1351,7 @@ extern long do_sys_open(int fdf, const c
 			int mode);
 extern struct file *filp_open(const char *, int, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+extern struct file * dentry_open_kernel(struct dentry *, struct vfsmount *, int);
 extern int filp_close(struct file *, fl_owner_t id);
 extern char * getname(const char __user *);
 
@@ -1542,7 +1547,7 @@ static inline void insert_inode_hash(str
 	__insert_inode_hash(inode, inode->i_ino);
 }
 
-extern struct file * get_empty_filp(void);
+extern struct file * get_empty_filp(int kernel);
 extern void file_move(struct file *f, struct list_head *list);
 extern void file_kill(struct file *f);
 struct bio;
--- linux-2.6.16.noarch/kernel/futex.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/kernel/futex.c	2006-04-11 21:56:24.000000000 -0400
@@ -775,7 +775,7 @@ static int futex_fd(unsigned long uaddr,
 	ret = get_unused_fd();
 	if (ret < 0)
 		goto out;
-	filp = get_empty_filp();
+	filp = get_empty_filp(0);
 	if (!filp) {
 		put_unused_fd(ret);
 		ret = -ENFILE;
--- linux-2.6.16.noarch/kernel/sysctl.c.filp	2006-04-11 21:55:39.000000000 -0400
+++ linux-2.6.16.noarch/kernel/sysctl.c	2006-04-11 21:56:24.000000000 -0400
@@ -1001,7 +1001,7 @@ static ctl_table fs_table[] = {
 		.ctl_name	= FS_NRFILE,
 		.procname	= "file-nr",
 		.data		= &files_stat,
-		.maxlen		= 3*sizeof(int),
+		.maxlen		= 4*sizeof(int),
 		.mode		= 0444,
 		.proc_handler	= &proc_nr_files,
 	},
--- linux-2.6.16.noarch/mm/shmem.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/mm/shmem.c	2006-04-11 21:56:25.000000000 -0400
@@ -2310,7 +2310,7 @@ struct file *shmem_file_setup(char *name
 		goto put_memory;
 
 	error = -ENFILE;
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 	if (!file)
 		goto put_dentry;
 
--- linux-2.6.16.noarch/mm/tiny-shmem.c.filp	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/mm/tiny-shmem.c	2006-04-11 21:56:25.000000000 -0400
@@ -71,7 +71,7 @@ struct file *shmem_file_setup(char *name
 		goto put_memory;
 
 	error = -ENFILE;
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 	if (!file)
 		goto put_dentry;
 
--- linux-2.6.16.noarch/net/socket.c.filp	2006-04-11 21:55:39.000000000 -0400
+++ linux-2.6.16.noarch/net/socket.c	2006-04-11 21:56:25.000000000 -0400
@@ -398,7 +398,7 @@ struct file * sock_map_file(struct socke
 	struct qstr this;
 	char name[32];
 
-	file = get_empty_filp();
+	file = get_empty_filp(0);
 
 	if (!file)
 		return ERR_PTR(-ENFILE);

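The filp patch gives get_empty_filp() a "kernel" argument so that file objects opened purely for in-kernel use are counted in the new files_stat.nr_kernel_files field and are exempt from the file-max limit, and it adds dentry_open_kernel() as the front end that passes 1. A hedged sketch of how an in-kernel user (such as a cache back end) might open and later release a backing file; the example_* names are hypothetical and error handling is abbreviated.

/* Sketch only: opening a file for purely in-kernel use with the
 * dentry_open_kernel() helper added by this patch.  As with dentry_open(),
 * the dentry and vfsmount references are consumed even on failure. */
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/err.h>

static struct file *example_open_backing_file(struct dentry *dentry,
					      struct vfsmount *mnt)
{
	struct file *file;

	file = dentry_open_kernel(dget(dentry), mntget(mnt), O_RDWR);
	if (IS_ERR(file))
		return file;

	/* the file is accounted in files_stat.nr_kernel_files, not nr_files */
	return file;
}

static void example_close_backing_file(struct file *file)
{
	filp_close(file, NULL);
}
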
linux-2.6.16-cachefs-fsmisc.patch:
 include/linux/page-flags.h |   10 ++++++++++
 include/linux/pagemap.h    |   11 +++++++++++
 mm/filemap.c               |   17 +++++++++++++++++
 3 files changed, 38 insertions(+)

--- NEW FILE linux-2.6.16-cachefs-fsmisc.patch ---
--- linux-2.6.16.noarch/include/linux/page-flags.h.fsmisc	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/page-flags.h	2006-03-27 09:07:41.000000000 -0500
@@ -62,6 +62,7 @@
 #define PG_slab			 7	/* slab debug (Suparna wants this) */
 
 #define PG_checked		 8	/* kill me in 2.5.<early>. */
+#define PG_fs_misc		 8
 #define PG_arch_1		 9
 #define PG_reserved		10
 #define PG_private		11	/* Has something at ->private */
@@ -360,4 +361,13 @@ static inline void set_page_writeback(st
 	test_set_page_writeback(page);
 }
 
+/*
+ * Filesystem-specific page bit testing
+ */
+#define PageFsMisc(page)		test_bit(PG_fs_misc, &(page)->flags)
+#define SetPageFsMisc(page)		set_bit(PG_fs_misc, &(page)->flags)
+#define TestSetPageFsMisc(page)		test_and_set_bit(PG_fs_misc, &(page)->flags)
+#define ClearPageFsMisc(page)		clear_bit(PG_fs_misc, &(page)->flags)
+#define TestClearPageFsMisc(page)	test_and_clear_bit(PG_fs_misc, &(page)->flags)
+
 #endif	/* PAGE_FLAGS_H */
--- linux-2.6.16.noarch/include/linux/pagemap.h.fsmisc	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/pagemap.h	2006-03-27 09:07:41.000000000 -0500
@@ -201,6 +201,17 @@ static inline void wait_on_page_writebac
 extern void end_page_writeback(struct page *page);
 
 /*
+ * Wait for filesystem-specific page synchronisation to complete
+ */
+static inline void wait_on_page_fs_misc(struct page *page)
+{
+	if (PageFsMisc(page))
+		wait_on_page_bit(page, PG_fs_misc);
+}
+
+extern void fastcall end_page_fs_misc(struct page *page);
+
+/*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
  * This assumes that two userspace pages are always sufficient.  That's
--- linux-2.6.16.noarch/mm/filemap.c.fsmisc	2006-03-27 09:06:43.000000000 -0500
+++ linux-2.6.16.noarch/mm/filemap.c	2006-03-27 09:07:41.000000000 -0500
@@ -521,6 +521,23 @@ void fastcall __lock_page(struct page *p
 EXPORT_SYMBOL(__lock_page);
 
 /*
+ * Note completion of filesystem specific page synchronisation
+ *
+ * This is used to allow a page to be written to a filesystem cache in the
+ * background without holding up the completion of readpage
+ */
+void fastcall end_page_fs_misc(struct page *page)
+{
+	smp_mb__before_clear_bit();
+	if (!TestClearPageFsMisc(page))
+		BUG();
+	smp_mb__after_clear_bit();
+	__wake_up_bit(page_waitqueue(page), &page->flags, PG_fs_misc);
+}
+
+EXPORT_SYMBOL(end_page_fs_misc);
+
+/*
  * a rather lightweight function, finding and getting a reference to a
  * hashed page atomically.
  */

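The fsmisc patch overlays PG_fs_misc on page flag bit 8 and adds wait_on_page_fs_misc() and end_page_fs_misc(), so a filesystem can note that a page is still being copied into a cache in the background without delaying readpage completion. A hedged sketch of the intended life cycle; the example_* names are hypothetical, and the NFS patch later in this mail uses the same pattern.

/* Sketch only: tracking a background write of a page into a filesystem cache
 * with the PG_fs_misc helpers added by this patch. */
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>

/* called when the cache has finished (or failed) copying the page */
static void example_cache_write_done(struct page *page)
{
	end_page_fs_misc(page);		/* clears PG_fs_misc and wakes waiters */
}

static void example_cache_write_begin(struct page *page)
{
	if (TestSetPageFsMisc(page))
		BUG();			/* only one cache write per page at a time */

	/* ... queue the page for writing; the cache back end calls
	 * example_cache_write_done() when the copy completes ... */
}

/* anyone about to modify the page first waits for the cache copy to finish */
static void example_wait_for_cache_write(struct page *page)
{
	wait_on_page_fs_misc(page);
}
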
linux-2.6.16-cachefs-misc.patch:
 fs/buffer.c             |    1 
 fs/fcntl.c              |    2 
 fs/file_table.c         |    2 
 fs/namespace.c          |    2 
 include/linux/fs.h      |    1 
 include/linux/pagemap.h |    9 +++
 kernel/exit.c           |    2 
 kernel/fork.c           |    1 
 mm/filemap.c            |  140 ++++++++++++++++++++++++++++++++++++++++++++++++
 mm/swap.c               |    2 
 10 files changed, 162 insertions(+)

--- NEW FILE linux-2.6.16-cachefs-misc.patch ---
--- linux-2.6.16.noarch/fs/namespace.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/fs/namespace.c	2006-04-11 22:02:04.000000000 -0400
@@ -41,6 +41,8 @@ static inline int sysfs_init(void)
 /* spinlock for vfsmount related operations, inplace of dcache_lock */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 
+EXPORT_SYMBOL_GPL(vfsmount_lock);
+
 static int event;
 
 static struct list_head *mount_hashtable;
--- linux-2.6.16.noarch/fs/buffer.c.misc	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/buffer.c	2006-04-11 22:02:04.000000000 -0400
@@ -180,6 +180,7 @@ int fsync_super(struct super_block *sb)
 
 	return sync_blockdev(sb->s_bdev);
 }
+EXPORT_SYMBOL(fsync_super);
 
 /*
  * Write out and wait upon all dirty data associated with this
--- linux-2.6.16.noarch/fs/fcntl.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/fs/fcntl.c	2006-04-11 22:02:04.000000000 -0400
@@ -533,6 +533,8 @@ int send_sigurg(struct fown_struct *fown
 	return ret;
 }
 
+EXPORT_SYMBOL(send_sigurg);
+
 static DEFINE_RWLOCK(fasync_lock);
 static kmem_cache_t *fasync_cache;
 
--- linux-2.6.16.noarch/fs/file_table.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/fs/file_table.c	2006-04-11 22:02:04.000000000 -0400
@@ -257,6 +257,8 @@ struct file fastcall *fget_light(unsigne
 	return file;
 }
 
+EXPORT_SYMBOL(fget_light);
+
 
 void put_filp(struct file *file)
 {
--- linux-2.6.16.noarch/include/linux/pagemap.h.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/include/linux/pagemap.h	2006-04-11 22:02:04.000000000 -0400
@@ -96,6 +96,9 @@ int add_to_page_cache(struct page *page,
 				unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				unsigned long index, gfp_t gfp_mask);
+extern struct page *replace_in_page_cache(struct page *page,
+					  struct address_space *mapping,
+					  pgoff_t offset);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
@@ -212,6 +215,12 @@ static inline void wait_on_page_fs_misc(
 extern void fastcall end_page_fs_misc(struct page *page);
 
 /*
+ * permit installation of a state change monitor in the queue for a page
+ */
+extern void install_page_waitqueue_monitor(struct page *page,
+					   wait_queue_t *monitor);
+
+/*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
  * This assumes that two userspace pages are always sufficient.  That's
--- linux-2.6.16.noarch/include/linux/fs.h.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/include/linux/fs.h	2006-04-11 22:02:05.000000000 -0400
@@ -1573,6 +1573,7 @@ extern ssize_t generic_file_direct_write
 		unsigned long *, loff_t, loff_t *, size_t, size_t);
 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
 		unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern int generic_file_buffered_write_one_kernel_page(struct file *, pgoff_t, struct page *);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
--- linux-2.6.16.noarch/kernel/fork.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/kernel/fork.c	2006-04-11 22:02:05.000000000 -0400
@@ -125,6 +125,7 @@ void __put_task_struct_cb(struct rcu_hea
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
+EXPORT_SYMBOL(__put_task_struct_cb);
 
 void __init fork_init(unsigned long mempages)
 {
--- linux-2.6.16.noarch/kernel/exit.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/kernel/exit.c	2006-04-11 22:02:05.000000000 -0400
@@ -478,6 +478,8 @@ void put_fs_struct(struct fs_struct *fs)
 	__put_fs_struct(fs);
 }
 
+EXPORT_SYMBOL_GPL(put_fs_struct);
+
 static inline void __exit_fs(struct task_struct *tsk)
 {
 	struct fs_struct * fs = tsk->fs;
--- linux-2.6.16.noarch/mm/filemap.c.misc	2006-04-11 22:00:59.000000000 -0400
+++ linux-2.6.16.noarch/mm/filemap.c	2006-04-11 22:02:05.000000000 -0400
@@ -131,6 +131,8 @@ void remove_from_page_cache(struct page 
 	write_unlock_irq(&mapping->tree_lock);
 }
 
+EXPORT_SYMBOL(remove_from_page_cache);
+
 static int sync_page(void *word)
 {
 	struct address_space *mapping;
@@ -425,6 +427,41 @@ int add_to_page_cache_lru(struct page *p
 	return ret;
 }
 
+EXPORT_SYMBOL(add_to_page_cache_lru);
+
+/*
+ * Replace the page at a particular index in the page cache with another page,
+ * but only if a page is already present at that index
+ */
+struct page *replace_in_page_cache(struct page *page,
+				   struct address_space *mapping,
+				   pgoff_t offset)
+{
+	struct page *old;
+	void **slot;
+
+	write_lock_irq(&mapping->tree_lock);
+
+	slot = radix_tree_lookup_slot(&mapping->page_tree, offset);
+	old = NULL;
+	if (slot) {
+		old = *slot;
+		*slot = page;
+		page_cache_get(page);
+		SetPageLocked(page);
+		page->mapping = mapping;
+		page->index = offset;
+		if (old)
+			old->mapping = NULL;
+	}
+
+	write_unlock_irq(&mapping->tree_lock);
+	return old;
+}
+
+EXPORT_SYMBOL(replace_in_page_cache);
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -457,6 +494,18 @@ void fastcall wait_on_page_bit(struct pa
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
+void install_page_waitqueue_monitor(struct page *page, wait_queue_t *monitor)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, monitor);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(install_page_waitqueue_monitor);
+
 /**
  * unlock_page() - unlock a locked page
  *
@@ -689,6 +738,7 @@ unsigned find_get_pages(struct address_s
 	read_unlock_irq(&mapping->tree_lock);
 	return ret;
 }
+EXPORT_SYMBOL(find_get_pages);
 
 /*
  * Like find_get_pages, except we only return pages which are tagged with
@@ -2091,6 +2141,96 @@ generic_file_buffered_write(struct kiocb
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
+int
+generic_file_buffered_write_one_kernel_page(struct file *file,
+					    pgoff_t index,
+					    struct page *src)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct address_space_operations *a_ops = mapping->a_ops;
+	struct pagevec	lru_pvec;
+	struct page *page, *cached_page = NULL;
+	void *from, *to;
+	long status = 0;
+
+	pagevec_init(&lru_pvec, 0);
+
+	page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
+	if (!page) {
+		BUG_ON(cached_page);
+		return -ENOMEM;
+	}
+
+	status = a_ops->prepare_write(file, page, 0, PAGE_CACHE_SIZE);
+	if (unlikely(status)) {
+		loff_t isize = i_size_read(mapping->host);
+
+		if (status != AOP_TRUNCATED_PAGE)
+			unlock_page(page);
+		page_cache_release(page);
+		if (status == AOP_TRUNCATED_PAGE)
+			goto sync;
+
+		/* prepare_write() may have instantiated a few blocks outside
+		 * i_size.  Trim these off again.
+		 */
+		if (((loff_t)(index + 1) << PAGE_CACHE_SHIFT) > isize)
+			vmtruncate(mapping->host, isize);
+		goto sync;
+	}
+
+	from = kmap_atomic(src, KM_USER0);
+	to = kmap_atomic(page, KM_USER1);
+	copy_page(to, from);
+	kunmap_atomic(from, KM_USER0);
+	kunmap_atomic(to, KM_USER1);
+	flush_dcache_page(page);
+
+	status = a_ops->commit_write(file, page, 0, PAGE_CACHE_SIZE);
+	if (status == AOP_TRUNCATED_PAGE) {
+		page_cache_release(page);
+		goto sync;
+	}
+
+	if (status > 0)
+		status = 0;
+
+	unlock_page(page);
+	mark_page_accessed(page);
+	page_cache_release(page);
+	if (status < 0)
+		return status;
+
+	balance_dirty_pages_ratelimited(mapping);
+	cond_resched();
+
+sync:
+	if (cached_page)
+		page_cache_release(cached_page);
+
+	/*
+	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
+	 */
+	if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(mapping->host))) {
+		if (!a_ops->writepage)
+			status = generic_osync_inode(
+				mapping->host, mapping,
+				OSYNC_METADATA | OSYNC_DATA);
+	}
+
+	/*
+	 * If we get here for O_DIRECT writes then we must have fallen through
+	 * to buffered writes (block instantiation inside i_size).  So we sync
+	 * the file data here, to try to honour O_DIRECT expectations.
+	 */
+	if (unlikely(file->f_flags & O_DIRECT))
+		status = filemap_write_and_wait(mapping);
+
+	pagevec_lru_add(&lru_pvec);
+	return status;
+}
+EXPORT_SYMBOL(generic_file_buffered_write_one_kernel_page);
+
 static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t *ppos)
--- linux-2.6.16.noarch/mm/swap.c.misc	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/mm/swap.c	2006-04-11 22:02:05.000000000 -0400
@@ -149,6 +149,8 @@ void fastcall lru_cache_add(struct page 
 	put_cpu_var(lru_add_pvecs);
 }
 
+EXPORT_SYMBOL_GPL(lru_cache_add);
+
 void fastcall lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

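Beyond exporting a handful of existing symbols for modular use, the misc patch adds page-cache helpers a cache back end needs: replace_in_page_cache() swaps a new page into an already-occupied slot, install_page_waitqueue_monitor() hooks a page's wait queue, and generic_file_buffered_write_one_kernel_page() pushes a whole kernel page into a backing file through its prepare_write()/commit_write() ops. A hedged sketch of the last helper in use, assuming the caller already holds an open struct file on the backing filesystem; the example_* name is hypothetical.

/* Sketch only: copying one page of netfs data into a backing file with
 * generic_file_buffered_write_one_kernel_page(), as a cache back end might. */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

static int example_store_page(struct file *backing_file, struct page *netfs_page)
{
	/* mirror the netfs page index in the backing file */
	pgoff_t index = netfs_page->index;
	int ret;

	ret = generic_file_buffered_write_one_kernel_page(backing_file, index,
							  netfs_page);
	if (ret < 0)
		printk(KERN_DEBUG "example: cache write of page %lu failed (%d)\n",
		       index, ret);
	return ret;
}
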
linux-2.6.16-cachefs-mkwrite.patch:
 include/linux/mm.h |    4 ++
 mm/memory.c        |   99 ++++++++++++++++++++++++++++++++++++++++-------------
 mm/mmap.c          |   12 +++++-
 mm/mprotect.c      |   11 ++++-
 4 files changed, 98 insertions(+), 28 deletions(-)

--- NEW FILE linux-2.6.16-cachefs-mkwrite.patch ---
--- linux-2.6.16.i686/include/linux/mm.h.mkwrite	2006-03-24 08:47:48.000000000 -0500
+++ linux-2.6.16.i686/include/linux/mm.h	2006-03-24 22:25:18.000000000 -0500
@@ -203,6 +203,10 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+
+	/* notification that a previously read-only page is about to become
+	 * writable; if an error is returned it will cause a SIGBUS */
+	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_NUMA
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
--- linux-2.6.16.i686/mm/memory.c.mkwrite	2006-03-24 08:47:47.000000000 -0500
+++ linux-2.6.16.i686/mm/memory.c	2006-03-24 22:25:18.000000000 -0500
@@ -1558,25 +1558,59 @@ static int do_wp_page(struct mm_struct *
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int ret = VM_FAULT_MINOR;
+	int reuse, ret = VM_FAULT_MINOR;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
-		int reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-		if (reuse) {
-			flush_cache_page(vma, address, pte_pfn(orig_pte));
-			entry = pte_mkyoung(orig_pte);
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-			ptep_set_access_flags(vma, address, page_table, entry, 1);
-			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
-			ret |= VM_FAULT_WRITE;
-			goto unlock;
+	if (unlikely(vma->vm_flags & VM_SHARED)) {
+		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+			/*
+			 * Notify the address space that the page is about to
+			 * become writable so that it can prohibit this or wait
+			 * for the page to get into an appropriate state.
+			 *
+			 * We do this without the lock held, so that it can
+			 * sleep if it needs to.
+			 */
+			page_cache_get(old_page);
+			pte_unmap_unlock(page_table, ptl);
+
+			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+				goto unwritable_page;
+
+			page_cache_release(old_page);
+
+			/*
+			 * Since we dropped the lock we need to revalidate
+			 * the PTE as someone else may have changed it.  If
+			 * they did, we just return, as we can count on the
+			 * MMU to tell us if they didn't also make it writable.
+			 */
+			page_table = pte_offset_map_lock(mm, pmd, address,
+							 &ptl);
+			if (!pte_same(*page_table, orig_pte))
+				goto unlock;
 		}
+
+		reuse = 1;
+	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+		reuse = can_share_swap_page(old_page);
+		unlock_page(old_page);
+	} else {
+		reuse = 0;
+	}
+
+	if (reuse) {
+		flush_cache_page(vma, address, pte_pfn(orig_pte));
+		entry = pte_mkyoung(orig_pte);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		ptep_set_access_flags(vma, address, page_table, entry, 1);
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+		ret |= VM_FAULT_WRITE;
+		goto unlock;
 	}
 
 	/*
@@ -1636,6 +1670,10 @@ oom:
 	if (old_page)
 		page_cache_release(old_page);
 	return VM_FAULT_OOM;
+
+unwritable_page:
+	page_cache_release(old_page);
+	return VM_FAULT_SIGBUS;
 }
 
 /*
@@ -2187,18 +2225,31 @@ retry:
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	if (write_access && !(vma->vm_flags & VM_SHARED)) {
-		struct page *page;
+	if (write_access) {
+		if (!(vma->vm_flags & VM_SHARED)) {
+			struct page *page;
 
-		if (unlikely(anon_vma_prepare(vma)))
-			goto oom;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-		if (!page)
-			goto oom;
-		copy_user_highpage(page, new_page, address);
-		page_cache_release(new_page);
-		new_page = page;
-		anon = 1;
+			if (unlikely(anon_vma_prepare(vma)))
+				goto oom;
+			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+			if (!page)
+				goto oom;
+			copy_user_highpage(page, new_page, address);
+			page_cache_release(new_page);
+			new_page = page;
+			anon = 1;
+
+		} else {
+			/* if the page will be shareable, see if the backing
+			 * address space wants to know that the page is about
+			 * to become writable */
+			if (vma->vm_ops->page_mkwrite &&
+			    vma->vm_ops->page_mkwrite(vma, new_page) < 0
+			    ) {
+				page_cache_release(new_page);
+				return VM_FAULT_SIGBUS;
+			}
+		}
 	}
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
--- linux-2.6.16.i686/mm/mmap.c.mkwrite	2006-03-24 08:47:48.000000000 -0500
+++ linux-2.6.16.i686/mm/mmap.c	2006-03-24 22:25:18.000000000 -0500
@@ -1066,7 +1066,8 @@ munmap_back:
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+	vma->vm_page_prot = protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	vma->vm_pgoff = pgoff;
 
 	if (file) {
@@ -1090,6 +1091,12 @@ munmap_back:
 			goto free_vma;
 	}
 
+	/* Don't make the VMA automatically writable if it's shared, but the
+	 * backer wishes to know when pages are first written to */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -2004,7 +2011,8 @@ unsigned long do_brk(unsigned long addr,
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 	vma->vm_flags = flags;
-	vma->vm_page_prot = protection_map[flags & 0x0f];
+	vma->vm_page_prot = protection_map[flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
--- linux-2.6.16.i686/mm/mprotect.c.mkwrite	2006-03-24 08:47:48.000000000 -0500
+++ linux-2.6.16.i686/mm/mprotect.c	2006-03-24 22:26:07.000000000 -0500
@@ -107,6 +107,7 @@ mprotect_fixup(struct vm_area_struct *vm
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0, old_end = vma->vm_end;
+	unsigned int mask;
 	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
@@ -133,8 +134,6 @@ mprotect_fixup(struct vm_area_struct *vm
 		}
 	}
 
-	newprot = protection_map[newflags & 0xf];
-
 	/*
 	 * First try to merge with previous and/or next vma.
 	 */
@@ -161,6 +160,14 @@ mprotect_fixup(struct vm_area_struct *vm
 	}
 
 success:
+	/* Don't make the VMA automatically writable if it's shared, but the
+	 * backer wishes to know when pages are first written to */
+	mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		mask &= ~VM_SHARED;
+
+	newprot = protection_map[newflags & mask];
+
 	/*
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.

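The mkwrite patch adds a page_mkwrite() hook to vm_operations_struct: when a VMA provides it, shared mappings start out write-protected, the first write fault calls the hook (which may sleep) before the PTE is made writable, and a negative return is turned into SIGBUS. A hedged sketch of a handler that uses this to hold off writes while a page is still being copied into the cache, mirroring nfs_file_page_mkwrite() in the NFS patch below; the example_* names are hypothetical.

/* Sketch only: a page_mkwrite() handler that stalls the first write to a
 * shared-mapped page until a background cache write (PG_fs_misc) finishes. */
#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	wait_on_page_fs_misc(page);	/* may sleep; called without the PTE lock */
	return 0;			/* a negative return would raise SIGBUS */
}

static struct vm_operations_struct example_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
	.page_mkwrite	= example_page_mkwrite,
};
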
linux-2.6.16-cachefs-nfs.patch:
 fs/Kconfig                  |    7 +
 fs/nfs/Makefile             |    1 
 fs/nfs/file.c               |   31 ++++
 fs/nfs/fscache.c            |  300 ++++++++++++++++++++++++++++++++++++++++++++
 fs/nfs/inode.c              |   44 ++++++
 fs/nfs/read.c               |  244 +++++++++++++++++++++++++++++++++++
 fs/nfs/sysctl.c             |   43 ++++++
 include/linux/nfs4_mount.h  |    1 
 include/linux/nfs_fs.h      |    5 
 include/linux/nfs_fs_sb.h   |    9 +
 include/linux/nfs_fscache.h |  244 +++++++++++++++++++++++++++++++++++
 include/linux/nfs_mount.h   |    1 
 12 files changed, 930 insertions(+)

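The NFS patch below registers NFS as an FS-Cache netfs and builds a cookie hierarchy keyed by server address and port, by the per-mount cache tag, and by file handle, using mtime, ctime and size as auxiliary data to detect files that changed on the server. Its read path stores freshly fetched pages in the cache with fscache_write_page(), using PG_fs_misc to track the in-flight copy. A condensed, hedged sketch of that store pattern follows; see nfs_readpage_to_fscache() in the patch for the real thing, and note the example_* names are illustrative.

/* Sketch only: condensed form of the "store a just-read page in the cache"
 * pattern used by nfs_readpage_to_fscache() below. */
#include <linux/kernel.h>
#include <linux/fscache.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>

static void example_to_cache_done(struct page *page, void *data, int error)
{
	/* the cache has finished with the page, successfully or otherwise */
	end_page_fs_misc(page);
}

static void example_store_in_cache(struct fscache_cookie *cookie,
				   struct page *page)
{
	int ret;

	if (TestSetPageFsMisc(page))
		BUG();			/* one cache write per page at a time */

	ret = fscache_write_page(cookie, page, example_to_cache_done, NULL,
				 GFP_KERNEL);
	if (ret != 0) {
		/* the cache refused the page: undo the markings */
		fscache_uncache_page(cookie, page);
		ClearPagePrivate(page);
		end_page_fs_misc(page);
	}
}
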
--- NEW FILE linux-2.6.16-cachefs-nfs.patch ---
--- linux-2.6.16.noarch/fs/nfs/Makefile.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/nfs/Makefile	2006-04-21 00:45:11.000000000 -0400
@@ -14,4 +14,5 @@ nfs-$(CONFIG_NFS_V4)	+= nfs4proc.o nfs4x
 			   callback.o callback_xdr.o callback_proc.o
 nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
 nfs-$(CONFIG_SYSCTL) += sysctl.o
+nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
 nfs-objs		:= $(nfs-y)
--- linux-2.6.16.noarch/fs/nfs/file.c.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/nfs/file.c	2006-04-21 00:45:11.000000000 -0400
@@ -27,6 +27,8 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
+#include <linux/nfs_fscache.h>
+#include <linux/buffer_head.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -252,6 +254,19 @@ nfs_file_sendfile(struct file *filp, lof
 	return res;
 }
 
+#ifdef CONFIG_NFS_FSCACHE
+static int nfs_file_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	wait_on_page_fs_misc(page);
+	return 0;
+}
+static struct vm_operations_struct nfs_fs_vm_operations = {
+	.nopage			= filemap_nopage,
+	.populate		= filemap_populate,
+	.page_mkwrite   = nfs_file_page_mkwrite,
+};
+#endif
+
 static int
 nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
@@ -265,6 +280,12 @@ nfs_file_mmap(struct file * file, struct
 	status = nfs_revalidate_file(inode, file);
 	if (!status)
 		status = generic_file_mmap(file, vma);
+
+#ifdef CONFIG_NFS_FSCACHE
+	if (NFS_I(inode)->fscache != NULL)
+		vma->vm_ops = &nfs_fs_vm_operations;
+#endif
+
 	return status;
 }
 
@@ -316,6 +337,11 @@ static int nfs_commit_write(struct file 
 	return status;
 }
 
+/*
+ * since we use page->private for our own nefarious purposes when using
+ * fscache, we have to override extra address space ops to prevent
+ * fs/buffer.c from getting confused, even though we may not have asked its
+ * opinion
+ */
 struct address_space_operations nfs_file_aops = {
 	.readpage = nfs_readpage,
 	.readpages = nfs_readpages,
@@ -327,6 +353,11 @@ struct address_space_operations nfs_file
 #ifdef CONFIG_NFS_DIRECTIO
 	.direct_IO = nfs_direct_IO,
 #endif
+#ifdef CONFIG_NFS_FSCACHE
+	.sync_page	= block_sync_page,
+	.releasepage	= nfs_releasepage,
+	.invalidatepage	= nfs_invalidatepage,
+#endif
 };
 
 /* 
--- linux-2.6.16.noarch/fs/nfs/inode.c.nfs	2006-04-21 00:43:45.000000000 -0400
+++ linux-2.6.16.noarch/fs/nfs/inode.c	2006-04-21 00:45:11.000000000 -0400
@@ -34,6 +34,7 @@
 #include <linux/seq_file.h>
 #include <linux/mount.h>
 #include <linux/nfs_idmap.h>
+#include <linux/nfs_fscache.h>
 #include <linux/vfs.h>
 
 #include <asm/system.h>
@@ -172,6 +173,8 @@ nfs_clear_inode(struct inode *inode)
 	cred = nfsi->cache_access.cred;
 	if (cred)
 		put_rpccred(cred);
+
+	nfs_clear_fscookie(NFS_SERVER(inode), nfsi);
 	BUG_ON(atomic_read(&nfsi->data_updates) != 0);
 }
 
@@ -519,6 +522,9 @@ nfs_fill_super(struct super_block *sb, s
 			server->namelen = NFS2_MAXNAMLEN;
 	}
 
+	if (server->flags & NFS_MOUNT_FSCACHE)
+		nfs_fill_fscookie(sb);
+
 	sb->s_op = &nfs_sops;
 	return nfs_sb_init(sb, authflavor);
 }
@@ -593,6 +599,7 @@ static int nfs_show_options(struct seq_f
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
 		{ NFS_MOUNT_NOACL, ",noacl", "" },
+		{ NFS_MOUNT_FSCACHE, ",fsc", "" },
 		{ 0, NULL, NULL }
 	};
 	struct proc_nfs_info *nfs_infop;
@@ -675,6 +682,8 @@ void nfs_zap_caches(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	nfs_zap_caches_locked(inode);
 	spin_unlock(&inode->i_lock);
+
+	nfs_zap_fscookie(NFS_SERVER(inode), NFS_I(inode));
 }
 
 static void nfs_zap_acl_cache(struct inode *inode)
@@ -822,6 +831,8 @@ nfs_fhget(struct super_block *sb, struct
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
 		nfsi->cache_access.cred = NULL;
 
+		nfs_fhget_fscookie(sb, nfsi);
+
 		unlock_new_inode(inode);
 	} else
 		nfs_refresh_inode(inode, fattr);
@@ -901,6 +912,7 @@ void nfs_setattr_update_inode(struct ino
 	}
 	if ((attr->ia_valid & ATTR_SIZE) != 0) {
 		inode->i_size = attr->ia_size;
+		nfs_set_fscsize(NFS_SERVER(inode), NFS_I(inode), inode->i_size);
 		vmtruncate(inode, attr->ia_size);
 	}
 }
@@ -1081,6 +1093,9 @@ int nfs_open(struct inode *inode, struct
 	ctx->mode = filp->f_mode;
 	nfs_file_set_open_context(filp, ctx);
 	put_nfs_open_context(ctx);
+
+	nfs_set_fscache(inode, ((filp->f_flags & O_ACCMODE) == O_RDONLY));
+
 	return 0;
 }
 
@@ -1212,6 +1227,8 @@ void nfs_revalidate_mapping(struct inode
 		}
 		spin_unlock(&inode->i_lock);
 
+		nfs_renew_fscookie(NFS_SERVER(inode), nfsi);
+
 		dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
 				inode->i_sb->s_id,
 				(long long)NFS_FILEID(inode));
@@ -1459,11 +1476,13 @@ static int nfs_update_inode(struct inode
 			if (data_stable) {
 				inode->i_size = new_isize;
 				invalid |= NFS_INO_INVALID_DATA;
+				nfs_set_fscsize(NFS_SERVER(inode), nfsi, inode->i_size);
 			}
 			invalid |= NFS_INO_INVALID_ATTR;
 		} else if (new_isize > cur_isize) {
 			inode->i_size = new_isize;
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+			nfs_set_fscsize(NFS_SERVER(inode), nfsi, inode->i_size);
 		}
 		nfsi->cache_change_attribute = jiffies;
 		dprintk("NFS: isize change on server for file %s/%ld\n",
@@ -1633,6 +1652,15 @@ static struct super_block *nfs_get_sb(st
 		goto out_err;
 	}
 #endif /* CONFIG_NFS_V3 */
+	/* if filesystem caching isn't compiled in, then requesting its use is
+	 * invalid */
+#ifndef CONFIG_NFS_FSCACHE
+	if (data->flags & NFS_MOUNT_FSCACHE) {
+		printk(KERN_WARNING
+			"NFS: kernel not compiled with CONFIG_NFS_FSCACHE\n");
+		return ERR_PTR(-EINVAL);
+	}
+#endif
 
 	s = ERR_PTR(-ENOMEM);
 	server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL);
@@ -1698,6 +1726,8 @@ static void nfs_kill_super(struct super_
 
 	kill_anon_super(s);
 
+	nfs_kill_fscookie(server);
+
 	if (!IS_ERR(server->client))
 		rpc_shutdown_client(server->client);
 	if (!IS_ERR(server->client_sys))
@@ -1876,6 +1906,9 @@ static int nfs4_fill_super(struct super_
 	}
 
 	sb->s_time_gran = 1;
+	
+	if (server->flags & NFS4_MOUNT_FSCACHE)
+		nfs4_fill_fscookie(sb);
 
 	sb->s_op = &nfs4_sops;
 	err = nfs_sb_init(sb, authflavour);
@@ -2020,6 +2053,8 @@ static void nfs4_kill_super(struct super
 
 	nfs4_renewd_prepare_shutdown(server);
 
+	nfs_kill_fscookie(NFS_SB(sb));
+
 	if (server->client != NULL && !IS_ERR(server->client))
 		rpc_shutdown_client(server->client);
 	rpciod_down();		/* release rpciod */
@@ -2182,6 +2217,11 @@ static int __init init_nfs_fs(void)
 {
 	int err;
 
+	/* we want to be able to cache */
+	err = nfs_register_netfs();
+	if (err < 0)
+		goto out5;
+
 	err = nfs_init_nfspagecache();
 	if (err)
 		goto out4;
@@ -2229,6 +2269,9 @@ out2:
 out3:
 	nfs_destroy_nfspagecache();
 out4:
+	nfs_unregister_netfs();
+out5:
+
 	return err;
 }
 
@@ -2241,6 +2284,7 @@ static void __exit exit_nfs_fs(void)
 	nfs_destroy_readpagecache();
 	nfs_destroy_inodecache();
 	nfs_destroy_nfspagecache();
+	nfs_unregister_netfs();
 #ifdef CONFIG_PROC_FS
 	rpc_proc_unregister("nfs");
 #endif
--- /dev/null	2006-04-17 00:17:38.665575608 -0400
+++ linux-2.6.16.noarch/fs/nfs/fscache.c	2006-04-21 00:46:19.000000000 -0400
@@ -0,0 +1,300 @@
+/* fscache.c: NFS filesystem cache interface
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/nfs_fscache.h>
+#include <keys/user-type.h>
+
+/*
+ * Sysctl variables
+ */
+int nfs_fscache_to_pages;
+int nfs_fscache_from_pages;
+int nfs_fscache_uncache_page;
+int nfs_fscache_from_error;
+int nfs_fscache_to_error;
+
+#define NFSDBG_FACILITY		NFSDBG_FSCACHE
+
+struct nfs_fh_auxdata {
+	struct timespec	i_mtime;
+	struct timespec	i_ctime;
+	loff_t		i_size;
+};
+
+static struct fscache_netfs_operations nfs_cache_ops = {
+};
+
+struct fscache_netfs nfs_cache_netfs = {
+	.name			= "nfs",
+	.version		= 0,
+	.ops			= &nfs_cache_ops,
+};
+
+static const uint8_t nfs_cache_ipv6_wrapper_for_ipv4[12] = {
+	[0 ... 9]	= 0x00,
+	[10 ... 11]	= 0xff
+};
+
+static uint16_t nfs_server_get_key(const void *cookie_netfs_data,
+		void *buffer, uint16_t bufmax)
+{
+	const struct nfs_server *server = cookie_netfs_data;
+	uint16_t len = 0;
+
+	switch (server->addr.sin_family) {
+	case AF_INET:
+		memcpy(buffer + 0, &nfs_cache_ipv6_wrapper_for_ipv4, 12);
+		memcpy(buffer + 12, &server->addr.sin_addr, 4);
+		memcpy(buffer + 16, &server->addr.sin_port, 2);
+		len = 18;
+		break;
+
+	case AF_INET6:
+		memcpy(buffer + 0, &server->addr.sin_addr, 16);
+		memcpy(buffer + 16, &server->addr.sin_port, 2);
+		len = 18;
+		break;
+
+	default:
+		len = 0;
+		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
+			server->addr.sin_family);
+		break;
+	}
+
+	return len;
+}
+
+/*
+ * the root index for the filesystem is defined by the NFS server's IP address and port
+ */
+struct fscache_cookie_def nfs_cache_server_index_def = {
+	.name		= "NFS.servers",
+	.type 		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= nfs_server_get_key,
+};
+
+static uint16_t nfs_fsctag_get_key(const void *cookie_netfs_data,
+		void *buffer, uint16_t bufmax)
+{
+	const struct nfs_server *server = cookie_netfs_data;
+	uint16_t len = 0;
+
+	len = server->fsctag.size;
+	memcpy(buffer, server->fsctag.buf, len);
+
+	return len;
+}
+
+/*
+ * the second-level index is keyed by the filesystem cache tag for the mount
+ */
+struct fscache_cookie_def nfs_fsctag_index_def = {
+	.name		= "NFS.fsctag",
+	.type 		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= nfs_fsctag_get_key,
+};
+struct fscache_cookie_def nfs4_fsctag_index_def = {
+	.name		= "NFS4.fsctag",
+	.type 		= FSCACHE_COOKIE_TYPE_INDEX,
+	.get_key	= nfs_fsctag_get_key,
+};
+
+static uint16_t nfs_fh_get_key(const void *cookie_netfs_data,
+		void *buffer, uint16_t bufmax)
+{
+	const struct nfs_inode *nfsi = cookie_netfs_data;
+	uint16_t nsize;
+
+	/* set the file handle */
+	nsize = nfsi->fh.size;
+	memcpy(buffer, nfsi->fh.data, nsize);
+
+	return nsize;
+}
+
+/*
+ * indication of pages that now have cache metadata retained
+ * - this function should mark the specified pages as now being cached
+ */
+static void nfs_fh_mark_pages_cached(void *cookie_netfs_data,
+				     struct address_space *mapping,
+				     struct pagevec *cached_pvec)
+{
+	unsigned long loop;
+
+	dprintk("NFS: nfs_fh_mark_pages_cached: nfsi 0x%p pages %ld\n", 
+		cookie_netfs_data, cached_pvec->nr);
+
+	for (loop = 0; loop < cached_pvec->nr; loop++)
+		SetPagePrivate(cached_pvec->pages[loop]);
+
+	return;
+}
+
+/*
+ * indication that the cookie is no longer cached
+ * - this function is called when the backing store currently caching a cookie
+ *   is removed
+ * - the netfs should use this to clean up any markers indicating cached pages
+ * - this is mandatory for any object that may have data
+ */
+static void nfs_fh_now_uncached(void *cookie_netfs_data)
+{
+	struct nfs_inode *nfsi = cookie_netfs_data;
+	struct pagevec pvec;
+	pgoff_t first;
+	int loop, nr_pages;
+
+	pagevec_init(&pvec, 0);
+	first = 0;
+
+	dprintk("NFS: nfs_fh_now_uncached: nfs_inode 0x%p\n", nfsi);
+
+	for (;;) {
+		/* grab a bunch of pages to clean */
+		nr_pages = find_get_pages(nfsi->vfs_inode.i_mapping, first,
+					  PAGEVEC_SIZE, pvec.pages);
+		if (!nr_pages)
+			break;
+
+		for (loop = 0; loop < nr_pages; loop++)
+			ClearPagePrivate(pvec.pages[loop]);
+
+		first = pvec.pages[nr_pages - 1]->index + 1;
+
+		pvec.nr = nr_pages;
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+}
+
+/*****************************************************************************/
+/*
+ * get certain file attributes from the netfs data
+ * - this function can be absent for an index
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ *   presented
+ */
+static void nfs_fh_get_attr(const void *cookie_netfs_data, uint64_t *size)
+{
+	const struct nfs_inode *nfsi = cookie_netfs_data;
+
+	*size = nfsi->vfs_inode.i_size;
+}
+
+/*****************************************************************************/
+/*
+ * get the auxiliary data from netfs data
+ * - this function can be absent if the index carries no state data
+ * - should store the auxiliary data in the buffer
+ * - should return the amount of data stored
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ *   presented
+ */
+static uint16_t nfs_fh_get_aux(const void *cookie_netfs_data,
+			       void *buffer, uint16_t bufmax)
+{
+	struct nfs_fh_auxdata auxdata;
+	const struct nfs_inode *nfsi = cookie_netfs_data;
+
+	auxdata.i_size = nfsi->vfs_inode.i_size;
+	auxdata.i_mtime = nfsi->vfs_inode.i_mtime;
+	auxdata.i_ctime = nfsi->vfs_inode.i_ctime;
+
+	if (bufmax > sizeof(auxdata))
+		bufmax = sizeof(auxdata);
+
+	memcpy(buffer, &auxdata, bufmax);
+	return bufmax;
+}
+
+/*****************************************************************************/
+/*
+ * consult the netfs about the state of an object
+ * - this function can be absent if the index carries no state data
+ * - the netfs data from the cookie being used as the target is
+ *   presented, as is the auxiliary data
+ */
+static fscache_checkaux_t nfs_fh_check_aux(void *cookie_netfs_data,
+					   const void *data, uint16_t datalen)
+{
+	struct nfs_fh_auxdata auxdata;
+	struct nfs_inode *nfsi = cookie_netfs_data;
+
+	if (datalen > sizeof(auxdata))
+		return FSCACHE_CHECKAUX_OBSOLETE;
+
+	auxdata.i_size = nfsi->vfs_inode.i_size;
+	auxdata.i_mtime = nfsi->vfs_inode.i_mtime;
+	auxdata.i_ctime = nfsi->vfs_inode.i_ctime;
+
+	if (memcmp(data, &auxdata, datalen) != 0)
+		return FSCACHE_CHECKAUX_OBSOLETE;
+
+	return FSCACHE_CHECKAUX_OKAY;
+}
+
+/*
+ * the primary index for each server is simply made up of a series of NFS file
+ * handles
+ */
+struct fscache_cookie_def nfs_cache_fh_index_def = {
+	.name			= "NFS.fh",
+	.type			= FSCACHE_COOKIE_TYPE_DATAFILE,
+	.get_key		= nfs_fh_get_key,
+	.get_attr		= nfs_fh_get_attr,
+	.get_aux		= nfs_fh_get_aux,
+	.check_aux		= nfs_fh_check_aux,
+	.mark_pages_cached	= nfs_fh_mark_pages_cached,
+	.now_uncached		= nfs_fh_now_uncached,
+};
+
+int nfs_load_fsctag(const char *tag, struct nfs_server *server)
+{
+	struct user_key_payload *payload;
+	struct key *key;
+	int plen = 0;
+	char *mntpt = NULL;
+
+	server->fsctag.size = 0;
+	server->fsctag.buf = NULL;
+
+	key = request_key(&key_type_user, tag, NULL);
+	if (IS_ERR(key)) {
+		dprintk("NFS: request_key failed: %ld\n", PTR_ERR(key));
+		return plen;
+	}
+	rcu_read_lock();
+	payload = key->payload.data;
+	if (payload) {
+		plen = payload->datalen;
+		if (plen) {
+			/* can't use GFP_KERNEL under rcu_read_lock() */
+			if ((mntpt = kmalloc(plen, GFP_ATOMIC)))
+				memcpy(mntpt, payload->data, plen);
+		}
+	}
+	rcu_read_unlock();
+	if (plen && mntpt) {
+		server->fsctag.size = plen;
+		server->fsctag.buf = mntpt;
+	}
+	return plen;
+}
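
Editorial aside, not part of the patch: the tag looked up above comes from a
user-type key, so something in userspace is expected to have added a key named
"mount:nfs:fsctag" or "mount:nfs4:fsctag" to one of the mounter's keyrings
before the mount is attempted. The buffer is kmalloc()'d here, so the unmount
path needs to release it; a minimal sketch of that teardown, with the helper
name purely hypothetical:

	static void nfs_free_fsctag(struct nfs_server *server)
	{
		kfree(server->fsctag.buf);	/* kfree(NULL) is a no-op */
		server->fsctag.buf = NULL;
		server->fsctag.size = 0;
	}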
--- linux-2.6.16.noarch/fs/nfs/read.c.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/nfs/read.c	2006-04-21 00:45:11.000000000 -0400
@@ -27,8 +27,11 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs_fscache.h>
 #include <linux/smp_lock.h>
 
+
 #include <asm/system.h>
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
@@ -72,6 +75,53 @@ int nfs_return_empty_page(struct page *p
 	return 0;
 }
 
+#ifdef CONFIG_NFS_FSCACHE
+/*
+ * store a newly fetched page in fscache
+ */
+static void
+nfs_readpage_to_fscache_complete(struct page *page, void *data, int error)
+{
+	dfprintk(FSCACHE, 
+		"NFS:     readpage_to_fscache_complete (p:%p(i:%lx f:%lx)/%d)\n", 
+		page, page->index, page->flags, error);
+
+	end_page_fs_misc(page);
+}
+
+static inline void
+nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+{
+	int ret;
+
+	dfprintk(FSCACHE, "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
+		NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+
+	if (TestSetPageFsMisc(page))
+		BUG();
+	ret = fscache_write_page(NFS_I(inode)->fscache, page,
+		nfs_readpage_to_fscache_complete, NULL, GFP_KERNEL);
+	dfprintk(FSCACHE, 
+		"NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n", 
+			page, page->index, page->flags, ret);
+	if (ret != 0) {
+		fscache_uncache_page(NFS_I(inode)->fscache, page);
+		nfs_fscache_uncache_page++;
+		ClearPagePrivate(page);
+		end_page_fs_misc(page);
+		nfs_fscache_to_error = ret;
+	} else
+		nfs_fscache_to_pages++;
+}
+#else
+static inline void
+nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+{
+	BUG();
+}
+#endif
+
+
 /*
  * Read a page synchronously.
  */
@@ -150,6 +200,14 @@ static int nfs_readpage_sync(struct nfs_
 		ClearPageError(page);
 	result = 0;
 
+#ifdef CONFIG_NFS_FSCACHE
+	if (PagePrivate(page))
+		nfs_readpage_to_fscache(inode, page, 1);
+#endif
+	unlock_page(page);
+
+	return result;
+
 io_error:
 	unlock_page(page);
 	nfs_readdata_free(rdata);
@@ -181,6 +239,12 @@ static int nfs_readpage_async(struct nfs
 
 static void nfs_readpage_release(struct nfs_page *req)
 {
+#ifdef CONFIG_NFS_FSCACHE
+	struct inode *d_inode = req->wb_context->dentry->d_inode;
+
+	if (PagePrivate(req->wb_page) && PageUptodate(req->wb_page))
+		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);
+#endif
 	unlock_page(req->wb_page);
 
 	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
@@ -477,6 +541,118 @@ void nfs_readpage_result(struct rpc_task
 	data->complete(data, status);
 }
 
+
+/*
+ * Read a page through the on-disc cache if possible
+ */
+#ifdef CONFIG_NFS_FSCACHE
+static void
+nfs_readpage_from_fscache_complete(struct page *page, void *data, int error)
+{
+	dfprintk(FSCACHE, 
+		"NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
+		page, data, error);
+
+	if (error)
+		SetPageError(page);
+	else
+		SetPageUptodate(page);
+
+	unlock_page(page);
+}
+
+static inline int
+nfs_readpage_from_fscache(struct inode *inode, struct page *page)
+{
+	int ret;
+
+	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_FSCACHE))
+		return 1;
+
+	dfprintk(FSCACHE, 
+		"NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
+		NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+
+	ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+					 page,
+					 nfs_readpage_from_fscache_complete,
+					 NULL,
+					 GFP_KERNEL);
+
+	switch (ret) {
+	case 0: /* read BIO submitted (page in fscache) */
+		dfprintk(FSCACHE, 
+			"NFS:    readpage_from_fscache: BIO submitted\n");
+		nfs_fscache_from_pages++;
+		return ret;
+
+	case -ENOBUFS: /* inode not in cache */
+	case -ENODATA: /* page not in cache */
+		dfprintk(FSCACHE, 
+			"NFS:    readpage_from_fscache error %d\n", ret);
+		return 1;
+
+	default:
+		dfprintk(FSCACHE, "NFS:    readpage_from_fscache %d\n", ret);
+		nfs_fscache_from_error = ret;
+	}
+	return ret;
+}
+
+static inline
+int nfs_getpages_from_fscache(struct inode *inode,
+	struct address_space *mapping,
+	struct list_head *pages,
+	unsigned *nr_pages)
+{
+	int ret, npages = *nr_pages;
+
+	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_FSCACHE))
+		return 1;
+
+	dfprintk(FSCACHE, 
+		"NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
+		NFS_I(inode)->fscache, *nr_pages, inode);
+
+	ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+	  	mapping, pages, nr_pages, 
+	  	nfs_readpage_from_fscache_complete,
+	  	NULL, mapping_gfp_mask(mapping));
+
+
+	switch (ret) {
+	case 0: /* read BIO submitted (page in fscache) */
+		BUG_ON(!list_empty(pages));
+		BUG_ON(*nr_pages != 0);
+		dfprintk(FSCACHE, 
+			"NFS: nfs_getpages_from_fscache: BIO submitted\n");
+
+		nfs_fscache_from_pages += npages;
+		return ret;
+
+	case -ENOBUFS: /* inode not in cache */
+	case -ENODATA: /* page not in cache */
+		dfprintk(FSCACHE, 
+			"NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
+		return 1;
+
+	default:
+		dfprintk(FSCACHE, 
+			"NFS: nfs_getpages_from_fscache: ret  %d\n", ret);
+		nfs_fscache_from_error = ret;
+	}
+
+	return ret;
+}
+#else
+static inline
+int nfs_getpages_from_fscache(struct inode *inode,
+	struct address_space *mapping,
+	struct list_head *pages,
+	unsigned *nr_pages)
+{ return 1; }
+#endif
+
 /*
  * Read a page over NFS.
  * We read the page synchronously in the following case:
@@ -510,6 +686,15 @@ int nfs_readpage(struct file *file, stru
 		ctx = get_nfs_open_context((struct nfs_open_context *)
 				file->private_data);
 	if (!IS_SYNC(inode)) {
+#ifdef CONFIG_NFS_FSCACHE
+		error = nfs_readpage_from_fscache(inode, page);
+#if 0
+		if (error < 0)
+			goto out_error;
+#endif
+		if (error == 0)
+			goto out;
+#endif
 		error = nfs_readpage_async(ctx, inode, page);
 		goto out;
 	}
@@ -540,6 +725,7 @@ readpage_async_filler(void *data, struct
 	unsigned int len;
 
 	nfs_wb_page(inode, page);
+
 	len = nfs_page_length(inode, page);
 	if (len == 0)
 		return nfs_return_empty_page(page);
@@ -571,6 +757,15 @@ int nfs_readpages(struct file *filp, str
 			(long long)NFS_FILEID(inode),
 			nr_pages);
 
+#ifdef CONFIG_NFS_FSCACHE
+	/* attempt to read as many of the pages as possible from the cache
+	 * - this returns -ENOBUFS immediately if the cookie is negative
+	 */
+	ret = nfs_getpages_from_fscache(inode, mapping, pages, &nr_pages);
+	if (ret == 0)
+		return ret; /* all read */
+#endif
+
 	if (filp == NULL) {
 		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
 		if (desc.ctx == NULL)
@@ -613,3 +808,52 @@ void nfs_destroy_readpagecache(void)
 	if (kmem_cache_destroy(nfs_rdata_cachep))
 		printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
 }
+
+#ifdef CONFIG_NFS_FSCACHE
+int nfs_invalidatepage(struct page *page, unsigned long offset)
+{
+	int ret = 1;
+
+	BUG_ON(!PageLocked(page));
+
+	if (PagePrivate(page)) {
+		struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+
+		BUG_ON(nfsi->fscache == NULL);
+
+		dfprintk(FSCACHE,
+			"NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
+			 nfsi->fscache, page, nfsi);
+
+		if (offset == 0) {
+			BUG_ON(!PageLocked(page));
+			ret = 0;
+			if (!PageWriteback(page))
+				ret = page->mapping->a_ops->releasepage(page, 0);
+		}
+	} else
+		ret = 0;
+
+	return ret;
+}
+int nfs_releasepage(struct page *page, gfp_t gfp_flags)
+{
+	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+
+	BUG_ON(nfsi->fscache == NULL);
+
+	dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
+		 nfsi->fscache, page, nfsi);
+
+	wait_on_page_fs_misc(page);
+	fscache_uncache_page(nfsi->fscache, page);
+	nfs_fscache_uncache_page++;
+	ClearPagePrivate(page);
+	return 0;
+}
+int nfs_mkwrite(struct page *page)
+{
+	wait_on_page_fs_misc(page);
+	return 0;
+}
+#endif
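
Editorial sketch, not part of this hunk: nfs_invalidatepage() and
nfs_releasepage() above are meant to be installed in the NFS address_space
operations elsewhere in this patch set. Assuming fs/nfs/file.c keeps its
existing methods, the wiring would look roughly like:

	struct address_space_operations nfs_file_aops = {
		.readpage	= nfs_readpage,
		.readpages	= nfs_readpages,
		/* ... existing write/commit methods unchanged ... */
#ifdef CONFIG_NFS_FSCACHE
		.invalidatepage	= nfs_invalidatepage,
		.releasepage	= nfs_releasepage,
#endif
	};

nfs_mkwrite() is intended for a page_mkwrite-style hook (the fs_misc wait
keeps a page from being dirtied while a cache write to it is still in flight);
that hook is not shown in this file.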
--- linux-2.6.16.noarch/fs/nfs/sysctl.c.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/fs/nfs/sysctl.c	2006-04-21 00:45:11.000000000 -0400
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/nfs4.h>
 #include <linux/nfs_idmap.h>
+#include <linux/nfs_fscache.h>
 
 #include "callback.h"
 
@@ -46,6 +47,48 @@ static ctl_table nfs_cb_sysctls[] = {
 		.strategy = &sysctl_jiffies,
 	},
 #endif
+#ifdef CONFIG_NFS_FSCACHE
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "fscache_from_error",
+		.data = &nfs_fscache_from_error,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "fscache_to_error",
+		.data = &nfs_fscache_to_error,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "fscache_uncache_page",
+		.data = &nfs_fscache_uncache_page,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "fscache_to_pages",
+		.data = &nfs_fscache_to_pages,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "fscache_from_pages",
+		.data = &nfs_fscache_from_pages,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+#endif
 	{ .ctl_name = 0 }
 };
 
--- linux-2.6.16.noarch/fs/Kconfig.nfs	2006-04-21 00:43:47.000000000 -0400
+++ linux-2.6.16.noarch/fs/Kconfig	2006-04-21 00:45:11.000000000 -0400
@@ -1484,6 +1484,13 @@ config NFS_V4
 
 	  If unsure, say N.
 
+config NFS_FSCACHE
+	bool "Provide NFS client caching support (EXPERIMENTAL)"
+	depends on NFS_FS && FSCACHE && EXPERIMENTAL
+	help
+	  Say Y here if you want NFS data to be cached locally on disc through
+	  the general filesystem cache manager.
+
 config NFS_DIRECTIO
 	bool "Allow direct I/O on NFS files (EXPERIMENTAL)"
 	depends on NFS_FS && EXPERIMENTAL
--- linux-2.6.16.noarch/include/linux/nfs4_mount.h.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/nfs4_mount.h	2006-04-21 00:45:11.000000000 -0400
@@ -65,6 +65,7 @@ struct nfs4_mount_data {
 #define NFS4_MOUNT_NOCTO	0x0010	/* 1 */
 #define NFS4_MOUNT_NOAC		0x0020	/* 1 */
 #define NFS4_MOUNT_STRICTLOCK	0x1000	/* 1 */
+#define NFS4_MOUNT_FSCACHE	0x2000	/* 1 */
 #define NFS4_MOUNT_FLAGMASK	0xFFFF
 
 #endif
--- linux-2.6.16.noarch/include/linux/nfs_fs.h.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/nfs_fs.h	2006-04-21 00:45:11.000000000 -0400
@@ -29,6 +29,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/rwsem.h>
 #include <linux/mempool.h>
+#include <linux/fscache.h>
 
 /*
  * Enable debugging support for nfs client.
@@ -186,6 +187,9 @@ struct nfs_inode {
 	int			 delegation_state;
 	struct rw_semaphore	rwsem;
 #endif /* CONFIG_NFS_V4*/
+#ifdef CONFIG_NFS_FSCACHE
+	struct fscache_cookie	*fscache;
+#endif
 	struct inode		vfs_inode;
 };
 
@@ -631,6 +635,7 @@ extern void * nfs_root_data(void);
 #define NFSDBG_FILE		0x0040
 #define NFSDBG_ROOT		0x0080
 #define NFSDBG_CALLBACK		0x0100
+#define NFSDBG_FSCACHE		0x0200
 #define NFSDBG_ALL		0xFFFF
 
 #ifdef __KERNEL__
--- linux-2.6.16.noarch/include/linux/nfs_fs_sb.h.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/nfs_fs_sb.h	2006-04-21 00:45:11.000000000 -0400
@@ -3,6 +3,7 @@
 
 #include <linux/list.h>
 #include <linux/backing-dev.h>
+#include <linux/fscache.h>
 
 /*
  * NFS client parameters stored in the superblock.
@@ -47,6 +48,14 @@ struct nfs_server {
 						   that are supported on this
 						   filesystem */
 #endif
+
+#ifdef CONFIG_NFS_FSCACHE
+	struct fscache_cookie	*fscache;	/* cache cookie */
+	struct {
+		uint16_t size;
+		void     *buf;
+	} fsctag;
+#endif
 };
 
 /* Server capabilities */
--- linux-2.6.16.noarch/include/linux/nfs_mount.h.nfs	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/nfs_mount.h	2006-04-21 00:45:11.000000000 -0400
@@ -61,6 +61,7 @@ struct nfs_mount_data {
 #define NFS_MOUNT_NOACL		0x0800	/* 4 */
 #define NFS_MOUNT_STRICTLOCK	0x1000	/* reserved for NFSv4 */
 #define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 */
+#define NFS_MOUNT_FSCACHE	0x4000
 #define NFS_MOUNT_FLAGMASK	0xFFFF
 
 #endif
--- /dev/null	2006-04-17 00:17:38.665575608 -0400
+++ linux-2.6.16.noarch/include/linux/nfs_fscache.h	2006-04-21 00:46:19.000000000 -0400
@@ -0,0 +1,244 @@
+/* nfs_fscache.h: NFS filesystem cache interface definitions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NFS_FSCACHE_H
+#define _NFS_FSCACHE_H
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+
+#ifdef CONFIG_NFS_FSCACHE
+#include <linux/fscache.h>
+
+extern struct fscache_netfs nfs_cache_netfs;
+extern struct fscache_cookie_def nfs_cache_server_index_def;
+extern struct fscache_cookie_def nfs_fsctag_index_def;
+extern struct fscache_cookie_def nfs_cache_fh_index_def;
+extern struct fscache_cookie_def nfs4_fsctag_index_def;
+
+extern int nfs_invalidatepage(struct page *, unsigned long);
+extern int nfs_releasepage(struct page *, gfp_t);
+extern int nfs_mkwrite(struct page *);
+extern int nfs_load_fsctag(const char *tag, struct nfs_server *server);
+
+extern int nfs_fscache_to_pages;
+extern int nfs_fscache_from_pages;
+extern int nfs_fscache_uncache_page;
+extern int nfs_fscache_from_error;
+extern int nfs_fscache_to_error;
+
+static inline void
+nfs_set_fscsize(struct nfs_server *server, 
+	struct nfs_inode *nfsi, loff_t i_size)
+{
+	if (!(server->flags & NFS_MOUNT_FSCACHE))
+		return;
+
+	fscache_set_i_size(nfsi->fscache, i_size);
+
+	return;
+}
+static inline void
+nfs_renew_fscookie(struct nfs_server *server, struct nfs_inode *nfsi)
+{
+	struct fscache_cookie *old =  nfsi->fscache;
+
+	if (!(server->flags & NFS_MOUNT_FSCACHE)) {
+		nfsi->fscache = NULL;
+		return;
+	}
+
+	/* retire the current fscache cookie and get a new one */
+	fscache_relinquish_cookie(nfsi->fscache, 1);
+	nfsi->fscache = fscache_acquire_cookie(server->fscache, 
+		&nfs_cache_fh_index_def, nfsi);
+	fscache_set_i_size(nfsi->fscache, nfsi->vfs_inode.i_size);
+
+	dfprintk(FSCACHE,
+		"NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
+		server, nfsi, old, nfsi->fscache);
+
+	return;
+}
+
+static inline void nfs4_fill_fscookie(struct super_block *sb)
+{
+	struct nfs_server *server = NFS_SB(sb);
+
+	if (!(server->flags & NFS4_MOUNT_FSCACHE)) {
+		server->fscache = NULL;
+		return;
+	}
+	server->fscache = NULL;
+	if (nfs_load_fsctag("mount:nfs4:fsctag", server)) {
+		/* create a cache index for looking up filehandles */
+		server->fscache = fscache_acquire_cookie(nfs_cache_netfs.primary_index,
+				&nfs4_fsctag_index_def, server);
+	}
+
+	if (server->fscache == NULL) {
+		printk(KERN_WARNING "NFS4: No Fscache cookie. Turning "
+				"Fscache off!\n");
+	} else {
+		/* reuse the NFS mount option */
+		server->flags |= NFS_MOUNT_FSCACHE;
+	}
+
+	dfprintk(FSCACHE,"NFS: nfs4 cookie (0x%p,0x%p/0x%p)\n",
+		sb, server, server->fscache);
+
+	return;
+}
+
+static inline void nfs_fill_fscookie(struct super_block *sb)
+{
+	struct nfs_server *server = NFS_SB(sb);
+
+	if (!(server->flags & NFS_MOUNT_FSCACHE)) {
+		server->fscache = NULL;
+		return;
+	}
+
+	server->fscache = NULL;
+	if (nfs_load_fsctag("mount:nfs:fsctag", server)) {
+		/* create a cache index for looking up filehandles */
+		server->fscache = fscache_acquire_cookie(nfs_cache_netfs.primary_index,
+				&nfs_fsctag_index_def, server);
+	}
+	if (server->fscache == NULL) {
+		server->flags &= ~NFS_MOUNT_FSCACHE;
+		printk(KERN_WARNING "NFS: No Fscache cookie. Turning "
+			"Fscache off!\n");
+	}
+
+	dfprintk(FSCACHE,"NFS: server cookie (0x%p/0x%p/0x%p)\n",
+		sb, server, server->fscache);
+
+	return;
+}
+
+static inline void
+nfs_fhget_fscookie(struct super_block *sb, struct nfs_inode *nfsi)
+{
+	struct nfs_server *server = NFS_SB(sb);
+
+	if (!(server->flags & NFS_MOUNT_FSCACHE)) {
+		nfsi->fscache = NULL;
+		return;
+	}
+
+	nfsi->fscache = fscache_acquire_cookie(server->fscache, 
+		&nfs_cache_fh_index_def, nfsi);
+	if (nfsi->fscache == NULL)
+		printk(KERN_WARNING "NFS: NULL FScache cookie: "
+				"sb 0x%p nfsi 0x%p\n", sb, nfsi);
+	fscache_set_i_size(nfsi->fscache, nfsi->vfs_inode.i_size);
+
+	dfprintk(FSCACHE, "NFS: fhget new cookie (0x%p/0x%p/0x%p)\n",
+		sb, nfsi, nfsi->fscache);
+
+	return;
+}
+
+static inline void nfs_kill_fscookie(struct nfs_server *server)
+{
+	if (!(server->flags & NFS_MOUNT_FSCACHE))
+		return;
+
+	dfprintk(FSCACHE,"NFS: killing cookie (0x%p/0x%p)\n",
+		server, server->fscache);
+
+	fscache_relinquish_cookie(server->fscache, 0);
+	server->fscache = NULL;
+
+	return;
+}
+
+static inline void nfs_clear_fscookie(
+	struct nfs_server *server, struct nfs_inode *nfsi)
+{
+	if (!(server->flags & NFS_MOUNT_FSCACHE))
+		return;
+
+	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
+			nfsi, nfsi->fscache);
+
+	fscache_relinquish_cookie(nfsi->fscache, 0);
+	nfsi->fscache = NULL;
+
+	return;
+}
+
+static inline void nfs_zap_fscookie(
+	struct nfs_server *server, struct nfs_inode *nfsi)
+{
+	if (!(server->flags & NFS_MOUNT_FSCACHE))
+		return;
+
+	dfprintk(FSCACHE,"NFS: zapping cookie (0x%p/0x%p)\n",
+		nfsi, nfsi->fscache);
+
+	fscache_relinquish_cookie(nfsi->fscache, 1);
+	nfsi->fscache = NULL;
+
+	return;
+}
+
+static inline void nfs_set_fscache(struct inode *inode, int cache_on)
+{
+	if (!cache_on && NFS_I(inode)->fscache) {
+		dfprintk(FSCACHE, 
+			"NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
+		/*
+		 * Need to invalidate any mapped pages that were
+		 * read in before turning off the cache.
+		 */
+		if (inode->i_mapping && inode->i_mapping->nrpages)
+			invalidate_inode_pages2(inode->i_mapping);
+
+		nfs_zap_fscookie(NFS_SERVER(inode), NFS_I(inode));
+	}
+
+	return;
+}
+
+static inline int nfs_register_netfs(void)
+{
+	int err;
+
+	err = fscache_register_netfs(&nfs_cache_netfs);
+
+	return err;
+}
+
+static inline void nfs_unregister_netfs(void)
+{
+	fscache_unregister_netfs(&nfs_cache_netfs);
+
+	return;
+}
+#else
+static inline void nfs_set_fscsize(struct nfs_server *server, struct nfs_inode *nfsi, loff_t i_size) {}
+static inline void nfs_fill_fscookie(struct super_block *sb) {}
+static inline void nfs_fhget_fscookie(struct super_block *sb, struct nfs_inode *nfsi) {}
+static inline void nfs4_fill_fscookie(struct super_block *sb) {}
+static inline void nfs_kill_fscookie(struct nfs_server *server) {}
+static inline void nfs_clear_fscookie(struct nfs_server *server, struct nfs_inode *nfsi) {}
+static inline void nfs_zap_fscookie(struct nfs_server *server, struct nfs_inode *nfsi) {}
+static inline void nfs_set_fscache(struct inode *inode, int cache_on) {}
+static inline void
+	nfs_renew_fscookie(struct nfs_server *server, struct nfs_inode *nfsi) {}
+static inline int nfs_register_netfs(void) { return 0; }
+static inline void nfs_unregister_netfs(void) {}
+
+#endif
+#endif /* _NFS_FSCACHE_H */
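
To see how the helpers above are meant to fit together, here is an editorial
outline of the intended call points; the exact call sites live in the inode
and superblock parts of this patch set and are assumptions here, not shown
code:

	/* at mount time, once the mount flags have been parsed */
	nfs_fill_fscookie(sb);			/* or nfs4_fill_fscookie(sb) */

	/* when an inode is instantiated (nfs_fhget) */
	nfs_fhget_fscookie(sb, NFS_I(inode));

	/* when the file size changes */
	nfs_set_fscsize(NFS_SERVER(inode), NFS_I(inode), i_size_read(inode));

	/* when cached data must be discarded, e.g. the file changed on the server */
	nfs_zap_fscookie(NFS_SERVER(inode), NFS_I(inode));

	/* when the inode is finally dropped (clear_inode) */
	nfs_clear_fscookie(NFS_SERVER(inode), NFS_I(inode));

	/* at unmount time */
	nfs_kill_fscookie(NFS_SB(sb));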

linux-2.6.16-cachefs-nspace.patch:
 fs/namespace.c            |   32 +++++++++++++++++++++++++-------
 include/linux/namespace.h |    1 +
 2 files changed, 26 insertions(+), 7 deletions(-)

--- NEW FILE linux-2.6.16-cachefs-nspace.patch ---
--- linux-2.6.16.noarch/fs/namespace.c.nspace	2006-04-07 06:05:13.000000000 -0400
+++ linux-2.6.16.noarch/fs/namespace.c	2006-04-07 06:07:34.000000000 -0400
@@ -86,6 +86,8 @@ struct vfsmount *alloc_vfsmnt(const char
 	return mnt;
 }
 
+EXPORT_SYMBOL_GPL(alloc_vfsmnt);
+
 void free_vfsmnt(struct vfsmount *mnt)
 {
 	kfree(mnt->mnt_devname);
@@ -1668,6 +1670,26 @@ out3:
 	goto out2;
 }
 
+/*
+ * initialise a namespace, rooting it at the specified mountpoint if one is
+ * given
+ */
+void init_namespace(struct namespace *namespace, struct vfsmount *mnt)
+{
+	atomic_set(&namespace->count, 1);
+	INIT_LIST_HEAD(&namespace->list);
+	init_waitqueue_head(&namespace->poll);
+	namespace->event = 0;
+	namespace->root = mnt;
+
+	if (mnt) {
+		list_add(&mnt->mnt_list, &namespace->list);
+		mnt->mnt_namespace = namespace;
+	}
+}
+
+EXPORT_SYMBOL_GPL(init_namespace);
+
 static void __init init_mount_tree(void)
 {
 	struct vfsmount *mnt;
@@ -1680,13 +1702,7 @@ static void __init init_mount_tree(void)
 	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
 	if (!namespace)
 		panic("Can't allocate initial namespace");
-	atomic_set(&namespace->count, 1);
-	INIT_LIST_HEAD(&namespace->list);
-	init_waitqueue_head(&namespace->poll);
-	namespace->event = 0;
-	list_add(&mnt->mnt_list, &namespace->list);
-	namespace->root = mnt;
-	mnt->mnt_namespace = namespace;
+	init_namespace(namespace, mnt);
 
 	init_task.namespace = namespace;
 	read_lock(&tasklist_lock);
@@ -1765,3 +1781,5 @@ void __put_namespace(struct namespace *n
 	release_mounts(&umount_list);
 	kfree(namespace);
 }
+
+EXPORT_SYMBOL_GPL(__put_namespace);
--- linux-2.6.16.noarch/include/linux/namespace.h.nspace	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/namespace.h	2006-04-07 06:06:12.000000000 -0400
@@ -16,6 +16,7 @@ struct namespace {
 extern int copy_namespace(int, struct task_struct *);
 extern void __put_namespace(struct namespace *namespace);
 extern struct namespace *dup_namespace(struct task_struct *, struct fs_struct *);
+extern void init_namespace(struct namespace *, struct vfsmount *);
 
 static inline void put_namespace(struct namespace *namespace)
 {
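
Editorial note: the exports above (alloc_vfsmnt, init_namespace,
__put_namespace) let a kernel module construct a private namespace of its own.
A minimal sketch of the intended usage, in which everything except the
namespace calls is hypothetical:

	static int example_private_namespace(struct vfsmount *mnt)
	{
		struct namespace *ns;

		ns = kmalloc(sizeof(*ns), GFP_KERNEL);
		if (!ns)
			return -ENOMEM;

		/* mnt may be NULL; if given, it becomes the root and is added
		 * to the namespace's mount list */
		init_namespace(ns, mnt);

		/* ... use ns as the root of a private mount tree ... */

		put_namespace(ns);	/* __put_namespace() frees it on the last put */
		return 0;
	}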

linux-2.6.16-cachefs-radix-tree.patch:
 include/linux/radix-tree.h |   15 ++++
 include/linux/sched.h      |    2 
 kernel/exit.c              |    1 
 kernel/fork.c              |    1 
 lib/radix-tree.c           |  136 +++++++++++++++++++++++++++++++++++----------
 5 files changed, 123 insertions(+), 32 deletions(-)

--- NEW FILE linux-2.6.16-cachefs-radix-tree.patch ---
--- linux-2.6.16.noarch/include/linux/radix-tree.h.radix-tree	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/radix-tree.h	2006-03-27 10:28:26.000000000 -0500
@@ -19,7 +19,6 @@
 #ifndef _LINUX_RADIX_TREE_H
 #define _LINUX_RADIX_TREE_H
 
-#include <linux/sched.h>
 #include <linux/preempt.h>
 #include <linux/types.h>
 
@@ -52,7 +51,6 @@ void *radix_tree_delete(struct radix_tre
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, int tag);
@@ -65,6 +63,19 @@ radix_tree_gang_lookup_tag(struct radix_
 		unsigned long first_index, unsigned int max_items, int tag);
 int radix_tree_tagged(struct radix_tree_root *root, int tag);
 
+/*
+ * radix tree advance loading
+ */
+struct radix_tree_preload {
+	int count;
+	struct radix_tree_node *nodes;
+};
+
+int radix_tree_preload(unsigned int gfp_mask);
+
+extern int radix_tree_preload_task(unsigned int gfp_mask, int nitems);
+extern void radix_tree_preload_drain_task(void);
+
 static inline void radix_tree_preload_end(void)
 {
 	preempt_enable();
--- linux-2.6.16.noarch/include/linux/sched.h.radix-tree	2006-03-27 10:27:23.000000000 -0500
+++ linux-2.6.16.noarch/include/linux/sched.h	2006-03-27 10:28:26.000000000 -0500
@@ -35,6 +35,7 @@
 #include <linux/topology.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/radix-tree.h>
 
 #include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
 
@@ -852,6 +853,7 @@ struct task_struct {
 
 /* VM state */
 	struct reclaim_state *reclaim_state;
+	struct radix_tree_preload radix_preload;
 
 	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
--- linux-2.6.16.noarch/kernel/exit.c.radix-tree	2006-03-27 10:27:23.000000000 -0500
+++ linux-2.6.16.noarch/kernel/exit.c	2006-03-27 10:28:26.000000000 -0500
@@ -66,6 +66,7 @@ void release_task(struct task_struct * p
 	struct dentry *proc_dentry;
 
 repeat: 
+	radix_tree_preload_drain_task();
 	atomic_dec(&p->user->processes);
 	spin_lock(&p->proc_lock);
 	proc_dentry = proc_pid_unhash(p);
--- linux-2.6.16.noarch/kernel/fork.c.radix-tree	2006-03-27 10:27:23.000000000 -0500
+++ linux-2.6.16.noarch/kernel/fork.c	2006-03-27 10:28:53.000000000 -0500
@@ -981,6 +981,7 @@ static task_t *copy_process(unsigned lon
 			goto bad_fork_cleanup;
 
 	p->proc_dentry = NULL;
+	memset(&p->radix_preload, 0, sizeof(p->radix_preload));
 
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
--- linux-2.6.16.noarch/lib/radix-tree.c.radix-tree	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.noarch/lib/radix-tree.c	2006-03-27 10:28:27.000000000 -0500
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
+#include <linux/sched.h>
 
 
 #ifdef __KERNEL__
@@ -69,10 +70,6 @@ static kmem_cache_t *radix_tree_node_cac
 /*
  * Per-cpu pool of preloaded nodes
  */
-struct radix_tree_preload {
-	int nr;
-	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
-};
 DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
 /*
@@ -89,10 +86,12 @@ radix_tree_node_alloc(struct radix_tree_
 		struct radix_tree_preload *rtp;
 
 		rtp = &__get_cpu_var(radix_tree_preloads);
-		if (rtp->nr) {
-			ret = rtp->nodes[rtp->nr - 1];
-			rtp->nodes[rtp->nr - 1] = NULL;
-			rtp->nr--;
+		ret = rtp->nodes;
+		if (ret) {
+			rtp->nodes = ret->slots[0];
+			if (rtp->nodes)
+				ret->slots[0] = NULL;
+			rtp->count--;
 		}
 	}
 	return ret;
@@ -113,28 +112,90 @@ radix_tree_node_free(struct radix_tree_n
 int radix_tree_preload(gfp_t gfp_mask)
 {
 	struct radix_tree_preload *rtp;
-	struct radix_tree_node *node;
-	int ret = -ENOMEM;
+	struct radix_tree_node *node, *sp;
+	int ret = -ENOMEM, n;
 
 	preempt_disable();
+
 	rtp = &__get_cpu_var(radix_tree_preloads);
-	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
-		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-		if (node == NULL)
-			goto out;
-		preempt_disable();
-		rtp = &__get_cpu_var(radix_tree_preloads);
-		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
-			rtp->nodes[rtp->nr++] = node;
-		else
-			kmem_cache_free(radix_tree_node_cachep, node);
+
+	if (rtp->count < RADIX_TREE_MAX_PATH) {
+		/* load up from the per-task cache first */
+		n = current->radix_preload.count;
+		if (n > 0) {
+			if (RADIX_TREE_MAX_PATH - rtp->count < n)
+				n = RADIX_TREE_MAX_PATH - rtp->count;
+			current->radix_preload.count -= n;
+			rtp->count += n;
+
+			sp = current->radix_preload.nodes;
+
+			for (; n > 0; n--) {
+				node = sp;
+				sp = node->slots[0];
+				node->slots[0] = rtp->nodes;
+				rtp->nodes = node;
+			}
+
+			current->radix_preload.nodes = sp;
+		}
+
+		/* then load up from the slab */
+		while (rtp->count < RADIX_TREE_MAX_PATH) {
+			preempt_enable();
+			node = kmem_cache_alloc(radix_tree_node_cachep,
+						gfp_mask);
+			if (node == NULL)
+				goto out;
+			preempt_disable();
+			rtp = &__get_cpu_var(radix_tree_preloads);
+
+			if (rtp->count < RADIX_TREE_MAX_PATH) {
+				node->slots[0] = rtp->nodes;
+				rtp->nodes = node;
+				rtp->count++;
+			} else {
+				kmem_cache_free(radix_tree_node_cachep, node);
+			}
+		}
 	}
+
 	ret = 0;
 out:
 	return ret;
 }
 
+/*
+ * Load up an auxiliary cache with sufficient objects to ensure a number of
+ * items may be added to the radix tree
+ */
+int radix_tree_preload_task(unsigned int __nocast gfp_mask,
+			    int nitems)
+{
+	struct radix_tree_preload *rtp = &current->radix_preload;
+	struct radix_tree_node *node;
+
+	nitems *= RADIX_TREE_MAX_PATH;
+
+	while (rtp->count < nitems) {
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+		if (node == NULL)
+			goto nomem;
+
+		node->slots[0] = rtp->nodes;
+		rtp->nodes = node;
+		rtp->count++;
+	}
+	return 0;
+
+nomem:
+	radix_tree_preload_drain_task();
+	return -ENOMEM;
+}
+
+EXPORT_SYMBOL(radix_tree_preload_task);
+
+
 static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
 {
 	__set_bit(offset, node->tags[tag]);
@@ -835,6 +896,28 @@ static __init void radix_tree_init_maxin
 		height_to_maxindex[i] = __maxindex(i);
 }
 
+/*
+ * drain a preload cache back to the slab from whence the nodes came
+ */
+static void radix_tree_preload_drain(struct radix_tree_preload *rtp)
+{
+	while (rtp->nodes) {
+		struct radix_tree_node *node = rtp->nodes;
+		rtp->nodes = node->slots[0];
+		rtp->count--;
+		kmem_cache_free(radix_tree_node_cachep, node);
+	}
+
+	BUG_ON(rtp->count != 0);
+}
+
+void radix_tree_preload_drain_task(void)
+{
+	radix_tree_preload_drain(&current->radix_preload);
+}
+
+EXPORT_SYMBOL(radix_tree_preload_drain_task);
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int radix_tree_callback(struct notifier_block *nfb,
                             unsigned long action,
@@ -844,15 +927,8 @@ static int radix_tree_callback(struct no
        struct radix_tree_preload *rtp;
 
        /* Free per-cpu pool of perloaded nodes */
-       if (action == CPU_DEAD) {
-               rtp = &per_cpu(radix_tree_preloads, cpu);
-               while (rtp->nr) {
-                       kmem_cache_free(radix_tree_node_cachep,
-                                       rtp->nodes[rtp->nr-1]);
-                       rtp->nodes[rtp->nr-1] = NULL;
-                       rtp->nr--;
-               }
-       }
+       if (action == CPU_DEAD)
+               radix_tree_preload_drain(&per_cpu(radix_tree_preloads, cpu));
        return NOTIFY_OK;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
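
Editorial sketch of how the per-task preloading added above would typically be
used; only the radix_tree_* calls are real, every other identifier is
hypothetical:

	static int example_insert_batch(struct radix_tree_root *root,
					spinlock_t *lock, unsigned long first,
					void **items, int nitems)
	{
		int i, error;

		/* reserve enough nodes up front for nitems insertions */
		error = radix_tree_preload_task(GFP_KERNEL, nitems);
		if (error < 0)
			return error;

		for (i = 0; i < nitems; i++) {
			/* refill the per-CPU pool; this draws on the per-task
			 * reserve first, so it should not fail here */
			error = radix_tree_preload(GFP_ATOMIC);
			if (error < 0)
				break;
			spin_lock(lock);
			error = radix_tree_insert(root, first + i, items[i]);
			spin_unlock(lock);
			radix_tree_preload_end();
			if (error < 0)
				break;
		}

		/* hand any unused nodes back to the slab */
		radix_tree_preload_drain_task();
		return error;
	}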

linux-2.6.16-cachefs-relpage.patch:
 readahead.c |   18 ++++++++++++++++++
 1 files changed, 18 insertions(+)

--- NEW FILE linux-2.6.16-cachefs-relpage.patch ---
--- linux-2.6.16.i686/mm/readahead.c.relpage	2006-03-20 00:53:29.000000000 -0500
+++ linux-2.6.16.i686/mm/readahead.c	2006-03-24 23:00:45.000000000 -0500
@@ -39,6 +39,8 @@ file_ra_state_init(struct file_ra_state 
 	ra->prev_page = -1;
 }
 
+EXPORT_SYMBOL_GPL(file_ra_state_init);
+
 /*
  * Return max readahead size for this inode in number-of-pages.
  */
@@ -130,6 +132,12 @@ int read_cache_pages(struct address_spac
 		page = list_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
+			if (PagePrivate(page) && mapping->a_ops->releasepage) {
+				page->mapping = mapping;
+				mapping->a_ops->releasepage(page, GFP_KERNEL);
+				page->mapping = NULL;
+			}
+				
 			page_cache_release(page);
 			continue;
 		}
@@ -142,6 +150,16 @@ int read_cache_pages(struct address_spac
 
 				victim = list_to_page(pages);
 				list_del(&victim->lru);
+
+				if (PagePrivate(victim) &&
+				    mapping->a_ops->releasepage
+				    ) {
+					victim->mapping = mapping;
+					mapping->a_ops->releasepage(
+						victim, GFP_KERNEL);
+					victim->mapping = NULL;
+				}
+
 				page_cache_release(victim);
 			}
 			break;

linux-2.6.16-cachefs.patch:
 Documentation/filesystems/caching/cachefs.txt |  375 +++++
 fs/Kconfig                                    |   19 
 fs/Makefile                                   |    1 
 fs/cachefs/Makefile                           |   37 
 fs/cachefs/allocator.c                        | 1382 +++++++++++++++++++++
 fs/cachefs/cachefs-debug.h                    |  132 ++
 fs/cachefs/cachefs-inode.h                    |   64 
 fs/cachefs/cachefs-int.h                      |  739 +++++++++++
 fs/cachefs/cachefs-layout.h                   |  312 ++++
 fs/cachefs/inode.c                            |  224 +++
 fs/cachefs/interface.c                        |  724 +++++++++++
 fs/cachefs/journal-replay.c                   |  485 +++++++
 fs/cachefs/journal.c                          |  483 +++++++
 fs/cachefs/kcachefsd.c                        |  280 ++++
 fs/cachefs/main.c                             |  224 +++
 fs/cachefs/meta-aops.c                        |  794 ++++++++++++
 fs/cachefs/meta-misc.c                        |  348 +++++
 fs/cachefs/operation.c                        |  580 +++++++++
 fs/cachefs/reaper.c                           |  120 +
 fs/cachefs/recycling.c                        |  966 +++++++++++++++
 fs/cachefs/rootdir.c                          |  146 ++
 fs/cachefs/status.c                           |  234 +++
 fs/cachefs/super.c                            | 1345 ++++++++++++++++++++
 fs/cachefs/tree-cull.c                        |   19 
 fs/cachefs/tree-data.c                        | 1669 ++++++++++++++++++++++++++
 fs/cachefs/tree-delete.c                      |  597 +++++++++
 fs/cachefs/tree-insert-fanout.c               | 1126 +++++++++++++++++
 fs/cachefs/tree-insert.c                      |  675 ++++++++++
 fs/cachefs/tree-keys.c                        |  587 +++++++++
 fs/cachefs/tree-list.c                        |  544 ++++++++
 fs/cachefs/tree-lookup.c                      |  598 +++++++++
 fs/cachefs/tree-misc.c                        |  346 +++++
 fs/cachefs/tree-move.c                        |  299 ++++
 fs/cachefs/tree-node.c                        |  284 ++++
 fs/cachefs/tree-scan.c                        |  972 +++++++++++++++
 fs/cachefs/tree-update.c                      |  175 ++
 36 files changed, 17905 insertions(+)

--- NEW FILE linux-2.6.16-cachefs.patch ---
--- /dev/null	2006-03-18 10:17:16.106812576 -0500
+++ linux-2.6.16.noarch/Documentation/filesystems/caching/cachefs.txt	2006-03-27 11:12:35.000000000 -0500
@@ -0,0 +1,375 @@
+			  ===========================
+			  CacheFS: Caching Filesystem
+			  ===========================
+
+========
+OVERVIEW
+========
+
+CacheFS is a backend for the general filesystem cache facility.
+
+CacheFS uses a block device directly rather than a bunch of files under an
+already mounted filesystem. For why this is so, see further on. If necessary,
+however, a file can be loopback mounted as a cache.
+
+CacheFS is based on a wandering tree approach. This means that data already on
+the disk are not changed (more or less), only replaced. As a result, CacheFS
+provides both metadata integrity and data integrity. There is a small,
+simple journal that tracks the state of the tree and the block allocation
+management. Should the power be cut to a computer, or should it crash, all
+changes made to the cache since the last time the journal was cranked will be
+lost; but a valid tree will remain, albeit slightly out of date.
+
+
+========
+MOUNTING
+========
+
+Since CacheFS is actually a quasi-filesystem, it requires a block device behind
+it. The way to give it one is to mount it as cachefs type on a directory
+somewhere. The mounted filesystem will then present the user with a single file
+describing the current cache management status.
+
+There are a number of mount options that can be provided when the cache is
+mounted:
+
+ (*) -o tag=<name>
+
+     This tells FS-Cache the name by which netfs's will refer to the cache.
+     This is not strictly a necessity; if it's not given, a tag will be
+     invented based on the major and minor numbers of the block device. If the
+     netfs doesn't give FS-Cache any specific instructions, the first cache in
+     the list will be picked by default.
+
+ (*) -o wander=<n>
+
+     Set the wander timer so that CacheFS will commit the journal that long
+     after a change is made if nothing else causes the tree to wander.
+
+     n may be in the range 0 to 3600. If n is 0 then automatic wandering will
+     be disabled, otherwise it's a number of seconds. The tree is also forced
+     to wander by allocator underrun, sync and unmounting the cache.
+
+     A smaller number means that the cache will be more up to date if the power
+     fails, but that the allocator will cycle faster and blocks will be
+     replaced more often, lowering performance.
+
+ (*) -o autodel
+
+     All files should be deleted when the last reference to them is dropped.
+     This is primarily for debugging purposes.
+
+For instance, the cache might be mounted thusly:
+
+	root>mount -t cachefs /dev/hdg9 /cache-hdg9 -o tag=mycache
+	root>ls -1 /cache-hdg9
+	status
+
+However, a block device that's going to be used for a cache must be prepared
+before it can be mounted initially. This is done very simply by:
+
+	echo "cachefs___" >/dev/hdg9
+
+During the initial mount, the basic structure will be written into the cache
+and then the journal will be replayed as during a normal mount.
+
+Note that trying to mount a cache read only will result in an error.
+
+
+=============================================
+WHY A BLOCK DEVICE? WHY NOT A BUNCH OF FILES?
+=============================================
+
+CacheFS is backed by a block device rather than being backed by a bunch of
+files on a filesystem. This confers several advantages:
+
+ (1) Performance.
+
+     Going directly to a block device means that we can DMA directly to/from
+     the netfs's pages. If another filesystem were managing the backing
+     store, everything would have to be copied between pages. Whilst DirectIO
+     does exist, it doesn't appear easy to make use of in this situation.
+
+     New address space or file operations could be added to make it possible to
+     persuade a backing diskfs to generate block I/O directly to/from disk
+     blocks under its control, but that then means the diskfs has to keep track
+     of I/O requests to pages not under its control.
+
+     Furthermore, we only have to do one lot of readahead calculations, not
+     two; in the diskfs backing case, the netfs would do one and the diskfs
+     would also do one.
+
+ (2) Memory.
+
+     Using a block device means that we have a lower memory usage - all data
+     pages belong to the netfs we're backing. If we used a filesystem, we would
+     have twice as many pages at certain points - one from the netfs and one
+     from the backing diskfs. In the backing diskfs model, under situations of
+     memory pressure, we'd have to allocate or keep around a diskfs page to be
+     able to write out a netfs page; or else we'd need to be able to punch a
+     hole in the backing file.
+
+     Furthermore, whilst we have to keep a certain amount of memory around for
+     every netfs inode we're backing, a backing diskfs would have to keep the
+     inode, dentry and possibly a file struct, in addition to FS-specific
+     stuff, thus adding to the burden.
+
+ (3) Holes.
+
+     The cache uses holes in files to indicate to the netfs that it hasn't yet
+     downloaded the data for that page.
+
+     Since CacheFS is its own filesystem, it can support holes in files
+     trivially. Running on top of another diskfs would limit us to using ones
+     that can support holes.
+
+     Furthermore, it would have to be made possible to detect holes in a diskfs
+     file, rather than just seeing zero filled blocks.
+
+ (4) Integrity
+
+     CacheFS maintains filesystem integrity through its use of a wandering
+     tree. It (for the most part) replaces blocks that need updating rather
+     than overwriting them in place. That said, certain non-structural changes
+     - such as the updating of atimes - are done in place.
+
+     CacheFS gets data integrity for free - more or less - by treating the
+     data exactly as it treats the metadata. Data blocks that need changing
+     are simply replaced. Whilst this does mean that the metadata pointing to
+     it also needs updating, quite often these changes elide between journal
+     updates.
+
+     Knowing that your cache is in a good state is vitally important if you,
+     say, put /usr on AFS. Some organisations put everything barring /etc,
+     /sbin, /lib and /var on AFS and have an enormous cache on every
+     computer. Imagine if the power goes out and renders every cache
+     inconsistent, requiring all the computers to re-initialise their caches
+     when the power comes back on...
+
+ (5) Disk Space.
+
+     Whilst the block device does set a hard ceiling on the amount of space
+     available, CacheFS can guarantee that all that space will be available to
+     the cache. On a diskfs-backed cache, the administrator would probably want
+     to set a cache size limit, but the system wouldn't be able to guarantee
+     that all that space would be available to the cache - not unless that
+     cache was on a partition of its own.
+
+     Furthermore, with a diskfs-backed cache, if the recycler starts to reclaim
+     cache files to make space, the freed blocks may just be eaten directly by
+     userspace programs, potentially resulting in the entire cache being
+     consumed. Alternatively, netfs operations may end up being held up because
+     the cache can't get blocks on which to store the data.
+
+ (6) Users.
+
+     Users can't so easily go into CacheFS and run amok. The worst they can do
+     is cause bits of the cache to be recycled early. With a diskfs-backed
+     cache, they can do all sorts of bad things to the files belonging to the
+     cache, and they can do this quite by accident.
+
+
+On the other hand, there would be some advantages to using a file-based cache
+rather than a blockdev-based cache:
+
+ (1) Having to copy to a diskfs's page would mean that a netfs could just make
+     the copy and then assume its own page is ready to go.
+
+ (2) Backing onto a diskfs wouldn't require a committed block device. You would
+     just nominate a directory and go from there. With CacheFS you have to
+     repartition or install an extra drive to make use of it in an existing
+     system (though the loopback device offers a way out).
+
+ (3) You could easily make your cache bigger if the diskfs has plenty of space;
+     you could even go across multiple mountpoints. This last isn't so much of
+     a problem as you can have multiple caches.
+
+
+======================
+CACHEFS ON-DISK LAYOUT
+======================
+
+The filesystem is divided into a number of parts:
+
+  0	+---------------------------+
+	|        Superblock         |
+  1	+---------------------------+
[...17626 lines suppressed...]
+	_leave("");
+
+} /* end cachefs_scan_reaping_object() */
+
+/*****************************************************************************/
+/*
+ * wait for the reaper to dispose of the object we gave it last time
+ */
+static void cachefs_scan_waiting_for_reaper(struct cachefs_super *super)
+{
+	_enter("%x{%d},%llx",
+	       super->scan_node->bix, super->scan_node->scan_state,
+	       super->scan_reap->objid);
+
+	if (!super->reaper_target) {
+		super->scan_state = CACHEFS_SCAN_REAPING_OBJECT;
+		set_bit(CACHEFS_SUPER_DO_SCAN, &super->flags);
+	}
+
+	_leave("");
+
+} /* end cachefs_scan_waiting_for_reaper() */
--- /dev/null	2006-03-18 10:17:16.106812576 -0500
+++ linux-2.6.16.noarch/fs/cachefs/tree-update.c	2006-03-27 11:12:37.000000000 -0500
@@ -0,0 +1,175 @@
+/* tree-update.c: CacheFS indexing tree update
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+//#define __KENTER
+//#define __KLEAVE
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "cachefs-int.h"
+
+/*****************************************************************************/
+/*
+ * walk from the root of the tree to the object sliding a read lock down the
+ * tree to the parent of the specified object
+ * - the object must be resident in the tree and must be pinning the nodes on
+ *   the path through the tree
+ */
+struct cachefs_tree *cachefs_tree_slide_readlock(struct cachefs_super *super,
+						 struct cachefs_object *object)
+{
+	struct cachefs_object *xobject;
+	struct cachefs_tree *point, *branch;
+	uint16_t level, offset;
+
+	_enter("");
+
+	/* attempt to lock the node holding the object leaf directly
+	 * - lock the object to prevent the parent pointer changing whilst we
+	 *   do it
+	 */
+	read_lock(&object->lock);
+
+	if (down_read_trylock(&object->node->sem)) {
+		cachefs_tree_get(object->node);
+		read_unlock(&object->lock);
+		_leave(" = %p [fast]", object->node);
+		return object->node;
+	}
+
+	read_unlock(&object->lock);
+
+	/* walk the tree from the root looking for the object and
+	 * sliding the lock down appropriately */
+	point = cachefs_tree_get(super->metadata_tree);
+	level = 0;
+
+	down_read(&point->sem);
+
+begin_step:
+	/* extract the bits of key in which we're immediately interested */
+	offset = cachefs_extract_subkey_obj(object, level);
+
+	_debug("step %d subkey=%04x", level, offset);
+
+	/* start by checking the cached branches and shortcuts leading off of
+	 * this one
+	 */
+	read_lock(&point->lock);
+
+	xobject = cachefs_tree_find_object(point, object->offset);
+	if (xobject == object)
+		goto found_object;
+
+	branch = cachefs_tree_find_node(point, CACHEFS_TREE_TYPE_NODE, offset);
+	if (branch)
+		goto move_to_cached_branch;
+
+	branch = cachefs_tree_find_shortcut_obj(point, object);
+	if (branch)
+		goto move_to_cached_shortcut;
+
+	read_unlock(&point->lock);
+
+	/* uh oh... the object should be in the tree somewhere */
+	printk(KERN_ERR "Object missing from in-mem tree\n");
+	printk(KERN_ERR "- obj %llx node %p{%x} level %d offset %04x\n",
+	       object->objid, point, point->bix, level, offset);
+	BUG();
+
+	/* we found the object we were looking for
+	 * - return with the point node's semaphore still read-locked and a ref
+	 *   held on its usage count
+	 */
+found_object:
+	read_unlock(&point->lock);
+
+	_leave(" = %p [found]", point);
+	return point;
+
+	/* we found a suitable branch to move to in the topology cache */
+move_to_cached_shortcut:
+	_debug(">>>> skip to %p [lev %d]", branch, branch->level);
+	goto move_to_cached_branch2;
+
+move_to_cached_branch:
+	_debug(">>>> move to %p [lev %d]", branch, branch->level);
+
+move_to_cached_branch2:
+	cachefs_tree_get(branch);
+	read_unlock(&point->lock);
+
+	down_read(&branch->sem);
+	up_read(&point->sem);
+	cachefs_tree_put(point);
+
+	ASSERT(branch->level > level);
+	level = branch->level;
+	point = branch;
+	goto begin_step;
+
+} /* end cachefs_tree_slide_readlock() */
+
+/*****************************************************************************/
+/*
+ * update an object in place
+ * - called by the netfs and also used to update the object flags on disk
+ */
+void cachefs_tree_update_object(struct cachefs_super *super,
+				struct cachefs_object *object)
+{
+	struct cachefs_ondisc_leaf *leaf;
+	struct fscache_cookie *cookie;
+	struct cachefs_tree *node;
+	uint16_t maxdlen, dlen;
+	void *data, *dbuf;
+
+	_enter(",{%llx,%x}", object->objid, object->offset);
+
+	ASSERT(object->key);
+
+	node = cachefs_tree_slide_readlock(super, object);
+
+	lock_page(node->page);
+	data = kmap_atomic(node->page, KM_USER0);
+
+	/* change the atime */
+	leaf = data + object->offset;
+	leaf->u.object.atime = CURRENT_TIME.tv_sec;
+	leaf->u.object.flags = object->flags;
+
+	ASSERT(leaf->type != CACHEFS_NULL_PTR);
+
+	/* update the netfs auxiliary data */
+	cookie = object->fscache.cookie;
+	if (cookie && cookie->def->get_aux) {
+		maxdlen = CACHEFS_ONDISC_LEAF_SIZE;
+
+		maxdlen -= offsetof(struct cachefs_ondisc_leaf,
+				    u.object.netfs_data);
+		maxdlen -= leaf->u.object.netfs_klen;
+		dbuf = leaf->u.object.netfs_data;
+		dbuf += leaf->u.object.netfs_klen;
+
+		dlen = cookie->def->get_aux(cookie->netfs_data, dbuf, maxdlen);
+		BUG_ON(dlen > maxdlen);
+		leaf->u.object.netfs_dlen = dlen;
+	}
+
+	/* schedule the page to be written back */
+	kunmap_atomic(data, KM_USER0);
+	set_page_dirty(node->page);
+	unlock_page(node->page);
+	up_read(&node->sem);
+	cachefs_tree_put(node);
+	_leave("");
+
+} /* end cachefs_tree_update_object() */

linux-2.6.16-fscache.patch:
 Documentation/filesystems/caching/backend-api.txt |  334 +++++++
 Documentation/filesystems/caching/fscache.txt     |  150 +++
 Documentation/filesystems/caching/netfs-api.txt   |  726 +++++++++++++++
 fs/Kconfig                                        |   15 
 fs/Makefile                                       |    1 
 fs/fscache/Makefile                               |   13 
 fs/fscache/cookie.c                               | 1030 ++++++++++++++++++++++
 fs/fscache/fscache-int.h                          |   71 +
 fs/fscache/fsdef.c                                |  113 ++
 fs/fscache/main.c                                 |  150 +++
 fs/fscache/page.c                                 |  521 +++++++++++
 include/linux/fscache-cache.h                     |  216 ++++
 include/linux/fscache.h                           |  484 ++++++++++
 13 files changed, 3824 insertions(+)

--- NEW FILE linux-2.6.16-fscache.patch ---
--- /dev/null	2006-04-02 15:08:25.450456288 -0400
+++ linux-2.6.16.noarch/Documentation/filesystems/caching/backend-api.txt	2006-04-06 19:51:46.000000000 -0400
@@ -0,0 +1,334 @@
+			  ==========================
+			  FS-CACHE CACHE BACKEND API
+			  ==========================
+
+The FS-Cache system provides an API by which actual caches can be supplied to
+FS-Cache for it to then serve out to network filesystems and other interested
+parties.
+
+This API is declared in <linux/fscache-cache.h>.
+
+
+====================================
+INITIALISING AND REGISTERING A CACHE
+====================================
+
+To start off, a cache definition must be initialised and registered for each
+cache the backend wants to make available. For instance, CacheFS does this in
+the fill_super() operation on mounting.
+
+The cache definition (struct fscache_cache) should be initialised by calling:
+
+	void fscache_init_cache(struct fscache_cache *cache,
+				struct fscache_cache_ops *ops,
+				const char *idfmt,
+				...)
+
+Where:
+
+ (*) "cache" is a pointer to the cache definition;
+
+ (*) "ops" is a pointer to the table of operations that the backend supports on
+     this cache;
+
+ (*) and a format and printf-style arguments for constructing a label for the
+     cache.
+
+
+The cache should then be registered with FS-Cache by passing a pointer to the
+previously initialised cache definition to:
+
+	int fscache_add_cache(struct fscache_cache *cache,
+			      struct fscache_object *fsdef,
+			      const char *tagname);
+
+Two extra arguments should also be supplied:
+
+ (*) "fsdef" which should point to the object representation for the FS-Cache
+     master index in this cache. Netfs primary index entries will be created
+     here.
+
+ (*) "tagname" which, if given, should be a text string naming this cache. If
+     this is NULL, the identifier will be used instead. For CacheFS, the
+     identifier is set to name the underlying block device and the tag can be
+     supplied by mount.
+
+This function may return -ENOMEM if it ran out of memory or -EEXIST if the tag
+is already in use. 0 will be returned on success.
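
By way of illustration only (the mycache_* identifiers are hypothetical and
not part of the API), a backend's fill_super() might register itself roughly
like this:

	static struct fscache_cache_ops mycache_ops;	/* operation table, see below */

	static int mycache_register(struct mycache_sb_info *sbi, const char *tag)
	{
		int ret;

		fscache_init_cache(&sbi->cache, &mycache_ops,
				   "mycache-%02u", sbi->cache_id);

		ret = fscache_add_cache(&sbi->cache, &sbi->fsdef_object, tag);
		if (ret < 0)
			return ret;	/* -ENOMEM or -EEXIST */
		return 0;
	}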
+
+
+=====================
+UNREGISTERING A CACHE
+=====================
+
+A cache can be withdrawn from the system by calling this function with a
+pointer to the cache definition:
+
+	void fscache_withdraw_cache(struct fscache_cache *cache)
+
+In CacheFS's case, this is called by put_super().
+
+
+==================
+FS-CACHE UTILITIES
+==================
+
+FS-Cache provides some utilities that a cache backend may make use of:
+
+ (*) Find the parent of an object:
+
+	struct fscache_object *
+	fscache_find_parent_object(struct fscache_object *object)
+
+     This allows a backend to find the logical parent of an index or data file
+     in the cache hierarchy.
+
+
+========================
+RELEVANT DATA STRUCTURES
+========================
+
+ (*) Index/Data file FS-Cache representation cookie.
+
+	struct fscache_cookie {
+		struct fscache_object_def	*def;
+		struct fscache_netfs		*netfs;
+		void				*netfs_data;
+		...
+	};
+
+     The fields that might be of use to the backend describe the object
+     definition, the netfs definition and the netfs's data for this
+     cookie. The object definition contains functions supplied by the netfs for
+     loading and matching index entries; these are required to provide some of
+     the cache operations.
+
+ (*) In-cache object representation.
+
+	struct fscache_object {
+		struct fscache_cache		*cache;
+		struct fscache_cookie		*cookie;
+		unsigned long			flags;
+	#define FSCACHE_OBJECT_RECYCLING	1
+		...
+	};
+
+     Structures of this type should be allocated by the cache backend and
+     passed to FS-Cache when requested by the appropriate cache operation. In
+     the case of CacheFS, they're embedded in CacheFS's internal object
+     structures.
+
+     Each object contains a pointer to the cookie that represents the object it
+     is backing. It also contains a flag that indicates whether this is an
+     index or not. This should be initialised by calling
+     fscache_object_init(object).
+
+
+================
+CACHE OPERATIONS
+================
+
+The cache backend provides FS-Cache with a table of operations that can be
+performed on the denizens of the cache. These are held in a structure of type
+
+	struct fscache_cache_ops
+
+ (*) Name of cache provider [mandatory].
+
+	const char *name
+
+     This isn't strictly an operation, but should be pointed at a string naming
+     the backend.
+
+ (*) Object lookup [mandatory].
+
+	struct fscache_object *(*lookup_object)(struct fscache_cache *cache,
+						struct fscache_object *parent,
+						struct fscache_cookie *cookie)
+
+     This method is used to look up an object in the specified cache, given a
+     pointer to the parent object and the cookie to which the object will be
+     attached. This should instantiate that object in the cache if it can, or
+     return -ENOBUFS or -ENOMEM if it can't.
+
+ (*) Increment object refcount [mandatory].
+
+	struct fscache_object *(*grab_object)(struct fscache_object *object)
+
+     This method is called to increment the reference count on an object. It
+     may fail (for instance if the cache is being withdrawn) by returning
+     NULL. It should return the object pointer if successful.
+
+ (*) Lock/Unlock object [mandatory].
+
+	void (*lock_object)(struct fscache_object *object)
+	void (*unlock_object)(struct fscache_object *object)
+
+     These methods are used to exclusively lock an object. It must be possible
+     to schedule with the lock held, so a spinlock isn't sufficient.
+
+ (*) Pin/Unpin object [optional].
+
+	int (*pin_object)(struct fscache_object *object)
+	void (*unpin_object)(struct fscache_object *object)
+
+     These methods are used to pin an object into the cache. Once pinned an
+     object cannot be reclaimed to make space. Return -ENOSPC if there's not
+     enough space in the cache to permit this.
+
+ (*) Update object [mandatory].
+
+	int (*update_object)(struct fscache_object *object)
+
+     This is called to update the index entry for the specified object. The new
+     information should be in object->cookie->netfs_data. This can be obtained
+     by calling object->cookie->def->get_aux()/get_attr().
+
+ (*) Release object reference [mandatory].
+
+	void (*put_object)(struct fscache_object *object)
+
+     This method is used to discard a reference to an object. The object may
+     be destroyed when all the references held by FS-Cache are released.
+
+ (*) Synchronise a cache [mandatory].
+
+	void (*sync)(struct fscache_cache *cache)
[...3476 lines suppressed...]
+		return __fscache_set_i_size(cookie, i_size);
+#endif
+	return -ENOBUFS;
+}
+
+/*****************************************************************************/
+/*
+ * reserve data space for a cached object
+ * - returns -ENOBUFS if the file is not backed
+ * - returns -ENOSPC if there isn't enough space to honour the reservation
+ * - returns 0 if okay
+ */
+#ifdef CONFIG_FSCACHE
+extern int __fscache_reserve_space(struct fscache_cookie *cookie, loff_t size);
+#endif
+
+static inline
+int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_reserve_space(cookie, size);
+#endif
+	return -ENOBUFS;
+}
+
+/*****************************************************************************/
+/*
+ * read a page from the cache or allocate a block in which to store it
+ * - if the page is not backed by a file:
+ *   - -ENOBUFS will be returned and nothing more will be done
+ * - else if the page is backed by a block in the cache:
+ *   - a read will be started which will call end_io_func on completion
+ * - else if the page is unbacked:
+ *   - a block will be allocated
+ *   - -ENODATA will be returned
+ */
+#ifdef CONFIG_FSCACHE
+extern int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+					struct page *page,
+					fscache_rw_complete_t end_io_func,
+					void *end_io_data,
+					gfp_t gfp);
+#endif
+
+static inline
+int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+			       struct page *page,
+			       fscache_rw_complete_t end_io_func,
+			       void *end_io_data,
+			       gfp_t gfp)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_read_or_alloc_page(cookie, page, end_io_func,
+						    end_io_data, gfp);
+#endif
+	return -ENOBUFS;
+}
+
+#ifdef CONFIG_FSCACHE
+extern int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+					 struct address_space *mapping,
+					 struct list_head *pages,
+					 unsigned *nr_pages,
+					 fscache_rw_complete_t end_io_func,
+					 void *end_io_data,
+					 gfp_t gfp);
+#endif
+
+static inline
+int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+				struct address_space *mapping,
+				struct list_head *pages,
+				unsigned *nr_pages,
+				fscache_rw_complete_t end_io_func,
+				void *end_io_data,
+				gfp_t gfp)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_read_or_alloc_pages(cookie, mapping, pages,
+						     nr_pages, end_io_func,
+						     end_io_data, gfp);
+#endif
+	return -ENOBUFS;
+}
+
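To show how a netfs might consume the read interface above (a hypothetical
sketch, not part of the patch; every my_*() name is a placeholder), a
readpage implementation could try the cache first and fall back to the
server:

	static int my_readpage(struct file *file, struct page *page)
	{
		/* whatever cookie the netfs holds for this inode */
		struct fscache_cookie *cookie = my_get_cookie(page->mapping->host);
		int ret;

		ret = fscache_read_or_alloc_page(cookie, page, my_end_io,
						 NULL, GFP_KERNEL);
		if (ret == 0)
			return 0;	/* read dispatched; my_end_io() will
					 * complete and unlock the page */

		/* -ENODATA: a block was allocated but holds no data yet;
		 * -ENOBUFS: the page has no cache backing at all;
		 * in both cases read from the server (and on -ENODATA the
		 * netfs may later write the data into the cache) */
		return my_read_from_server(page);
	}
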
+/*
+ * allocate a block in which to store a page
+ * - if the page is not backed by a file:
+ *   - -ENOBUFS will be returned and nothing more will be done
+ * - else
+ *   - a block will be allocated if there isn't one
+ *   - 0 will be returned
+ */
+#ifdef CONFIG_FSCACHE
+extern int __fscache_alloc_page(struct fscache_cookie *cookie,
+				struct page *page,
+				gfp_t gfp);
+#endif
+
+static inline
+int fscache_alloc_page(struct fscache_cookie *cookie,
+		       struct page *page,
+		       gfp_t gfp)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_alloc_page(cookie, page, gfp);
+#endif
+	return -ENOBUFS;
+}
+
+/*
+ * request a page be stored in the cache
+ * - this request may be ignored if no cache block is currently allocated, in
+ *   which case it:
+ *   - returns -ENOBUFS
+ * - if a cache block was already allocated:
+ *   - a BIO will be dispatched to write the page (end_io_func will be called
+ *     from the completion function)
+ *   - returns 0
+ */
+#ifdef CONFIG_FSCACHE
+extern int __fscache_write_page(struct fscache_cookie *cookie,
+				struct page *page,
+				fscache_rw_complete_t end_io_func,
+				void *end_io_data,
+				gfp_t gfp);
+
+extern int __fscache_write_pages(struct fscache_cookie *cookie,
+				 struct pagevec *pagevec,
+				 fscache_rw_complete_t end_io_func,
+				 void *end_io_data,
+				 gfp_t gfp);
+#endif
+
+static inline
+int fscache_write_page(struct fscache_cookie *cookie,
+		       struct page *page,
+		       fscache_rw_complete_t end_io_func,
+		       void *end_io_data,
+		       gfp_t gfp)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_write_page(cookie, page, end_io_func,
+					    end_io_data, gfp);
+#endif
+	return -ENOBUFS;
+}
+
+static inline
+int fscache_write_pages(struct fscache_cookie *cookie,
+			struct pagevec *pagevec,
+			fscache_rw_complete_t end_io_func,
+			void *end_io_data,
+			gfp_t gfp)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		return __fscache_write_pages(cookie, pagevec, end_io_func,
+					     end_io_data, gfp);
+#endif
+	return -ENOBUFS;
+}
+
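Correspondingly (again a hypothetical sketch, not part of the patch), once a
page's contents are valid the netfs might offer it to the cache:

	ret = fscache_write_page(cookie, page, my_write_done, NULL, GFP_KERNEL);
	if (ret == -ENOBUFS) {
		/* no cache block is allocated for this page: the request is
		 * ignored and the netfs need do nothing further */
	}
	/* on 0, a BIO has been dispatched and my_write_done() will be
	 * called when it completes */
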
+/*
+ * indicate that caching is no longer required on a page
+ * - note: cannot cancel any outstanding BIOs between this page and the cache
+ */
+#ifdef CONFIG_FSCACHE
+extern void __fscache_uncache_page(struct fscache_cookie *cookie,
+				   struct page *page);
+extern void __fscache_uncache_pages(struct fscache_cookie *cookie,
+				    struct pagevec *pagevec);
+#endif
+
+static inline
+void fscache_uncache_page(struct fscache_cookie *cookie,
+			  struct page *page)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		__fscache_uncache_page(cookie, page);
+#endif
+}
+
+static inline
+void fscache_uncache_pagevec(struct fscache_cookie *cookie,
+			     struct pagevec *pagevec)
+{
+#ifdef CONFIG_FSCACHE
+	if (cookie != FSCACHE_NEGATIVE_COOKIE)
+		__fscache_uncache_pages(cookie, pagevec);
+#endif
+}
+
+#endif /* _LINUX_FSCACHE_H */
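
Note the pattern shared by all the wrappers above: when CONFIG_FSCACHE is
disabled, or when the cookie is FSCACHE_NEGATIVE_COOKIE, each inline collapses
to -ENOBUFS or a no-op, so a netfs can call them unconditionally. A
hypothetical releasepage path (my_*() names are placeholders, and the
unconditional return value is an assumption for brevity) might therefore be as
simple as:

	static int my_releasepage(struct page *page, gfp_t gfp)
	{
		struct fscache_cookie *cookie = my_get_cookie(page->mapping->host);

		/* tell the cache this page no longer needs caching; note
		 * that this does not cancel BIOs already in flight and is
		 * safe even if the cookie is negative or FS-Cache is
		 * compiled out */
		fscache_uncache_page(cookie, page);
		return 1;	/* assume nothing else pins the page */
	}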


Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-5/kernel-2.6.spec,v
retrieving revision 1.2114
retrieving revision 1.2114.2.1
diff -u -r1.2114 -r1.2114.2.1
--- kernel-2.6.spec	10 May 2006 00:04:22 -0000	1.2114
+++ kernel-2.6.spec	10 May 2006 19:54:13 -0000	1.2114.2.1
@@ -24,7 +24,7 @@
 %define sublevel 16
 %define kversion 2.6.%{sublevel}
 %define rpmversion 2.6.%{sublevel}
-%define rhbsys  %([ -r /etc/beehive-root -o -n "%{?__beehive_build}" ] && echo || echo .`whoami`)
+%define rhbsys  %(echo .cachefs)
 %define release %(R="$Revision$"; RR="${R##: }"; echo ${RR%%?})_FC5%{rhbsys}
 %define signmodules 0
 %define make_target bzImage
@@ -414,6 +414,20 @@
 Patch5007: linux-2.6-bcm43xx-set-chan-lockup.patch
 Patch5008: linux-2.6-softmac-assoc-events.patch
 
+# Cachefs Bits
+Patch7000: linux-2.6.16-cachefs-fsmisc.patch
+Patch7001: linux-2.6.16-cachefs-mkwrite.patch
+Patch7002: linux-2.6.16-cachefs-relpage.patch
+Patch7003: linux-2.6.16-cachefs-radix-tree.patch
+Patch7004: linux-2.6.16-cachefs-nspace.patch
+Patch7005: linux-2.6.16-cachefs-filp.patch
+Patch7006: linux-2.6.16-cachefs-misc.patch
+Patch7007: linux-2.6.16-fscache.patch
+Patch7008: linux-2.6.16-cachefs-nfs.patch
+Patch7009: linux-2.6.16-cachefs-afs.patch
+Patch7010: linux-2.6.16-cachefs.patch
+Patch7011: linux-2.6.16-cachefiles.patch
+
 #
 # 10000 to 20000 is for stuff that has to come last due to the
 # amount of drivers they touch. But only these should go here.
@@ -1072,6 +1086,20 @@
 # Send standard events on {dis,}association
 %patch5008 -p1
 
+# Cachefs bits
+%patch7000 -p1
+%patch7001 -p1
+%patch7002 -p1
+%patch7003 -p1
+%patch7004 -p1
+%patch7005 -p1
+%patch7006 -p1
+%patch7007 -p1
+%patch7008 -p1
+%patch7009 -p1
+%patch7010 -p1
+%patch7011 -p1
+
 #
 # final stuff
 #
@@ -1684,6 +1712,9 @@
 %endif
 
 %changelog
+* Wed May 10 2006 Steve Dickson <steved at redhat.com>
+- Added cachefs bits to private branch
+
 * Tue May  9 2006 Dave Jones <davej at redhat.com>
 - 2.6.16.15
 - Fix exec-shield default, which should fix a few programs that



