[Cluster-devel] cluster/gfs-kernel/src/gfs main.c ops_export.c ...

teigland at sourceware.org
Mon Jul 23 16:41:46 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2007-07-23 16:41:46

Modified files:
	gfs-kernel/src/gfs: main.c ops_export.c ops_vm.c 

Log message:
	Brute-force porting to 2.6.23-rc1.  There are non-trivial changes for
	which I just copied what had been done to gfs2 without investigating
	whether gfs1 needs something different.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/main.c.diff?cvsroot=cluster&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_export.c.diff?cvsroot=cluster&r1=1.11&r2=1.12
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_vm.c.diff?cvsroot=cluster&r1=1.7&r2=1.8

--- cluster/gfs-kernel/src/gfs/main.c	2006/07/18 20:48:20	1.9
+++ cluster/gfs-kernel/src/gfs/main.c	2007/07/23 16:41:46	1.10
@@ -50,7 +50,7 @@
 
 	gfs_glock_cachep = kmem_cache_create("gfs_glock", sizeof(struct gfs_glock),
 					     0, 0,
-					     NULL, NULL);
+					     NULL);
 	gfs_inode_cachep = NULL;
 	gfs_bufdata_cachep = NULL;
 	gfs_mhc_cachep = NULL;
@@ -60,19 +60,19 @@
 
 	gfs_inode_cachep = kmem_cache_create("gfs_inode", sizeof(struct gfs_inode),
 					     0, 0,
-					     NULL, NULL);
+					     NULL);
 	if (!gfs_inode_cachep)
 		goto fail1;
 
 	gfs_bufdata_cachep = kmem_cache_create("gfs_bufdata", sizeof(struct gfs_bufdata),
 					       0, 0,
-					       NULL, NULL);
+					       NULL);
 	if (!gfs_bufdata_cachep)
 		goto fail1;
 
 	gfs_mhc_cachep = kmem_cache_create("gfs_meta_header_cache", sizeof(struct gfs_meta_header_cache),
 					   0, 0,
-					   NULL, NULL);
+					   NULL);
 	if (!gfs_mhc_cachep)
 		goto fail;
 
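For reference, the main.c hunks only track the 2.6.23 slab API change: kmem_cache_create() no longer takes a destructor argument, so the trailing NULL goes away. A minimal sketch of cache setup against the new prototype (the "example_obj" name and struct below are made up for illustration, not gfs types):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct example_obj {			/* hypothetical payload type */
	int dummy;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* name, object size, align, flags, constructor -- and no destructor */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj),
					   0, 0, NULL);
	if (!example_cachep)
		return -ENOMEM;
	return 0;
}
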
--- cluster/gfs-kernel/src/gfs/ops_export.c	2007/06/05 18:15:51	1.11
+++ cluster/gfs-kernel/src/gfs/ops_export.c	2007/07/23 16:41:46	1.12
@@ -18,6 +18,7 @@
 #include <asm/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 
 #include "gfs.h"
 #include "dio.h"
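
The ops_export.c change is just the new header: as of 2.6.23 the NFS export hooks appear to live in their own <linux/exportfs.h> rather than being picked up via linux/fs.h, so any file declaring a struct export_operations has to include it explicitly. Roughly (the table below is a hypothetical placeholder, not the gfs one):

#include <linux/exportfs.h>	/* struct export_operations now comes from here */
#include <linux/fs.h>

/* hypothetical, minimal export table; the filesystem fills in its real
 * filehandle encode/decode and parent-lookup hooks */
static struct export_operations example_export_ops = {
	/* .decode_fh, .encode_fh, .get_parent, ... as needed */
};
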
--- cluster/gfs-kernel/src/gfs/ops_vm.c	2006/07/10 23:22:34	1.7
+++ cluster/gfs-kernel/src/gfs/ops_vm.c	2007/07/23 16:41:46	1.8
@@ -13,7 +13,6 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <asm/semaphore.h>
 #include <linux/completion.h>
@@ -53,7 +52,7 @@
 }
 
 /**
- * gfs_private_nopage -
+ * gfs_private_fault -
  * @area:
  * @address:
  * @type:
@@ -61,31 +60,29 @@
  * Returns: the page
  */
 
-static struct page *
-gfs_private_nopage(struct vm_area_struct *area,
-		   unsigned long address, int *type)
+static int gfs_private_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct gfs_inode *ip = get_v2ip(area->vm_file->f_mapping->host);
+	struct gfs_inode *ip = get_v2ip(vma->vm_file->f_mapping->host);
 	struct gfs_holder i_gh;
-	struct page *result;
 	int error;
+	int ret = 0;
 
 	atomic_inc(&ip->i_sbd->sd_ops_vm);
 
 	error = gfs_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 	if (error)
-		return NULL;
+		goto out;
 
 	set_bit(GIF_PAGED, &ip->i_flags);
 
-	result = filemap_nopage(area, address, type);
+	ret = filemap_fault(vma, vmf);
 
-	if (result && result != NOPAGE_OOM)
+	if (ret && ret != VM_FAULT_OOM)
 		pfault_be_greedy(ip);
 
 	gfs_glock_dq_uninit(&i_gh);
-
-	return result;
+ out:
+	return ret;
 }
 
 /**
@@ -170,7 +167,7 @@
 }
 
 /**
- * gfs_sharewrite_nopage -
+ * gfs_sharewrite_fault -
  * @area:
  * @address:
  * @type:
@@ -178,61 +175,72 @@
  * Returns: the page
  */
 
-static struct page *
-gfs_sharewrite_nopage(struct vm_area_struct *area,
-		      unsigned long address, int *type)
+static int gfs_sharewrite_fault(struct vm_area_struct *vma,
+				struct vm_fault *vmf)
 {
-	struct gfs_inode *ip = get_v2ip(area->vm_file->f_mapping->host);
+	struct file *file = vma->vm_file;
+	struct gfs_file *gf = file->private_data;
+	struct gfs_inode *ip = get_v2ip(vma->vm_file->f_mapping->host);
 	struct gfs_holder i_gh;
-	struct page *result = NULL;
-	unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 	int alloc_required;
 	int error;
+	int ret = 0;
 
 	atomic_inc(&ip->i_sbd->sd_ops_vm);
 
 	error = gfs_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 	if (error)
-		return NULL;
+		goto out;
 
 	if (gfs_is_jdata(ip))
-		goto out;
+		goto out_unlock;
 
 	set_bit(GIF_PAGED, &ip->i_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-	error = gfs_write_alloc_required(ip, (uint64_t)index << PAGE_CACHE_SHIFT,
+	error = gfs_write_alloc_required(ip,
+					 (u64)vmf->pgoff << PAGE_CACHE_SHIFT,
 					 PAGE_CACHE_SIZE, &alloc_required);
-	if (error)
-		goto out;
+	if (error) {
+		ret = VM_FAULT_OOM; /* XXX: are these right? */
+		goto out_unlock;
+	}
 
-	result = filemap_nopage(area, address, type);
-	if (!result || result == NOPAGE_OOM)
-		goto out;
+	ret = filemap_fault(vma, vmf);
+	if (ret & VM_FAULT_ERROR)
+		goto out_unlock;
 
 	if (alloc_required) {
-		error = alloc_page_backing(ip, index);
+		/* XXX: do we need to drop page lock around alloc_page_backing?*/
+		error = alloc_page_backing(ip, vmf->page);
 		if (error) {
-			page_cache_release(result);
-			result = NULL;
-			goto out;
+                        /*
+                         * VM_FAULT_LOCKED should always be the case for
+                         * filemap_fault, but it may not be in a future
+                         * implementation.
+                         */
+			if (ret & VM_FAULT_LOCKED)
+				unlock_page(vmf->page);
+			page_cache_release(vmf->page);
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
 		}
-		set_page_dirty(result);
+		set_page_dirty(vmf->page);
 	}
 
 	pfault_be_greedy(ip);
 
- out:
+ out_unlock:
 	gfs_glock_dq_uninit(&i_gh);
-
-	return result;
+ out:
+	return ret;
 }
 
 struct vm_operations_struct gfs_vm_ops_private = {
-	.nopage = gfs_private_nopage,
+	.fault = gfs_private_fault,
 };
 
 struct vm_operations_struct gfs_vm_ops_sharewrite = {
-	.nopage = gfs_sharewrite_nopage,
+	.fault = gfs_sharewrite_fault,
 };
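
The ops_vm.c changes are the ->nopage to ->fault conversion that 2.6.23 requires: the handler takes a struct vm_fault, reads the file-relative page index from vmf->pgoff instead of computing it from the faulting address, and returns VM_FAULT_* bits, with filemap_fault() handing back the (locked) page in vmf->page. Stripped of the glock and block-allocation logic above, the basic shape of such a handler is roughly as follows (all names here are placeholders, not gfs code):

#include <linux/fs.h>
#include <linux/mm.h>

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;

	/* take whatever per-inode lock the filesystem needs here ... */

	/* vmf->pgoff already holds the page index within the file, so the
	 * old (address - vma->vm_start) >> PAGE_CACHE_SHIFT math is gone */
	ret = filemap_fault(vma, vmf);

	/* ... and drop it again.  On success vmf->page is the faulted page
	 * (locked when VM_FAULT_LOCKED is set); failures are reported via
	 * bits covered by VM_FAULT_ERROR, e.g. VM_FAULT_OOM. */
	return ret;
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,		/* was .nopage before 2.6.23 */
};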
 



