[Cluster-devel] [PATCH 02/11] gfs2: Move rs_{sizehint, rgd_gh} fields into the inode

Steven Whitehouse swhiteho at redhat.com
Mon Oct 8 10:34:42 UTC 2018


Hi,


On 05/10/18 20:18, Andreas Gruenbacher wrote:
> Move the rs_sizehint and rs_rgd_gh fields from struct gfs2_blkreserv
> into the inode: they are more closely related to the inode than to a
> particular reservation.
Yes, that makes sense, I think - these fields have moved around a bit 
during the discussions on getting this code right. The only real issue 
here is that the gh is quite large, which was why it was separate in the 
first place, but it probably makes more sense to do it this way.

Steve.

> Signed-off-by: Andreas Gruenbacher <agruenba at redhat.com>
> ---
>   fs/gfs2/file.c   |  4 ++--
>   fs/gfs2/incore.h |  6 ++----
>   fs/gfs2/main.c   |  2 ++
>   fs/gfs2/rgrp.c   | 16 +++++++---------
>   4 files changed, 13 insertions(+), 15 deletions(-)
>
> diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
> index 08369c6cd127..e8864ff2ed03 100644
> --- a/fs/gfs2/file.c
> +++ b/fs/gfs2/file.c
> @@ -347,8 +347,8 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
>   	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
>   	int hint = min_t(size_t, INT_MAX, blks);
>   
> -	if (hint > atomic_read(&ip->i_res.rs_sizehint))
> -		atomic_set(&ip->i_res.rs_sizehint, hint);
> +	if (hint > atomic_read(&ip->i_sizehint))
> +		atomic_set(&ip->i_sizehint, hint);
>   }
>   
>   /**
> diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
> index b96d39c28e17..9d7d9bd8c3a9 100644
> --- a/fs/gfs2/incore.h
> +++ b/fs/gfs2/incore.h
> @@ -309,10 +309,6 @@ struct gfs2_qadata { /* quota allocation data */
>   */
>   
>   struct gfs2_blkreserv {
> -	/* components used during write (step 1): */
> -	atomic_t rs_sizehint;         /* hint of the write size */
> -
> -	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
>   	struct rb_node rs_node;       /* link to other block reservations */
>   	struct gfs2_rbm rs_rbm;       /* Start of reservation */
>   	u32 rs_free;                  /* how many blocks are still free */
> @@ -417,8 +413,10 @@ struct gfs2_inode {
>   	struct gfs2_holder i_iopen_gh;
>   	struct gfs2_holder i_gh; /* for prepare/commit_write only */
>   	struct gfs2_qadata *i_qadata; /* quota allocation data */
> +	struct gfs2_holder i_rgd_gh;
>   	struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
>   	u64 i_goal;	/* goal block for allocations */
> +	atomic_t i_sizehint;  /* hint of the write size */
>   	struct rw_semaphore i_rw_mutex;
>   	struct list_head i_ordered;
>   	struct list_head i_trunc_list;
> diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
> index 2d55e2c3333c..c7603063f861 100644
> --- a/fs/gfs2/main.c
> +++ b/fs/gfs2/main.c
> @@ -39,9 +39,11 @@ static void gfs2_init_inode_once(void *foo)
>   	struct gfs2_inode *ip = foo;
>   
>   	inode_init_once(&ip->i_inode);
> +	atomic_set(&ip->i_sizehint, 0);
>   	init_rwsem(&ip->i_rw_mutex);
>   	INIT_LIST_HEAD(&ip->i_trunc_list);
>   	ip->i_qadata = NULL;
> +	gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
>   	memset(&ip->i_res, 0, sizeof(ip->i_res));
>   	RB_CLEAR_NODE(&ip->i_res.rs_node);
>   	ip->i_hash_cache = NULL;
> diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
> index c9caddc2627c..34122c546576 100644
> --- a/fs/gfs2/rgrp.c
> +++ b/fs/gfs2/rgrp.c
> @@ -1564,7 +1564,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
>   	if (S_ISDIR(inode->i_mode))
>   		extlen = 1;
>   	else {
> -		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
> +		extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
>   		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
>   	}
>   	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
> @@ -2076,7 +2076,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
>   			}
>   			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
>   						   LM_ST_EXCLUSIVE, flags,
> -						   &rs->rs_rgd_gh);
> +						   &ip->i_rgd_gh);
>   			if (unlikely(error))
>   				return error;
>   			if (!gfs2_rs_active(rs) && (loops < 2) &&
> @@ -2085,7 +2085,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
>   			if (sdp->sd_args.ar_rgrplvb) {
>   				error = update_rgrp_lvb(rs->rs_rbm.rgd);
>   				if (unlikely(error)) {
> -					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
> +					gfs2_glock_dq_uninit(&ip->i_rgd_gh);
>   					return error;
>   				}
>   			}
> @@ -2128,7 +2128,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
>   
>   		/* Unlock rgrp if required */
>   		if (!rg_locked)
> -			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
> +			gfs2_glock_dq_uninit(&ip->i_rgd_gh);
>   next_rgrp:
>   		/* Find the next rgrp, and continue looking */
>   		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
> @@ -2165,10 +2165,8 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
>   
>   void gfs2_inplace_release(struct gfs2_inode *ip)
>   {
> -	struct gfs2_blkreserv *rs = &ip->i_res;
> -
> -	if (gfs2_holder_initialized(&rs->rs_rgd_gh))
> -		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
> +	if (gfs2_holder_initialized(&ip->i_rgd_gh))
> +		gfs2_glock_dq_uninit(&ip->i_rgd_gh);
>   }
>   
>   /**
> @@ -2326,7 +2324,7 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
>   				goto out;
>   			/* We used up our block reservation, so we should
>   			   reserve more blocks next time. */
> -			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
> +			atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
>   		}
>   		__rs_deltree(rs);
>   	}




More information about the Cluster-devel mailing list