[PATCH 09/10] audit: Allocate fsnotify mark independently of chunk

Jan Kara <jack@suse.cz>
Tue Jul 10 10:02:15 UTC 2018


Allocate the fsnotify mark independently instead of embedding it inside
the chunk. This will allow us to just replace the chunk attached to a
mark when growing / shrinking a chunk, instead of replacing the mark
attached to the inode, which is a more complex operation.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 kernel/audit_tree.c | 59 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 45 insertions(+), 14 deletions(-)
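
Aside (not part of the patch; git-am discards text below the '---'
marker): the indirection added here is the usual container_of()
embedding pattern -- fsnotify callbacks hand back a pointer to the
embedded fsnotify_mark, AUDIT_M() maps it to the audit_tree_mark
wrapper, and the wrapper's ->chunk pointer leads to the chunk. A
minimal userspace sketch of that pattern follows; the struct layouts
mirror the patch, but the harness around them is hypothetical:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's container_of(). */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fsnotify_mark { int dummy; };
	struct audit_chunk { int count; };

	struct audit_tree_mark {
		struct fsnotify_mark fsn_mark;
		struct audit_chunk *chunk;
	};

	int main(void)
	{
		struct audit_chunk chunk = { .count = 3 };
		struct audit_tree_mark mark = { .chunk = &chunk };
		/* Callbacks receive only the embedded member... */
		struct fsnotify_mark *entry = &mark.fsn_mark;
		/* ...and recover the wrapper, then the chunk. */
		struct audit_tree_mark *m =
			container_of(entry, struct audit_tree_mark, fsn_mark);
		printf("count=%d\n", m->chunk->count); /* prints: count=3 */
		return 0;
	}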

diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index bce3b04a365d..aec9b27a20ff 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -25,7 +25,7 @@ struct audit_tree {
 struct audit_chunk {
 	struct list_head hash;
 	unsigned long key;
-	struct fsnotify_mark mark;
+	struct fsnotify_mark *mark;
 	struct list_head trees;		/* with root here */
 	int dead;
 	int count;
@@ -38,6 +38,11 @@ struct audit_chunk {
 	} owners[];
 };
 
+struct audit_tree_mark {
+	struct fsnotify_mark fsn_mark;
+	struct audit_chunk *chunk;
+};
+
 static LIST_HEAD(tree_list);
 static LIST_HEAD(prune_list);
 static struct task_struct *prune_thread;
@@ -73,6 +78,7 @@ static struct task_struct *prune_thread;
  */
 
 static struct fsnotify_group *audit_tree_group;
+static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
 
 static struct audit_tree *alloc_tree(const char *s)
 {
@@ -142,10 +148,28 @@ static void audit_mark_put_chunk(struct audit_chunk *chunk)
 	call_rcu(&chunk->head, __put_chunk);
 }
 
+static inline struct audit_tree_mark *AUDIT_M(struct fsnotify_mark *entry)
+{
+	return container_of(entry, struct audit_tree_mark, fsn_mark);
+}
+
 static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
 {
-	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+	struct audit_chunk *chunk = AUDIT_M(entry)->chunk;
 	audit_mark_put_chunk(chunk);
+	kmem_cache_free(audit_tree_mark_cachep, entry);
+}
+
+static struct fsnotify_mark *alloc_fsnotify_mark(void)
+{
+	struct audit_tree_mark *mark;
+
+	mark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
+	if (!mark)
+		return NULL;
+	fsnotify_init_mark(&mark->fsn_mark, audit_tree_group);
+	mark->fsn_mark.mask = FS_IN_IGNORED;
+	return &mark->fsn_mark;
 }
 
 static struct audit_chunk *alloc_chunk(int count)
@@ -159,6 +183,13 @@ static struct audit_chunk *alloc_chunk(int count)
 	if (!chunk)
 		return NULL;
 
+	chunk->mark = alloc_fsnotify_mark();
+	if (!chunk->mark) {
+		kfree(chunk);
+		return NULL;
+	}
+	AUDIT_M(chunk->mark)->chunk = chunk;
+
 	INIT_LIST_HEAD(&chunk->hash);
 	INIT_LIST_HEAD(&chunk->trees);
 	chunk->count = count;
@@ -167,8 +198,6 @@ static struct audit_chunk *alloc_chunk(int count)
 		INIT_LIST_HEAD(&chunk->owners[i].list);
 		chunk->owners[i].index = i;
 	}
-	fsnotify_init_mark(&chunk->mark, audit_tree_group);
-	chunk->mark.mask = FS_IN_IGNORED;
 	return chunk;
 }
 
@@ -278,7 +307,7 @@ static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old,
 static void untag_chunk(struct node *p)
 {
 	struct audit_chunk *chunk = find_chunk(p);
-	struct fsnotify_mark *entry = &chunk->mark;
+	struct fsnotify_mark *entry = chunk->mark;
 	struct audit_chunk *new = NULL;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
@@ -298,7 +327,7 @@ static void untag_chunk(struct node *p)
 	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		mutex_unlock(&entry->group->mark_mutex);
 		if (new)
-			fsnotify_put_mark(&new->mark);
+			fsnotify_put_mark(new->mark);
 		goto out;
 	}
 
@@ -322,9 +351,9 @@ static void untag_chunk(struct node *p)
 	if (!new)
 		goto Fallback;
 
-	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
+	if (fsnotify_add_mark_locked(new->mark, entry->connector->obj,
 				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
-		fsnotify_put_mark(&new->mark);
+		fsnotify_put_mark(new->mark);
 		goto Fallback;
 	}
 
@@ -344,7 +373,7 @@ static void untag_chunk(struct node *p)
 	fsnotify_detach_mark(entry);
 	mutex_unlock(&entry->group->mark_mutex);
 	fsnotify_free_mark(entry);
-	fsnotify_put_mark(&new->mark);	/* drop initial reference */
+	fsnotify_put_mark(new->mark);	/* drop initial reference */
 	goto out;
 
 Fallback:
@@ -375,7 +404,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 		return -ENOMEM;
 	}
 
-	entry = &chunk->mark;
+	entry = chunk->mark;
 	if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
 		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(entry);
@@ -426,7 +455,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	if (!old_entry)
 		return create_chunk(inode, tree);
 
-	old = container_of(old_entry, struct audit_chunk, mark);
+	old = AUDIT_M(old_entry)->chunk;
 
 	/* are we already there? */
 	spin_lock(&hash_lock);
@@ -447,7 +476,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		return -ENOMEM;
 	}
 
-	chunk_entry = &chunk->mark;
+	chunk_entry = chunk->mark;
 
 	/*
 	 * mark_mutex protects mark from getting detached and thus also from
@@ -457,7 +486,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		/* old_entry is being shot, lets just lie */
 		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(old_entry);
-		fsnotify_put_mark(&chunk->mark);
+		fsnotify_put_mark(chunk->mark);
 		return -ENOENT;
 	}
 
@@ -1009,7 +1038,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
 
 static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
 {
-	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+	struct audit_chunk *chunk = AUDIT_M(entry)->chunk;
 
 	evict_chunk(chunk);
 
@@ -1030,6 +1059,8 @@ static int __init audit_tree_init(void)
 {
 	int i;
 
+	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
+
 	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
 	if (IS_ERR(audit_tree_group))
 		audit_panic("cannot initialize fsnotify group for rectree watches");
-- 
2.16.4
