[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[PATCH 16/19] introduce task_utrace_lock/task_utrace_unlock



- Add task_utrace_lock(task). It simply takes task->utrace->lock if
  this task was ever utraced. Otherwise it takes task_lock(), which
  serializes with utrace_attach_task()->utrace_task_alloc(). In both
  cases the caller can be sure it can't race with anything which needs
  utrace->lock.

- Add task_utrace_unlock(task), which releases the corresponding lock.

Signed-off-by: Oleg Nesterov <oleg redhat com>
---
 include/linux/utrace.h |    9 +++++++++
 kernel/utrace.c        |   26 ++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 0 deletions(-)

diff --git a/include/linux/utrace.h b/include/linux/utrace.h
index f251efe..5176f5f 100644
--- a/include/linux/utrace.h
+++ b/include/linux/utrace.h
@@ -109,6 +109,12 @@ void utrace_signal_handler(struct task_struct *, int);
 
 #ifndef CONFIG_UTRACE
 
+/*
+ * !CONFIG_UTRACE stubs: with utrace compiled out there is no utrace
+ * state to serialize against, so locking is a no-op.
+ */
+static inline void task_utrace_lock(struct task_struct *task)
+{
+}
+static inline void task_utrace_unlock(struct task_struct *task)
+{
+}
 /*
  * <linux/tracehook.h> uses these accessors to avoid #ifdef CONFIG_UTRACE.
  */
@@ -131,6 +137,9 @@ static inline void task_utrace_proc_status(struct seq_file *m,
 
 #else  /* CONFIG_UTRACE */
 
+extern void task_utrace_lock(struct task_struct *task);
+extern void task_utrace_unlock(struct task_struct *task);
+
 static inline unsigned long task_utrace_flags(struct task_struct *task)
 {
 	return task->utrace_flags;
diff --git a/kernel/utrace.c b/kernel/utrace.c
index a824ac3..508c13c 100644
--- a/kernel/utrace.c
+++ b/kernel/utrace.c
@@ -79,6 +79,32 @@ static int __init utrace_init(void)
 }
 module_init(utrace_init);
 
+/*
+ * Take the lock that excludes all utrace->lock users for @task.
+ *
+ * If @task already has a struct utrace, take utrace->lock directly.
+ * Otherwise take task_lock(): utrace_task_alloc() installs
+ * task->utrace under task_lock(), so as long as ->utrace is still
+ * NULL while we hold task_lock(), nothing that needs utrace->lock
+ * can race with us.  If the re-check under task_lock() finds that
+ * ->utrace was installed meanwhile, drop task_lock() and take
+ * utrace->lock instead.
+ *
+ * On return the caller holds either utrace->lock or task_lock()
+ * (the latter only when ->utrace is NULL); task_utrace_unlock()
+ * releases whichever one was taken.
+ */
+void task_utrace_lock(struct task_struct *task)
+{
+	struct utrace *utrace = task_utrace_struct(task);
+
+	if (!utrace) {
+		task_lock(task);
+		/* re-check: utrace_task_alloc() runs under task_lock() */
+		utrace = task_utrace_struct(task);
+		if (!utrace)
+			return;	/* still no utrace; leave with task_lock() held */
+
+		task_unlock(task);
+	}
+
+	spin_lock(&utrace->lock);
+}
+
+/*
+ * Release the lock taken by task_utrace_lock().
+ *
+ * Re-reading ->utrace here is safe: if task_utrace_lock() left us
+ * holding task_lock(), utrace_task_alloc() cannot have installed
+ * ->utrace in the meantime (it needs task_lock()), so ->utrace is
+ * still NULL and we release task_lock().  Otherwise we hold
+ * utrace->lock and release that.
+ * NOTE(review): this also assumes task->utrace is never cleared once
+ * set — confirm against the rest of the series.
+ */
+void task_utrace_unlock(struct task_struct *task)
+{
+	struct utrace *utrace = task_utrace_struct(task);
+
+	if (utrace)
+		spin_unlock(&utrace->lock);
+	else
+		task_unlock(task);
+}
+
 /*
  * Set up @task.utrace for the first time.  We can have races
  * between two utrace_attach_task() calls here.  The task_lock()
-- 
1.5.5.1



[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]