path: root/kernel
author		Peter Zijlstra	2007-10-16 23:25:50 -0700
committer	Linus Torvalds	2007-10-17 08:42:45 -0700
commit		3e26c149c358529b1605f8959341d34bc4b880a3 (patch)
tree		9d173b1753b86bcf03a8591e2509e3162234447c /kernel
parent		04fbfdc14e5f48463820d6b9807daa5e9c92c51f (diff)
mm: dirty balancing for tasks
Based on ideas of Andrew: http://marc.info/?l=linux-kernel&m=102912915020543&w=2

Scale the bdi dirty limit inversely with each task's dirty rate. This gives heavy writers a lower dirty limit than the occasional writer.

Andrea proposed something similar: http://lwn.net/Articles/152277/

The main disadvantage of his patch is that it uses an unrelated quantity to measure time, which leaves him with a workload-dependent tunable. Other than that, the two approaches appear quite similar.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
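As a minimal illustration of the scaling (a userspace sketch, not the kernel code itself): each task's share of recently dirtied pages is tracked as a fraction numerator/denominator, and that fraction shrinks the task's view of the bdi dirty limit. The one-eighth maximum reduction and the half-limit floor below are assumptions modeled on the companion mm/page-writeback.c change in this series.

#include <stdio.h>

/*
 * Userspace model of the per-task dirty scaling; illustrative only.
 * A task's share of recent dirtying (numerator/denominator) reduces
 * its effective dirty limit by up to one eighth of the bdi limit,
 * with a defensive floor at half the limit.
 */
static unsigned long task_dirty_limit(unsigned long bdi_limit,
				      unsigned long numerator,
				      unsigned long denominator)
{
	unsigned long long inv = bdi_limit >> 3;	/* max reduction */
	unsigned long dirty = bdi_limit;

	inv *= numerator;		/* scale reduction by this task's */
	inv /= denominator;		/* share of recent dirtying       */

	dirty -= (unsigned long)inv;
	if (dirty < bdi_limit / 2)	/* never drop below half the limit */
		dirty = bdi_limit / 2;
	return dirty;
}

int main(void)
{
	/* A task doing 100% of recent dirtying vs. one doing 1%. */
	printf("heavy writer:      %lu pages\n",
	       task_dirty_limit(1000, 100, 100));
	printf("occasional writer: %lu pages\n",
	       task_dirty_limit(1000, 1, 100));
	return 0;
}

With a 1000-page bdi limit, the heavy writer is throttled at 875 pages while the 1% writer keeps 999. The per-task state behind that fraction (tsk->dirties) is what the fork.c hunks below initialize and destroy.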
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c1383912..163325af8179 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
 void free_task(struct task_struct *tsk)
 {
+	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	int err;
 
 	prepare_to_copy(orig);
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	*tsk = *orig;
 	tsk->stack = ti;
+
+	err = prop_local_init_single(&tsk->dirties);
+	if (err) {
+		free_thread_info(ti);
+		free_task_struct(tsk);
+		return NULL;
+	}
+
 	setup_thread_stack(tsk, orig);
 
 #ifdef CONFIG_CC_STACKPROTECTOR