author	Linus Torvalds	2006-12-06 08:01:37 -0800
committer	Linus Torvalds	2006-12-06 08:01:37 -0800
commit	dd8856bda5f1308beb113281b248683992998a9e (patch)
tree	5dc35290cdbca32cbdecd93a76fa5b29075ac18c /mm
parent	f81cff0d4067e41fd7383d9c013cc82da7c169d2 (diff)
parent	06328b4f7919e9d2169d45cadc5a37b828a78eda (diff)
Merge git://git.infradead.org/users/dhowells/workq-2.6
* git://git.infradead.org/users/dhowells/workq-2.6:
  Actually update the fixed up compile failures.
  WorkQueue: Fix up arch-specific work items where possible
  WorkStruct: make allyesconfig
  WorkStruct: Pass the work_struct pointer instead of context data
  WorkStruct: Merge the pending bit into the wq_data pointer
  WorkStruct: Typedef the work function prototype
  WorkStruct: Separate delayable and non-delayable events.
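The series above changes the workqueue API so that a work function receives a pointer to its own work_struct rather than an opaque void *context; per-object state is recovered with container_of(). A minimal sketch of the conversion pattern (my_device, my_handler, and my_device_start are hypothetical names, not part of this commit):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_device {
        struct work_struct work;
        int pending_events;
};

/* New-style work function: takes the work_struct itself instead of a
 * void *context; the enclosing object is recovered with container_of().
 */
static void my_handler(struct work_struct *work)
{
        struct my_device *dev = container_of(work, struct my_device, work);

        dev->pending_events = 0;
}

static void my_device_start(struct my_device *dev)
{
        /* INIT_WORK() no longer takes a context argument. */
        INIT_WORK(&dev->work, my_handler);
        schedule_work(&dev->work);
}

Under the old API the same handler would have been declared as my_handler(void *data) and initialized with INIT_WORK(&dev->work, my_handler, dev).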
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	12
-rw-r--r--	mm/swap.c	4
2 files changed, 8 insertions, 8 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34eddc..5de81473df34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
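The reap_work->work.func test above works because, after this series, delayable events get their own type that embeds a plain work_struct alongside the timer; roughly (sketched from the 2.6.20 definition, not quoted from this commit):

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
};

So INIT_DELAYED_WORK(&dw, fn) initializes the embedded dw.work with fn, the handler still receives a struct work_struct *, and schedule_delayed_work_on() arms the embedded timer before queueing the work.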
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871f542f..d9a3770d8f3c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
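schedule_on_each_cpu() loses its context argument along with the work function's void * parameter: it now takes just a work_func_t and runs it once on every online CPU via keventd. A minimal caller under the new API (flush_per_cpu_state and flush_everywhere are hypothetical names):

#include <linux/workqueue.h>

/* Runs once on every online CPU; with no context pointer passed, any
 * per-invocation state must come from per-CPU data. */
static void flush_per_cpu_state(struct work_struct *unused)
{
        /* per-CPU work here */
}

static int flush_everywhere(void)
{
        return schedule_on_each_cpu(flush_per_cpu_state);
}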