
From: "Ashok Raj" <ashok.raj@intel.com>

This patch changes __init to __devinit for calibrate_delay() and init_idle() so
that when a new cpu arrives via hotplug, these functions can still be called
after boot.
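
Background sketch (not part of this patch; the definitions are paraphrased from
include/linux/init.h and bring_up_cpu() is a made-up name for illustration):
with CONFIG_HOTPLUG set, __devinit expands to nothing and the annotated code
stays in regular .text, whereas plain __init code lands in .init.text and is
freed by free_initmem() once boot completes, so a hot-added cpu could no longer
call it.

#ifdef CONFIG_HOTPLUG
#define __devinit			/* code stays resident after boot */
#else
#define __devinit	__init		/* discarded along with init memory */
#endif

/*
 * Hypothetical bring-up path for a hot-added cpu.  If calibrate_delay()
 * and init_idle() were still __init, these calls would jump into memory
 * that free_initmem() has already released.
 */
void __devinit bring_up_cpu(int cpu, task_t *idle)
{
	calibrate_delay();		/* recompute loops_per_jiffy on this cpu */
	init_idle(idle, cpu);		/* install the idle task on its runqueue */
}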


---

 25-akpm/init/main.c    |    2 +-
 25-akpm/kernel/sched.c |    2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff -puN init/main.c~ia64-cpu-hotplug-core_kernel_init init/main.c
--- 25/init/main.c~ia64-cpu-hotplug-core_kernel_init	2004-04-25 22:23:09.559212664 -0700
+++ 25-akpm/init/main.c	2004-04-25 22:23:09.585208712 -0700
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(loops_per_jiffy);
    better than 1% */
 #define LPS_PREC 8
 
-void __init calibrate_delay(void)
+void __devinit calibrate_delay(void)
 {
 	unsigned long ticks, loopbit;
 	int lps_precision = LPS_PREC;
diff -puN kernel/sched.c~ia64-cpu-hotplug-core_kernel_init kernel/sched.c
--- 25/kernel/sched.c~ia64-cpu-hotplug-core_kernel_init	2004-04-25 22:23:09.561212360 -0700
+++ 25-akpm/kernel/sched.c	2004-04-25 22:23:09.585208712 -0700
@@ -3389,7 +3389,7 @@ void show_state(void)
 	read_unlock(&tasklist_lock);
 }
 
-void __init init_idle(task_t *idle, int cpu)
+void __devinit init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;

_
