/*
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 *	the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>	Do loops_per_jiffy
 *	calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>	fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 *	& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *	scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

/*
 * Structure and data for smp_call_function().  This is designed to minimise static memory
 * requirements.  It also looks cleaner.
 */
static spinlock_t call_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};

static volatile struct call_data_struct *call_data;
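
/*
 * Editorial sketch of the call_data handshake, inferred from the code
 * below (not part of the original file).  call_lock serializes senders;
 * the <started> counter is the point at which the receiver releases the
 * 'pointer lock' on call_data:
 *
 *	sender					receiver (handle_IPI)
 *	------					---------------------
 *	spin_lock(&call_lock);
 *	call_data = &data;
 *	mb(); send IPI  ----------------->	snapshot func/info/wait
 *	spin on data.started == cpus  <---	atomic_inc(&data->started);
 *						(*func)(info);
 *	spin on data.finished == cpus <---	if (wait) atomic_inc(&data->finished);
 *	call_data = NULL;
 *	spin_unlock(&call_lock);
 */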

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU(__u64, ipi_operation) ____cacheline_aligned;
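
/*
 * Editorial sketch of the ipi_operation protocol, inferred from the code
 * below (not part of the original file).  A sender posts an operation by
 * setting a bit in the target's per-CPU word and firing one hardware IPI:
 *
 *	set_bit(IPI_CPU_STOP, &per_cpu(ipi_operation, dest_cpu));
 *	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
 *
 * The handler drains every pending bit atomically with xchg(pending_ipis, 0),
 * so several operations posted back-to-back may be serviced by a single IPI.
 */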

static void
stop_this_cpu (void)
{
	extern void cpu_halt (void);
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__get_cpu_var(ipi_operation);
	unsigned long ops;

	/* Count this now; we may make a call that never returns.  */
	local_cpu_data->ipi_count++;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);
				/*
				 * At this point the structure may be gone unless
				 * <wait> is true.
				 */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
				break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i) && i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			send_IPI_single(i, op);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

/*
 * This function sends a reschedule IPI to all (other) CPUs.  This should only be used if
 * some 'global' task became runnable, such as a RT task, that must be handled now.  The
 * first CPU that manages to grab the task will run it.
 */
void
smp_send_reschedule_all (void)
{
	int i;
	int cpu = get_cpu(); /* disable preemption */

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			smp_send_reschedule(i);
	put_cpu();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) {
		local_finish_flush_tlb_mm(mm);
		return;
	}

	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
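
#if 0
/*
 * Editorial sketch (hypothetical, not part of the original file) of the
 * mm->cpu_vm_mask optimization discussed above: IPI only the CPUs that
 * have actually run in <mm>.  The cpu_isset() accessor and the exact
 * type of cpu_vm_mask are assumptions here; a real version would also
 * flush locally if this CPU is in the mask.
 */
static void
smp_flush_tlb_mm_targeted (struct mm_struct *mm)
{
	int i, me = get_cpu();

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != me && cpu_isset(i, mm->cpu_vm_mask))
			smp_call_function_single(i, (void (*)(void *))
						 local_finish_flush_tlb_mm, mm, 1, 1);
	put_cpu();
}
#endif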

/*
 * Run a function on another CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until the function has completed on the other CPU.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or is or has executed it.
 */
int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		printk("%s: trying to call self\n", __FUNCTION__);
		put_cpu();
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
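
/*
 * Editorial usage sketch (hypothetical, not part of the original file):
 * the callback runs in interrupt context on the target CPU, so it must
 * be fast and must not sleep:
 *
 *	static void drain_example (void *unused)
 *	{
 *		local_flush_tlb_all();
 *	}
 *
 *	smp_call_function_single(1, drain_example, NULL, 1, 1);
 *
 * With <wait> non-zero, the call does not return until CPU 1 has finished
 * running drain_example().
 */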

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	currently unused.
 *  <wait>	If true, wait (atomically) until the function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func> or are or have
 * executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
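
/*
 * Editorial usage sketch (hypothetical, not part of the original file):
 * to run a fast, non-blocking callback on every CPU including this one,
 * pair smp_call_function() with a local call, roughly what on_each_cpu()
 * does for smp_flush_tlb_all() above:
 *
 *	preempt_disable();
 *	smp_call_function(drain_example, NULL, 1, 1);	(all other CPUs)
 *	drain_example(NULL);				(this CPU)
 *	preempt_enable();
 */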

void
smp_do_timer (struct pt_regs *regs)
{
	int user = user_mode(regs);

	if (--local_cpu_data->prof_counter <= 0) {
		local_cpu_data->prof_counter = local_cpu_data->prof_multiplier;
		update_process_times(user);
	}
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}