/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>

#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/stackframe.h>
#include <asm/uaccess.h>
/*
 * We use this if we don't have any better idle routine..
 * (This to kill: kernel/platform.c.
 *
 * Fallback idle routine: does nothing and returns immediately, leaving
 * cpu_idle()'s loop to poll need_resched().
 */
void default_idle (void)
{
}
/*
 * NOTE(review): this chunk is a damaged extraction -- the original file's
 * line numbers are fused into the text and interior lines (the function
 * braces and the loop body) are missing.  Code left byte-identical;
 * re-extract before compiling.
 */
47 * The idle thread. There's no useful work to be done, so just try to conserve
48 * power and have a low exit latency (ie sit in a loop waiting for somebody to
49 * say that they'd like to reschedule)
/* Never returns (ATTRIB_NORET); each CPU parks here when it has no work. */
51 ATTRIB_NORET void cpu_idle(void)
53 /* endless idle loop with no priority at all */
/* Spin until the scheduler marks this task as needing rescheduling. */
55 while (!need_resched())
62 asmlinkage void ret_from_fork(void);
/*
 * Prepare a pt_regs frame so the task returns to user space at `pc` with
 * stack pointer `sp` (sp handling not visible in this damaged fragment).
 * NOTE(review): original line numbers are fused into the text and interior
 * lines are missing; code left byte-identical.
 */
64 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
68 /* New thread loses kernel privileges. */
/* Drop CP0 usability/kernel-mode bits from the inherited status word. */
69 status = regs->cp0_status & ~(ST0_CU0|ST0_FR|ST0_KSU);
/* 64-bit-register tasks run with ST0_FR set; 32-bit tasks clear it. */
71 status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
72 regs->cp0_status = status;
/* Fresh image: FPU state is discarded. */
73 current->used_math = 0;
/* New thread may only access user addresses. */
77 current_thread_info()->addr_limit = USER_DS;
/*
 * Per-thread teardown / flush hooks.  Bodies are not visible in this
 * damaged fragment (presumably empty on this architecture -- TODO confirm
 * against a clean copy of the file).
 */
80 void exit_thread(void)
84 void flush_thread(void)
/*
 * Set up the kernel stack and saved registers of a new (forked) task.
 * NOTE(review): damaged extraction -- original line numbers fused into the
 * text, and several lines (closing braces, the else keyword, the return)
 * are missing.  Code left byte-identical.
 */
88 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
89 unsigned long unused, struct task_struct *p,
92 struct thread_info *ti = p->thread_info;
93 struct pt_regs *childregs;
/* Top of the child's kernel stack, minus the 32-byte argument save area. */
96 childksp = (unsigned long)ti + KERNEL_STACK_SIZE - 32;
102 /* set up new TSS. */
/* Child's register frame sits just below the top of its kernel stack. */
103 childregs = (struct pt_regs *) childksp - 1;
105 childregs->regs[7] = 0; /* Clear error flag */
106 childregs->regs[2] = 0; /* Child gets zero as return value */
/* Parent's syscall return value ($v0) is the child's pid. */
107 regs->regs[2] = p->pid;
/* ST0_CU0 set => forking a kernel thread: give it gp/sp in kernel space. */
109 if (childregs->cp0_status & ST0_CU0) {
110 childregs->regs[28] = (unsigned long) ti;
111 childregs->regs[29] = childksp;
112 ti->addr_limit = KERNEL_DS;
/* (else branch, user thread: user stack pointer and user address limit) */
114 childregs->regs[29] = usp;
115 ti->addr_limit = USER_DS;
/* Context-switch state: resume in ret_from_fork with sp at childregs. */
117 p->thread.reg29 = (unsigned long) childregs;
118 p->thread.reg31 = (unsigned long) ret_from_fork;
121 * New tasks lose permission to use the fpu. This accelerates context
122 * switching for most programs since they don't use the fpu.
124 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1|ST0_KSU);
125 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
126 p->set_child_tid = p->clear_child_tid = NULL;
131 /* Fill in the fpu structure for a core dump.. */
132 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
134 memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu));
/*
 * NOTE(review): damaged extraction -- original line numbers fused into the
 * text and most of the inline-asm template lines are missing.  Code left
 * byte-identical; do not attempt to compile this fragment.
 */
139 * Create a kernel thread
/* Issues the clone(2) syscall directly from asm, then runs fn(arg) in the
 * child and exits; the parent returns the clone result. */
141 int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
145 __asm__ __volatile__(
/* Parent and child are distinguished by comparing the child-stack arg
 * against the live $sp after the syscall. */
150 " beq $6, $sp, 1f \n"
158 : "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
159 "r" (flags | CLONE_VM | CLONE_UNTRACED)
161 * The called subroutine might have destroyed any of the
162 * at, result, argument or temporary registers ...
/* Heavy clobber list: everything caller-saved plus $ra. */
164 : "$2", "$3", "$4", "$5", "$6", "$7", "$8",
165 "$9","$10","$11","$12","$13","$14","$15","$24","$25","$31");
/*
 * Stack-frame layout info recovered by scanning a function's prologue:
 * where $ra and the frame pointer are saved relative to $sp.
 * NOTE(review): the struct member lines are missing from this damaged
 * fragment (pc_offset / frame_offset, per their uses below); original
 * line numbers are fused into the text.  Code left byte-identical.
 */
170 struct mips_frame_info {
/* One frame descriptor per sleeping-path function used by get_wchan(). */
174 static struct mips_frame_info schedule_frame;
175 static struct mips_frame_info schedule_timeout_frame;
176 static struct mips_frame_info sleep_on_frame;
177 static struct mips_frame_info sleep_on_timeout_frame;
178 static struct mips_frame_info wait_for_completion_frame;
/* Non-zero once frame_info_init() analyzed all of the above successfully. */
179 static int mips_frame_info_initialized;
/*
 * Scan up to the first 128 instructions of `func`'s prologue looking for
 * the "sd $ra, off($sp)" and "sd $s8, off($sp)" stores, recording their
 * word offsets into *info.  Returns nonzero (not visible here) when either
 * offset could not be found.
 * NOTE(review): damaged extraction -- original line numbers fused into the
 * text, and several lines (braces, break statements, the offset
 * assignments' left-hand lines, the return) are missing.  Code left
 * byte-identical.
 */
180 static int __init get_frame_info(struct mips_frame_info *info, void *func)
183 union mips_instruction *ip = (union mips_instruction *)func;
184 info->pc_offset = -1;
185 info->frame_offset = -1;
186 for (i = 0; i < 128; i++, ip++) {
187 /* if jal, jalr, jr, stop. */
188 if (ip->j_format.opcode == jal_op ||
189 (ip->r_format.opcode == spec_op &&
190 (ip->r_format.func == jalr_op ||
191 ip->r_format.func == jr_op)))
/* Only stores relative to $sp (register 29) are of interest. */
193 if (ip->i_format.opcode == sd_op &&
194 ip->i_format.rs == 29) {
195 /* sd $ra, offset($sp) */
196 if (ip->i_format.rt == 31) {
/* Take the first $ra save only; a second one ends the scan. */
197 if (info->pc_offset != -1)
200 ip->i_format.simmediate / sizeof(long);
202 /* sd $s8, offset($sp) */
203 if (ip->i_format.rt == 30) {
204 if (info->frame_offset != -1)
207 ip->i_format.simmediate / sizeof(long);
/* Prologue didn't match the expected shape: report and reset. */
211 if (info->pc_offset == -1 || info->frame_offset == -1) {
212 printk("Can't analyze prologue code at %p\n", func);
213 info->pc_offset = -1;
214 info->frame_offset = -1;
220 void __init frame_info_init(void)
222 mips_frame_info_initialized =
223 !get_frame_info(&schedule_frame, schedule) &&
224 !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
225 !get_frame_info(&sleep_on_frame, sleep_on) &&
226 !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
227 !get_frame_info(&wait_for_completion_frame, wait_for_completion);
231 * Return saved PC of a blocked thread.
233 unsigned long thread_saved_pc(struct thread_struct *t)
235 extern void ret_from_fork(void);
237 /* New born processes are a special case */
238 if (t->reg31 == (unsigned long) ret_from_fork)
241 if (schedule_frame.pc_offset < 0)
243 return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
/*
 * NOTE(review): original line numbers are fused into the text below; code
 * left byte-identical.
 */
247 * These bracket the sleeping functions..
/* Linker-placed markers delimiting the scheduling-functions text range. */
249 extern void scheduling_functions_start_here(void);
250 extern void scheduling_functions_end_here(void);
251 #define first_sched ((unsigned long) scheduling_functions_start_here)
252 #define last_sched ((unsigned long) scheduling_functions_end_here)
/*
 * Find the user-meaningful PC a sleeping task is blocked at, by walking
 * out of the scheduler frames using the prologue info from
 * frame_info_init().
 * NOTE(review): damaged extraction -- original line numbers fused into the
 * text, interior lines missing, and the function continues past the end of
 * this chunk.  Code left byte-identical.
 */
254 /* get_wchan - a maintenance nightmare ... */
255 unsigned long get_wchan(struct task_struct *p)
257 unsigned long frame, pc;
/* Only meaningful for an existing, non-current, non-running task. */
259 if (!p || p == current || p->state == TASK_RUNNING)
262 if (!mips_frame_info_initialized)
264 pc = thread_saved_pc(&p->thread);
/* If the task isn't inside the scheduling functions, pc is the answer. */
265 if (pc < first_sched || pc >= last_sched)
/* Classify which sleep primitive the PC falls in by address ordering;
 * this relies on the link order of the functions in the sched text. */
268 if (pc >= (unsigned long) sleep_on_timeout)
269 goto schedule_timeout_caller;
270 if (pc >= (unsigned long) sleep_on)
271 goto schedule_caller;
272 if (pc >= (unsigned long) interruptible_sleep_on_timeout)
273 goto schedule_timeout_caller;
274 if (pc >= (unsigned long)interruptible_sleep_on)
275 goto schedule_caller;
276 if (pc >= (unsigned long)wait_for_completion)
277 goto schedule_caller;
278 goto schedule_timeout_caller;
/* (schedule_caller path) unwind one frame out of schedule(). */
281 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
282 if (pc >= (unsigned long) sleep_on)
283 pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
285 pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
288 schedule_timeout_caller:
289 /* Must be schedule_timeout ... */
290 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
292 /* The schedule_timeout frame ... */
293 pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];
/* If still inside the sched range, unwind one more frame. */
295 if (pc >= first_sched && pc < last_sched) {
296 /* schedule_timeout called by [interruptible_]sleep_on_timeout */
297 frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
298 pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
302 if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */