This changes do_fork() to return the task struct, rather than the PID.
Also changes CLONE_PID ("if my pid is 0, copy it") to CLONE_IDLETASK
("set child's pid to zero"), and disallows access to the flag from user
mode.
alpha_clone(unsigned long clone_flags, unsigned long usp,
struct switch_stack * swstack)
{
+ struct task_struct *p;
if (!usp)
usp = rdusp();
- return do_fork(clone_flags, usp, (struct pt_regs *) (swstack+1), 0);
+
+ p = do_fork(clone_flags & ~CLONE_IDLETASK,
+ usp, (struct pt_regs *) (swstack+1), 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int
alpha_vfork(struct switch_stack * swstack)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
- (struct pt_regs *) (swstack+1), 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
+ (struct pt_regs *) (swstack+1), 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
return 0;
}
-static int __init
+static struct task_struct * __init
fork_by_hand(void)
{
/* Don't care about the contents of regs since we'll never
reschedule the forked task. */
struct pt_regs regs;
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
}
/*
the other task-y sort of data structures set up like we
wish. We can't use kernel_thread since we must avoid
rescheduling the child. */
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpuid);
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d", cpuid);
-
init_idle(idle, cpuid);
unhash_process(idle);
*/
asmlinkage int sys_fork(struct pt_regs *regs)
{
- return do_fork(SIGCHLD, regs->ARM_sp, regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs->ARM_sp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/* Clone a task - this clones the calling program thread.
*/
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs)
{
+ struct task_struct *p;
if (!newsp)
newsp = regs->ARM_sp;
- return do_fork(clone_flags, newsp, regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_vfork(struct pt_regs *regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/* sys_execve() executes a new program.
asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
- return do_fork(SIGCHLD, rdusp(), regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, rdusp(), regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/* if newusp is 0, we just grab the old usp */
long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
+ struct task_struct *p;
if (!newusp)
newusp = rdusp();
- return do_fork(flags, newusp, regs, 0);
+ p = do_fork(flags & ~CLONE_IDLETASK, newusp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/* vfork is a system call in i386 because of register-pressure - maybe
asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
asmlinkage int sys_fork(struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.esp, &regs, 0);
+ struct task_struct *p;
+
+ p = do_fork(SIGCHLD, regs.esp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_clone(struct pt_regs regs)
{
+ struct task_struct *p;
unsigned long clone_flags;
unsigned long newsp;
newsp = regs.ecx;
if (!newsp)
newsp = regs.esp;
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
*/
asmlinkage int sys_vfork(struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
+ struct task_struct *p;
+
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
unsigned short ss;
} stack_start;
-static int __init fork_by_hand(void)
+static struct task_struct * __init fork_by_hand(void)
{
struct pt_regs regs;
/*
* don't care about the eip and regs settings since
* we'll never reschedule the forked task.
*/
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
}
/* which physical APIC ID maps to which logical CPU number */
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d", cpu);
-
init_idle(idle, cpu);
map_cpu_to_boot_apicid(cpu, apicid);
mov out3=16 // stacksize (compensates for 16-byte scratch area)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
zxt4 out0=in0 // out0 = clone_flags
br.call.sptk.many rp=do_fork
.ret0: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov ar.pfs=loc1
mov out1=0
mov out3=0
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
br.call.sptk.few rp=do_fork
.ret5: mov ar.pfs=loc1
.restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov out3=in2
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out0=in0 // out0 = clone_flags
br.call.sptk.many rp=do_fork
.ret1: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov ar.pfs=loc1
mov out3=16 // stacksize (compensates for 16-byte scratch area)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out0=in0 // out0 = clone_flags
br.call.sptk.many rp=do_fork
.ret2: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov ar.pfs=loc1
return cpu_idle();
}
-static int __init
+static struct task_struct * __init
fork_by_hand (void)
{
/*
* don't care about the eip and regs settings since
* we'll never reschedule the forked task.
*/
- return do_fork(CLONE_VM|CLONE_PID, 0, 0, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0);
}
static void __init
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d", cpu);
-
init_idle(idle, cpu);
ia64_cpu_to_sapicid[cpu] = sapicid;
asmlinkage int m68k_fork(struct pt_regs *regs)
{
- return do_fork(SIGCHLD, rdusp(), regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, rdusp(), regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int m68k_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
+ struct task_struct *p;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags = regs->d1;
newsp = regs->d2;
if (!newsp)
newsp = rdusp();
- return do_fork(clone_flags, newsp, regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* Spawn a new process normally. Grab a pointer to
its task struct so we can mess with it */
- do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
- p = prev_task(&init_task);
+ p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
/* Schedule the first task manually */
p->processor = i;
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
- kernel_thread(0, NULL, CLONE_PID);
+ kernel_thread(0, NULL, CLONE_IDLETASK);
p = prev_task(&init_task);
sprintf(p->comm, "%s%d", "Idle", i);
init_tasks[i] = p;
save_static_function(sys_fork);
static_unused int _sys_fork(struct pt_regs regs)
{
- int res;
+ struct task_struct *p;
- res = do_fork(SIGCHLD, regs.regs[29], &regs, 0);
- return res;
+ p = do_fork(SIGCHLD, regs.regs[29], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
{
unsigned long clone_flags;
unsigned long newsp;
- int res;
+ struct task_struct *p;
clone_flags = regs.regs[4];
newsp = regs.regs[5];
if (!newsp)
newsp = regs.regs[29];
- res = do_fork(clone_flags, newsp, &regs, 0);
- return res;
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
asmlinkage int sys_fork(abi64_no_regargs, struct pt_regs regs)
{
- int res;
+ struct task_struct *p;
save_static(&regs);
- res = do_fork(SIGCHLD, regs.regs[29], &regs, 0);
- return res;
+ p = do_fork(SIGCHLD, regs.regs[29], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_clone(abi64_no_regargs, struct pt_regs regs)
{
unsigned long clone_flags;
unsigned long newsp;
- int res;
+ struct task_struct *p;
save_static(&regs);
clone_flags = regs.regs[4];
newsp = regs.regs[5];
if (!newsp)
newsp = regs.regs[29];
- res = do_fork(clone_flags, newsp, &regs, 0);
- return res;
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
- kernel_thread(0, NULL, CLONE_PID);
+ kernel_thread(0, NULL, CLONE_IDLETASK);
p = prev_task(&init_task);
sprintf(p->comm, "%s%d", "Idle", num_cpus);
init_tasks[num_cpus] = p;
ldo CLONE_VM(%r0), %r26 /* Force CLONE_VM since only init_mm */
or %r26, %r24, %r26 /* will have kernel mappings. */
copy %r0, %r25
bl do_fork, %r2
copy %r1, %r24
/* Parent Returns here */
sys_clone(unsigned long clone_flags, unsigned long usp,
struct pt_regs *regs)
{
- return do_fork(clone_flags, usp, regs, 0);
+ struct task_struct *p;
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int
sys_vfork(struct pt_regs *regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
- regs->gr[30], regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
+ struct task_struct *p;
CHECK_FULL_REGS(regs);
- return do_fork(p1, regs->gpr[1], regs, 0);
+ p = do_fork(p1 & ~CLONE_IDLETASK, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
+ struct task_struct *p;
CHECK_FULL_REGS(regs);
- return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
+ p = do_fork(SIGCHLD, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
+ struct task_struct *p;
CHECK_FULL_REGS(regs);
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
/* create a process for the processor */
/* only regs.msr is actually used, and 0 is OK for it */
memset(&regs, 0, sizeof(struct pt_regs));
- if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
+ p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
+ if (IS_ERR(p))
panic("failed fork for CPU %d", i);
- p = prev_task(&init_task);
- if (!p)
- panic("No idle task for CPU %d", i);
init_idle(p, i);
unhash_process(p);
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
- return do_fork(p1, regs->gpr[1], regs, 0);
+ struct task_struct *p;
+ p = do_fork(p1 & ~CLONE_IDLETASK, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
- return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
memset(&regs, 0, sizeof(struct pt_regs));
- if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
+ p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
+ if (IS_ERR(p))
panic("failed fork for CPU %d", i);
- p = prev_task(&init_task);
- if (!p)
- panic("No idle task for CPU %d", i);
init_idle(p, i);
asmlinkage int sys_fork(struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.gprs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs.gprs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_clone(struct pt_regs regs)
{
unsigned long clone_flags;
unsigned long newsp;
+ struct task_struct *p;
clone_flags = regs.gprs[3];
newsp = regs.orig_gpr2;
if (!newsp)
newsp = regs.gprs[15];
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
*/
asmlinkage int sys_vfork(struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
- regs.gprs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.gprs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
{
}
-static int __init fork_by_hand(void)
+static struct task_struct *__init fork_by_hand(void)
{
struct pt_regs regs;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
memset(&regs,0,sizeof(struct pt_regs));
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
}
static void __init do_boot_cpu(int cpu)
/* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d",cpu);
idle->processor = cpu;
idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
asmlinkage int sys_fork(struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.gprs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs.gprs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_clone(struct pt_regs regs)
{
unsigned long clone_flags;
unsigned long newsp;
+ struct task_struct *p;
clone_flags = regs.gprs[3];
newsp = regs.orig_gpr2;
if (!newsp)
newsp = regs.gprs[15];
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
*/
asmlinkage int sys_vfork(struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
- regs.gprs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.gprs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
{
}
-static int __init fork_by_hand(void)
+static struct task_struct * __init fork_by_hand(void)
{
struct pt_regs regs;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
memset(&regs,0,sizeof(struct pt_regs));
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
}
static void __init do_boot_cpu(int cpu)
/* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d",cpu);
idle->processor = cpu;
idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.regs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs.regs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
+ struct task_struct *p;
if (!newsp)
newsp = regs.regs[15];
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
std %g4, [%curptr + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call C_LABEL(do_fork)
mov %l5, %o7
/* Whee, kernel threads! */
std %g4, [%curptr + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call C_LABEL(do_fork)
mov %l5, %o7
/* Whee, real vfork! */
sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
mov %fp, %o1
or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
sethi %hi(C_LABEL(do_fork)), %l1
mov 0, %o3
jmpl %l1 + %lo(C_LABEL(do_fork)), %g0
add %sp, REGWIN_SZ, %o2
.align 4
int no;
/* Cook up an idler for this guy. */
- kernel_thread(start_secondary, NULL, CLONE_PID);
+ kernel_thread(start_secondary, NULL, CLONE_IDLETASK);
cpucount++;
int timeout;
/* Cook up an idler for this guy. */
- kernel_thread(start_secondary, NULL, CLONE_PID);
+ kernel_thread(start_secondary, NULL, CLONE_IDLETASK);
cpucount++;
sys_clone: flushw
movrz %o1, %fp, %o1
mov 0, %o3
ba,pt %xcc, do_fork
add %sp, STACK_BIAS + REGWIN_SZ, %o2
ret_from_syscall:
/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
int no;
prom_printf("Starting CPU %d... ", i);
- kernel_thread(NULL, NULL, CLONE_PID);
+ kernel_thread(NULL, NULL, CLONE_IDLETASK);
cpucount++;
p = prev_task(&init_task);
asmlinkage int sys32_fork(struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.rsp, &regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs.rsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage int sys32_clone(unsigned int clone_flags, unsigned int newsp, struct pt_regs regs)
{
+ struct task_struct *p;
if (!newsp)
newsp = regs.rsp;
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
*/
asmlinkage int sys32_vfork(struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
movq %rsp, %rdx
# clone now
call do_fork
# save retval on the stack so it's popped before `ret`
movq %rax, RAX(%rsp)
asmlinkage long sys_fork(struct pt_regs regs)
{
- return do_fork(SIGCHLD, regs.rsp, &regs, 0);
+ struct task_struct *p;
+ p = do_fork(SIGCHLD, regs.rsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs regs)
{
+ struct task_struct *p;
if (!newsp)
newsp = regs.rsp;
- return do_fork(clone_flags, newsp, &regs, 0);
+ p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
*/
asmlinkage long sys_vfork(struct pt_regs regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0);
+ struct task_struct *p;
+ p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0);
+ return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
-static int __init fork_by_hand(void)
+static struct task_struct * __init fork_by_hand(void)
{
struct pt_regs regs;
/*
* don't care about the rip and regs settings since
* we'll never reschedule the forked task.
*/
- return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
+ return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
}
#if APIC_DEBUG
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
- if (fork_by_hand() < 0)
+ idle = fork_by_hand();
+ if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
- idle = prev_task(&init_task);
- if (!idle)
- panic("No idle process for CPU %d", cpu);
-
init_idle(idle,cpu);
x86_cpu_to_apicid[cpu] = apicid;
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
-#define CLONE_PID 0x00001000 /* set if pid shared */
+#define CLONE_IDLETASK 0x00001000 /* set if new pid should be 0 (kernel only)*/
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
extern task_t *child_reaper;
extern int do_execve(char *, char **, char **, struct pt_regs *);
-extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
+extern struct task_struct *do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
struct task_struct *p;
int pid;
- if (flags & CLONE_PID)
- return current->pid;
+ if (flags & CLONE_IDLETASK)
+ return 0;
spin_lock(&lastpid_lock);
if((++last_pid) & 0xffff8000) {
* For an example that's using stack_top, see
* arch/ia64/kernel/process.c.
*/
-int do_fork(unsigned long clone_flags, unsigned long stack_start,
- struct pt_regs *regs, unsigned long stack_size)
+struct task_struct *do_fork(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size)
{
int retval;
unsigned long flags;
- struct task_struct *p;
+ struct task_struct *p = NULL;
struct completion vfork;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
- return -EINVAL;
-
- retval = -EPERM;
-
- /*
- * CLONE_PID is only allowed for the initial SMP swapper
- * calls
- */
- if (clone_flags & CLONE_PID) {
- if (current->pid)
- goto fork_out;
- }
+ return ERR_PTR(-EINVAL);
retval = -ENOMEM;
p = dup_task_struct(current);
*
* Let it rip!
*/
- retval = p->pid;
- p->tgid = retval;
+ p->tgid = p->pid;
INIT_LIST_HEAD(&p->thread_group);
/* Need tasklist lock for parent etc handling! */
* COW overhead when the child exec()s afterwards.
*/
set_need_resched();
+ retval = 0;
fork_out:
- return retval;
+ if (retval)
+ return ERR_PTR(retval);
+ return p;
bad_fork_cleanup_namespace:
exit_namespace(p);