4 * Kernel internal timers, kernel timekeeping, basic process system calls
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
28 #include <linux/swap.h>
29 #include <linux/notifier.h>
30 #include <linux/thread_info.h>
31 #include <linux/time.h>
32 #include <linux/jiffies.h>
33 #include <linux/cpu.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/div64.h>
38 #include <asm/timex.h>
41 * per-CPU timer vector definitions:
45 #define TVN_SIZE (1 << TVN_BITS)
46 #define TVR_SIZE (1 << TVR_BITS)
47 #define TVN_MASK (TVN_SIZE - 1)
48 #define TVR_MASK (TVR_SIZE - 1)
50 typedef struct tvec_s {
51 struct list_head vec[TVN_SIZE];
54 typedef struct tvec_root_s {
55 struct list_head vec[TVR_SIZE];
58 struct tvec_t_base_s {
60 unsigned long timer_jiffies;
61 struct timer_list *running_timer;
67 } ____cacheline_aligned_in_smp;
69 typedef struct tvec_t_base_s tvec_base_t;
71 static inline void set_running_timer(tvec_base_t *base,
72 struct timer_list *timer)
75 base->running_timer = timer;
79 /* Fake initialization; the real per-CPU setup is done in init_timers_cpu() */
80 static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED };
82 static void check_timer_failed(struct timer_list *timer)
84 static int whine_count;
85 if (whine_count < 16) {
87 printk("Uninitialised timer!\n");
88 printk("This is just a warning. Your computer is OK\n");
89 printk("function=0x%p, data=0x%lx\n",
90 timer->function, timer->data);
96 spin_lock_init(&timer->lock);
97 timer->magic = TIMER_MAGIC;
100 static inline void check_timer(struct timer_list *timer)
102 if (timer->magic != TIMER_MAGIC)
103 check_timer_failed(timer);
107 static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
109 unsigned long expires = timer->expires;
110 unsigned long idx = expires - base->timer_jiffies;
111 struct list_head *vec;
113 if (idx < TVR_SIZE) {
114 int i = expires & TVR_MASK;
115 vec = base->tv1.vec + i;
116 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
117 int i = (expires >> TVR_BITS) & TVN_MASK;
118 vec = base->tv2.vec + i;
119 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
120 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
121 vec = base->tv3.vec + i;
122 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
123 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
124 vec = base->tv4.vec + i;
125 } else if ((signed long) idx < 0) {
127 * Can happen if you add a timer with expires == jiffies,
128 * or you set a timer to go off in the past
130 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
133 /* If the timeout is larger than 0xffffffff on 64-bit
134 * architectures then we use the maximum timeout:
136 if (idx > 0xffffffffUL) {
138 expires = idx + base->timer_jiffies;
140 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
141 vec = base->tv5.vec + i;
146 list_add_tail(&timer->entry, vec);
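/*
 * Worked example (illustrative note, assuming the usual TVR_BITS == 8 and
 * TVN_BITS == 6, i.e. TVR_SIZE == 256 and TVN_SIZE == 64): with
 * base->timer_jiffies == 1000 and expires == 1300, idx is 300.  That is
 * >= TVR_SIZE but < 1 << (TVR_BITS + TVN_BITS) == 16384, so the timer is
 * queued in tv2 at slot (1300 >> 8) & 63 == 5, and is cascaded down into
 * tv1 once timer_jiffies catches up with that tv2 slot.
 */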
149 int __mod_timer(struct timer_list *timer, unsigned long expires)
151 tvec_base_t *old_base, *new_base;
155 BUG_ON(!timer->function);
159 spin_lock_irqsave(&timer->lock, flags);
160 new_base = &__get_cpu_var(tvec_bases);
162 old_base = timer->base;
165 * Prevent deadlocks via ordering by old_base < new_base.
167 if (old_base && (new_base != old_base)) {
168 if (old_base < new_base) {
169 spin_lock(&new_base->lock);
170 spin_lock(&old_base->lock);
172 spin_lock(&old_base->lock);
173 spin_lock(&new_base->lock);
176 * The timer base might have been cancelled while we were
177 * trying to take the lock(s):
179 if (timer->base != old_base) {
180 spin_unlock(&new_base->lock);
181 spin_unlock(&old_base->lock);
185 spin_lock(&new_base->lock);
186 if (timer->base != old_base) {
187 spin_unlock(&new_base->lock);
193 * Delete the previous timeout (if there was any), and install the new one:
197 list_del(&timer->entry);
200 timer->expires = expires;
201 internal_add_timer(new_base, timer);
202 timer->base = new_base;
204 if (old_base && (new_base != old_base))
205 spin_unlock(&old_base->lock);
206 spin_unlock(&new_base->lock);
207 spin_unlock_irqrestore(&timer->lock, flags);
212 EXPORT_SYMBOL(__mod_timer);
215 * add_timer_on - start a timer on a particular CPU
216 * @timer: the timer to be added
217 * @cpu: the CPU to start it on
219 * This is not very scalable on SMP. Double adds are not possible.
221 void add_timer_on(struct timer_list *timer, int cpu)
223 tvec_base_t *base = &per_cpu(tvec_bases, cpu);
226 BUG_ON(timer_pending(timer) || !timer->function);
230 spin_lock_irqsave(&base->lock, flags);
231 internal_add_timer(base, timer);
233 spin_unlock_irqrestore(&base->lock, flags);
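/*
 * Illustrative sketch (not part of the original file): arming a timer on a
 * particular CPU, e.g. from per-CPU setup code.  The names example_timer,
 * example_fn and example_arm_on_cpu are hypothetical.
 */
static struct timer_list example_timer;

static void example_fn(unsigned long data)
{
	/* runs in softirq context on the CPU the timer was added on */
}

static void example_arm_on_cpu(int cpu)
{
	init_timer(&example_timer);
	example_timer.function = example_fn;
	example_timer.data = 0;
	example_timer.expires = jiffies + HZ;	/* fire one second from now */
	add_timer_on(&example_timer, cpu);
}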
237 * mod_timer - modify a timer's timeout
238 * @timer: the timer to be modified
240 * mod_timer is a more efficient way to update the expire field of an
241 * active timer (if the timer is inactive it will be activated)
243 * mod_timer(timer, expires) is equivalent to:
245 * del_timer(timer); timer->expires = expires; add_timer(timer);
247 * Note that if there are multiple unserialized concurrent users of the
248 * same timer, then mod_timer() is the only safe way to modify the timeout,
249 * since add_timer() cannot modify an already running timer.
251 * The function returns whether it has modified a pending timer or not.
252 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
253 * active timer returns 1.)
255 int mod_timer(struct timer_list *timer, unsigned long expires)
257 BUG_ON(!timer->function);
262 * This is a common optimization triggered by the
263 * networking code - if the timer is re-modified
264 * to be the same thing then just return:
266 if (timer->expires == expires && timer_pending(timer))
269 return __mod_timer(timer, expires);
272 EXPORT_SYMBOL(mod_timer);
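/*
 * Illustrative sketch (not part of the original file): the typical
 * watchdog-style use of mod_timer() - each call pushes the (already
 * initialised) timer further into the future, activating it if needed.
 * example_kick_watchdog is a hypothetical name.
 */
static void example_kick_watchdog(struct timer_list *wd_timer)
{
	/* returns 1 if the timer was still pending, 0 if it had to be re-added */
	mod_timer(wd_timer, jiffies + 5 * HZ);
}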
275 * del_timer - deactivate a timer.
276 * @timer: the timer to be deactivated
278 * del_timer() deactivates a timer - this works on both active and inactive timers.
281 * The function returns whether it has deactivated a pending timer or not.
282 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
283 * active timer returns 1.)
285 int del_timer(struct timer_list *timer)
296 spin_lock_irqsave(&base->lock, flags);
297 if (base != timer->base) {
298 spin_unlock_irqrestore(&base->lock, flags);
301 list_del(&timer->entry);
303 spin_unlock_irqrestore(&base->lock, flags);
308 EXPORT_SYMBOL(del_timer);
312 * del_timer_sync - deactivate a timer and wait for the handler to finish.
313 * @timer: the timer to be deactivated
315 * This function only differs from del_timer() on SMP: besides deactivating
316 * the timer it also makes sure the handler has finished executing on other CPUs.
319 * Synchronization rules: callers must prevent restarting of the timer,
320 * otherwise this function is meaningless. It must not be called from
321 * interrupt contexts. The caller must not hold locks which would prevent
322 * completion of the timer's handler. Upon exit the timer is not queued and
323 * the handler is not running on any CPU.
325 * The function returns whether it has deactivated a pending timer or not.
327 * del_timer_sync() is slow and complicated because it copes with timer
328 * handlers which re-arm the timer (periodic timers). If the timer handler
329 * is known to not do this (a single shot timer) then use
330 * del_singleshot_timer_sync() instead.
332 int del_timer_sync(struct timer_list *timer)
340 ret += del_timer(timer);
342 for_each_online_cpu(i) {
343 base = &per_cpu(tvec_bases, i);
344 if (base->running_timer == timer) {
345 while (base->running_timer == timer) {
347 preempt_check_resched();
353 if (timer_pending(timer))
358 EXPORT_SYMBOL(del_timer_sync);
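/*
 * Illustrative sketch (not part of the original file): shutting down a
 * self-rearming timer.  Per the rules above, the caller first stops the
 * handler from re-arming (the example_shutting_down flag is a hypothetical
 * way to do that), then waits for any running handler with del_timer_sync().
 */
static int example_shutting_down;

static void example_rearming_fn(unsigned long data)
{
	struct timer_list *self = (struct timer_list *)data;

	if (!example_shutting_down)
		mod_timer(self, jiffies + HZ);
}

static void example_stop(struct timer_list *timer)
{
	example_shutting_down = 1;
	del_timer_sync(timer);	/* must not be called from interrupt context */
}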
361 * del_singleshot_timer_sync - deactivate a non-recursive timer
362 * @timer: the timer to be deactivated
364 * This function is an optimization of del_timer_sync for the case where the
365 * caller can guarantee the timer does not reschedule itself in its timer function.
368 * Synchronization rules: callers must prevent restarting of the timer,
369 * otherwise this function is meaningless. It must not be called from
370 * interrupt contexts. The caller must not hold locks which would prevent
371 * completion of the timer's handler. Upon exit the timer is not queued and
372 * the handler is not running on any CPU.
374 * The function returns whether it has deactivated a pending timer or not.
376 int del_singleshot_timer_sync(struct timer_list *timer)
378 int ret = del_timer(timer);
381 ret = del_timer_sync(timer);
387 EXPORT_SYMBOL(del_singleshot_timer_sync);
390 static int cascade(tvec_base_t *base, tvec_t *tv, int index)
392 /* cascade all the timers from tv up one level */
393 struct list_head *head, *curr;
395 head = tv->vec + index;
398 * We are removing _all_ timers from the list, so we don't have to
399 * detach them individually, just clear the list afterwards.
401 while (curr != head) {
402 struct timer_list *tmp;
404 tmp = list_entry(curr, struct timer_list, entry);
405 BUG_ON(tmp->base != base);
407 internal_add_timer(base, tmp);
409 INIT_LIST_HEAD(head);
415 * __run_timers - run all expired timers (if any) on this CPU.
416 * @base: the timer vector to be processed.
418 * This function cascades all vectors and executes all expired timer vectors.
421 #define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
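/*
 * Illustrative note (assuming the usual TVR_BITS == 8, TVN_BITS == 6):
 * tv1 indexes the next TVR_SIZE jiffies directly.  Whenever the low
 * TVR_BITS of timer_jiffies wrap back to zero, the tv2 slot INDEX(0) is
 * cascaded down into tv1; whenever tv2 wraps as well, a tv3 slot is
 * cascaded, and so on up to tv5.
 */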
423 static inline void __run_timers(tvec_base_t *base)
425 struct timer_list *timer;
427 spin_lock_irq(&base->lock);
428 while (time_after_eq(jiffies, base->timer_jiffies)) {
429 struct list_head work_list = LIST_HEAD_INIT(work_list);
430 struct list_head *head = &work_list;
431 int index = base->timer_jiffies & TVR_MASK;
437 (!cascade(base, &base->tv2, INDEX(0))) &&
438 (!cascade(base, &base->tv3, INDEX(1))) &&
439 !cascade(base, &base->tv4, INDEX(2)))
440 cascade(base, &base->tv5, INDEX(3));
441 ++base->timer_jiffies;
442 list_splice_init(base->tv1.vec + index, &work_list);
444 if (!list_empty(head)) {
445 void (*fn)(unsigned long);
448 timer = list_entry(head->next,struct timer_list,entry);
449 fn = timer->function;
452 list_del(&timer->entry);
453 set_running_timer(base, timer);
456 spin_unlock_irq(&base->lock);
458 spin_lock_irq(&base->lock);
462 set_running_timer(base, NULL);
463 spin_unlock_irq(&base->lock);
466 #ifdef CONFIG_NO_IDLE_HZ
468 * Find out when the next timer event is due to happen. This
469 * is used on S/390 to stop all activity when a CPU is idle.
470 * This function needs to be called with interrupts disabled.
472 unsigned long next_timer_interrupt(void)
475 struct list_head *list;
476 struct timer_list *nte;
477 unsigned long expires;
481 base = &__get_cpu_var(tvec_bases);
482 spin_lock(&base->lock);
483 expires = base->timer_jiffies + (LONG_MAX >> 1);
486 /* Look for timer events in tv1. */
487 j = base->timer_jiffies & TVR_MASK;
489 list_for_each_entry(nte, base->tv1.vec + j, entry) {
490 expires = nte->expires;
491 if (j < (base->timer_jiffies & TVR_MASK))
492 list = base->tv2.vec + (INDEX(0));
495 j = (j + 1) & TVR_MASK;
496 } while (j != (base->timer_jiffies & TVR_MASK));
499 varray[0] = &base->tv2;
500 varray[1] = &base->tv3;
501 varray[2] = &base->tv4;
502 varray[3] = &base->tv5;
503 for (i = 0; i < 4; i++) {
506 if (list_empty(varray[i]->vec + j)) {
507 j = (j + 1) & TVN_MASK;
510 list_for_each_entry(nte, varray[i]->vec + j, entry)
511 if (time_before(nte->expires, expires))
512 expires = nte->expires;
513 if (j < (INDEX(i)) && i < 3)
514 list = varray[i + 1]->vec + (INDEX(i + 1));
516 } while (j != (INDEX(i)));
521 * The search wrapped. We need to look at the next list
522 * from the next tv element that would cascade into the tv
523 * element where we found the timer element.
525 list_for_each_entry(nte, list, entry) {
526 if (time_before(nte->expires, expires))
527 expires = nte->expires;
530 spin_unlock(&base->lock);
535 /******************************************************************/
538 * Timekeeping variables
540 unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
541 unsigned long tick_nsec = TICK_NSEC; /* ACTHZ period (nsec) */
545 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
546 * for sub-jiffy times) to get to monotonic time. Monotonic is pegged at zero
547 * at system boot time, so wall_to_monotonic will be negative,
548 * however, we will ALWAYS keep the tv_nsec part positive so we can use
549 * the usual normalization.
551 struct timespec xtime __attribute__ ((aligned (16)));
552 struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
554 EXPORT_SYMBOL(xtime);
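/*
 * Illustrative sketch (not part of the original file): how a reader turns
 * xtime into a monotonic timestamp by adding wall_to_monotonic, sampling
 * both under xtime_lock (the sysinfo code below does the same dance via
 * do_gettimeofday()).
 */
static void example_get_monotonic(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		ts->tv_sec += wall_to_monotonic.tv_sec;
		ts->tv_nsec += wall_to_monotonic.tv_nsec;
	} while (read_seqretry(&xtime_lock, seq));

	if (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}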
556 /* Don't completely fail for HZ > 500. */
557 int tickadj = 500/HZ ? : 1; /* microsecs */
561 * phase-lock loop variables
563 /* TIME_ERROR prevents overwriting the CMOS clock */
564 int time_state = TIME_OK; /* clock synchronization status */
565 int time_status = STA_UNSYNC; /* clock status bits */
566 long time_offset; /* time adjustment (us) */
567 long time_constant = 2; /* pll time constant */
568 long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
569 long time_precision = 1; /* clock precision (us) */
570 long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
571 long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
572 long time_phase; /* phase offset (scaled us) */
573 long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
574 /* frequency offset (scaled ppm)*/
575 long time_adj; /* tick adjust (scaled 1 / HZ) */
576 long time_reftime; /* time at last adjustment (s) */
578 long time_next_adjust;
581 * this routine handles the overflow of the microsecond field
583 * The tricky bits of code to handle the accurate clock support
584 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
585 * They were originally developed for SUN and DEC kernels.
586 * All the kudos should go to Dave for this stuff.
589 static void second_overflow(void)
593 /* Bump the maxerror field */
594 time_maxerror += time_tolerance >> SHIFT_USEC;
595 if ( time_maxerror > NTP_PHASE_LIMIT ) {
596 time_maxerror = NTP_PHASE_LIMIT;
597 time_status |= STA_UNSYNC;
601 * Leap second processing. If in leap-insert state at
602 * the end of the day, the system clock is set back one
603 * second; if in leap-delete state, the system clock is
604 * set ahead one second. The microtime() routine or
605 * external clock driver will ensure that reported time
606 * is always monotonic. The ugly divides should be replaced.
609 switch (time_state) {
612 if (time_status & STA_INS)
613 time_state = TIME_INS;
614 else if (time_status & STA_DEL)
615 time_state = TIME_DEL;
619 if (xtime.tv_sec % 86400 == 0) {
621 wall_to_monotonic.tv_sec++;
622 time_interpolator_update(-NSEC_PER_SEC);
623 time_state = TIME_OOP;
625 printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
630 if ((xtime.tv_sec + 1) % 86400 == 0) {
632 wall_to_monotonic.tv_sec--;
633 time_interpolator_update(NSEC_PER_SEC);
634 time_state = TIME_WAIT;
636 printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
641 time_state = TIME_WAIT;
645 if (!(time_status & (STA_INS | STA_DEL)))
646 time_state = TIME_OK;
650 * Compute the phase adjustment for the next second. In
651 * PLL mode, the offset is reduced by a fixed factor
652 * times the time constant. In FLL mode the offset is
653 * used directly. In either mode, the maximum phase
654 * adjustment for each second is clamped so as to spread
655 * the adjustment over not more than the number of
656 * seconds between updates.
658 if (time_offset < 0) {
659 ltemp = -time_offset;
660 if (!(time_status & STA_FLL))
661 ltemp >>= SHIFT_KG + time_constant;
662 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
663 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
664 time_offset += ltemp;
665 time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
668 if (!(time_status & STA_FLL))
669 ltemp >>= SHIFT_KG + time_constant;
670 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
671 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
672 time_offset -= ltemp;
673 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
677 * Compute the frequency estimate and additional phase
678 * adjustment due to frequency error for the next
679 * second. When the PPS signal is engaged, gnaw on the
680 * watchdog counter and update the frequency computed by
681 * the pll and the PPS signal.
684 if (pps_valid == PPS_VALID) { /* PPS signal lost */
685 pps_jitter = MAXTIME;
686 pps_stabil = MAXFREQ;
687 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
688 STA_PPSWANDER | STA_PPSERROR);
690 ltemp = time_freq + pps_freq;
692 time_adj -= -ltemp >>
693 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
696 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
699 /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
700 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
703 time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
705 time_adj += (time_adj >> 2) + (time_adj >> 5);
708 /* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
709 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
712 time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
714 time_adj += (time_adj >> 6) + (time_adj >> 7);
718 /* in the NTP reference this is called "hardclock()" */
719 static void update_wall_time_one_tick(void)
721 long time_adjust_step, delta_nsec;
723 if ( (time_adjust_step = time_adjust) != 0 ) {
724 /* We are doing an adjtime thing.
726 * Prepare time_adjust_step to be within bounds.
727 * Note that a positive time_adjust means we want the clock to run faster.
730 * Limit the amount of the step to be in the range
731 * -tickadj .. +tickadj
733 if (time_adjust > tickadj)
734 time_adjust_step = tickadj;
735 else if (time_adjust < -tickadj)
736 time_adjust_step = -tickadj;
738 /* Reduce by this step the amount of time left */
739 time_adjust -= time_adjust_step;
741 delta_nsec = tick_nsec + time_adjust_step * 1000;
743 * Advance the phase, once it gets to one microsecond, then
744 * advance the tick more.
746 time_phase += time_adj;
747 if (time_phase <= -FINENSEC) {
748 long ltemp = -time_phase >> (SHIFT_SCALE - 10);
749 time_phase += ltemp << (SHIFT_SCALE - 10);
752 else if (time_phase >= FINENSEC) {
753 long ltemp = time_phase >> (SHIFT_SCALE - 10);
754 time_phase -= ltemp << (SHIFT_SCALE - 10);
757 xtime.tv_nsec += delta_nsec;
758 time_interpolator_update(delta_nsec);
760 /* Changes by adjtime() do not take effect till next tick. */
761 if (time_next_adjust != 0) {
762 time_adjust = time_next_adjust;
763 time_next_adjust = 0;
768 * Using a loop looks inefficient, but "ticks" is
769 * usually just one (we shouldn't be losing ticks,
770 * we're doing it this way mainly for interrupt
771 * latency reasons, not because we think we'll
772 * have lots of lost timer ticks).
774 static void update_wall_time(unsigned long ticks)
778 update_wall_time_one_tick();
781 if (xtime.tv_nsec >= 1000000000) {
782 xtime.tv_nsec -= 1000000000;
788 static inline void do_process_times(struct task_struct *p,
789 unsigned long user, unsigned long system)
793 psecs = (p->utime += user);
794 psecs += (p->stime += system);
795 if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
796 /* Send SIGXCPU every second.. */
798 send_sig(SIGXCPU, p, 1);
799 /* and SIGKILL when we go over max.. */
800 if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
801 send_sig(SIGKILL, p, 1);
805 static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
807 unsigned long it_virt = p->it_virt_value;
812 it_virt = p->it_virt_incr;
813 send_sig(SIGVTALRM, p, 1);
815 p->it_virt_value = it_virt;
819 static inline void do_it_prof(struct task_struct *p)
821 unsigned long it_prof = p->it_prof_value;
824 if (--it_prof == 0) {
825 it_prof = p->it_prof_incr;
826 send_sig(SIGPROF, p, 1);
828 p->it_prof_value = it_prof;
832 static void update_one_process(struct task_struct *p, unsigned long user,
833 unsigned long system, int cpu)
835 do_process_times(p, user, system);
841 * Called from the timer interrupt handler to charge one tick to the current
842 * process. user_tick is 1 if the tick is user time, 0 for system.
844 void update_process_times(int user_tick)
846 struct task_struct *p = current;
847 int cpu = smp_processor_id(), system = user_tick ^ 1;
849 update_one_process(p, user_tick, system, cpu);
851 scheduler_tick(user_tick, system);
855 * Nr of active tasks - counted in fixed-point numbers
857 static unsigned long count_active_tasks(void)
859 return (nr_running() + nr_uninterruptible()) * FIXED_1;
863 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
864 * imply that avenrun[] is the standard name for this kind of thing.
865 * Nothing else seems to be standardized: the fractional size etc
866 * all seem to differ on different machines.
868 * Requires xtime_lock to access.
870 unsigned long avenrun[3];
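/*
 * Illustrative sketch (not part of the original file): avenrun[] is fixed
 * point with FSHIFT fractional bits (FIXED_1 == 1 << FSHIFT), so a reader
 * such as /proc/loadavg converts it roughly as below, using the LOAD_INT()
 * and LOAD_FRAC() helpers from <linux/sched.h>.  Real readers sample
 * avenrun[] under xtime_lock.
 */
static void example_print_loadavg(void)
{
	unsigned long a = avenrun[0];

	printk("1 min load average: %lu.%02lu\n", LOAD_INT(a), LOAD_FRAC(a));
}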
873 * calc_load - given tick count, update the avenrun load estimates.
874 * This is called with xtime_lock held for writing.
876 static inline void calc_load(unsigned long ticks)
878 unsigned long active_tasks; /* fixed-point */
879 static int count = LOAD_FREQ;
884 active_tasks = count_active_tasks();
885 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
886 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
887 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
891 /* jiffies at the most recent update of wall time */
892 unsigned long wall_jiffies = INITIAL_JIFFIES;
895 * This seqlock protects us from races in SMP while
896 * playing with xtime and avenrun.
898 #ifndef ARCH_HAVE_XTIME_LOCK
899 seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
901 EXPORT_SYMBOL(xtime_lock);
905 * This function runs timers and the timer-tq in bottom half context.
907 static void run_timer_softirq(struct softirq_action *h)
909 tvec_base_t *base = &__get_cpu_var(tvec_bases);
911 if (time_after_eq(jiffies, base->timer_jiffies))
916 * Called by the local, per-CPU timer interrupt on SMP.
918 void run_local_timers(void)
920 raise_softirq(TIMER_SOFTIRQ);
924 * Called by the timer interrupt. xtime_lock must already be taken by the caller.
927 static inline void update_times(void)
931 ticks = jiffies - wall_jiffies;
933 wall_jiffies += ticks;
934 update_wall_time(ticks);
940 * The 64-bit jiffies value is not atomic - you MUST NOT read it
941 * without sampling the sequence number in xtime_lock.
942 * jiffies is defined in the linker script...
945 void do_timer(struct pt_regs *regs)
949 /* SMP process accounting uses the local APIC timer */
951 update_process_times(user_mode(regs));
956 #ifdef __ARCH_WANT_SYS_ALARM
959 * For backwards compatibility? This can be done in libc so Alpha
960 * and all newer ports shouldn't need it.
962 asmlinkage unsigned long sys_alarm(unsigned int seconds)
964 struct itimerval it_new, it_old;
965 unsigned int oldalarm;
967 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
968 it_new.it_value.tv_sec = seconds;
969 it_new.it_value.tv_usec = 0;
970 do_setitimer(ITIMER_REAL, &it_new, &it_old);
971 oldalarm = it_old.it_value.tv_sec;
972 /* ehhh.. We can't return 0 if we have an alarm pending.. */
973 /* And we'd better return too much than too little anyway */
974 if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
984 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
985 * should be moved into arch/i386 instead?
989 * sys_getpid - return the thread group id of the current process
991 * Note, despite the name, this returns the tgid not the pid. The tgid and
992 * the pid are identical unless CLONE_THREAD was specified on clone() in
993 * which case the tgid is the same in all threads of the same group.
995 * This is SMP safe as current->tgid does not change.
997 asmlinkage long sys_getpid(void)
999 return current->tgid;
1003 * Accessing ->group_leader->real_parent is not SMP-safe, it could
1004 * change from under us. However, rather than getting any lock
1005 * we can use an optimistic algorithm: get the parent
1006 * pid, and go back and check that the parent is still
1007 * the same. If it has changed (which is extremely unlikely
1008 * indeed), we just try again..
1010 * NOTE! This depends on the fact that even if we _do_
1011 * get an old value of "parent", we can happily dereference
1012 * the pointer (it was and remains a dereferenceable kernel pointer
1013 * no matter what): we just can't necessarily trust the result
1014 * until we know that the parent pointer is valid.
1016 * NOTE2: ->group_leader never changes from under us.
1018 asmlinkage long sys_getppid(void)
1021 struct task_struct *me = current;
1022 struct task_struct *parent;
1024 parent = me->group_leader->real_parent;
1029 struct task_struct *old = parent;
1032 * Make sure we read the pid before re-reading the parent pointer:
1036 parent = me->group_leader->real_parent;
1046 asmlinkage long sys_getuid(void)
1048 /* Only we change this so SMP safe */
1049 return current->uid;
1052 asmlinkage long sys_geteuid(void)
1054 /* Only we change this so SMP safe */
1055 return current->euid;
1058 asmlinkage long sys_getgid(void)
1060 /* Only we change this so SMP safe */
1061 return current->gid;
1064 asmlinkage long sys_getegid(void)
1066 /* Only we change this so SMP safe */
1067 return current->egid;
1072 static void process_timeout(unsigned long __data)
1074 wake_up_process((task_t *)__data);
1078 * schedule_timeout - sleep until timeout
1079 * @timeout: timeout value in jiffies
1081 * Make the current task sleep until @timeout jiffies have
1082 * elapsed. The routine will return immediately unless
1083 * the current task state has been set (see set_current_state()).
1085 * You can set the task state as follows -
1087 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1088 * pass before the routine returns. The routine will return 0 in this case.
1090 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1091 * delivered to the current task. In this case the remaining time
1092 * in jiffies will be returned, or 0 if the timer expired in time
1094 * The current task state is guaranteed to be TASK_RUNNING when this routine returns.
1097 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1098 * the CPU away without a bound on the timeout. In this case the return
1099 * value will be %MAX_SCHEDULE_TIMEOUT.
1101 * In all cases the return value is guaranteed to be non-negative.
1103 fastcall signed long __sched schedule_timeout(signed long timeout)
1105 struct timer_list timer;
1106 unsigned long expire;
1110 case MAX_SCHEDULE_TIMEOUT:
1112 * These two special cases are useful for the caller's
1113 * convenience. Nothing more. We could take
1114 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1115 * but I'd like to return a valid offset (>=0) to allow
1116 * the caller to do everything it wants with the retval.
1122 * Another bit of PARANOID. Note that the retval will be
1123 * 0 since no piece of the kernel is supposed to check
1124 * for a negative retval of schedule_timeout() (since it
1125 * should never happen anyway). You just have the printk()
1126 * that will tell you if something has gone wrong and where.
1130 printk(KERN_ERR "schedule_timeout: wrong timeout "
1131 "value %lx from %p\n", timeout,
1132 __builtin_return_address(0));
1133 current->state = TASK_RUNNING;
1138 expire = timeout + jiffies;
1141 timer.expires = expire;
1142 timer.data = (unsigned long) current;
1143 timer.function = process_timeout;
1147 del_singleshot_timer_sync(&timer);
1149 timeout = expire - jiffies;
1152 return timeout < 0 ? 0 : timeout;
1155 EXPORT_SYMBOL(schedule_timeout);
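/*
 * Illustrative sketch (not part of the original file): the canonical
 * calling pattern - set the task state *before* calling schedule_timeout(),
 * as the kernel-doc above requires.
 */
static signed long example_sleep_one_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* returns the jiffies still remaining if a signal woke us up early */
	return schedule_timeout(HZ);
}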
1157 /* Thread ID - the internal kernel "pid" */
1158 asmlinkage long sys_gettid(void)
1160 return current->pid;
1163 static long __sched nanosleep_restart(struct restart_block *restart)
1165 unsigned long expire = restart->arg0, now = jiffies;
1166 struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
1169 /* Did it expire while we handled signals? */
1170 if (!time_after(expire, now))
1173 current->state = TASK_INTERRUPTIBLE;
1174 expire = schedule_timeout(expire - now);
1179 jiffies_to_timespec(expire, &t);
1181 ret = -ERESTART_RESTARTBLOCK;
1182 if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
1184 /* The 'restart' block is already filled in */
1189 asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
1192 unsigned long expire;
1195 if (copy_from_user(&t, rqtp, sizeof(t)))
1198 if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
1201 expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
1202 current->state = TASK_INTERRUPTIBLE;
1203 expire = schedule_timeout(expire);
1207 struct restart_block *restart;
1208 jiffies_to_timespec(expire, &t);
1209 if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
1212 restart = &current_thread_info()->restart_block;
1213 restart->fn = nanosleep_restart;
1214 restart->arg0 = jiffies + expire;
1215 restart->arg1 = (unsigned long) rmtp;
1216 ret = -ERESTART_RESTARTBLOCK;
1222 * sys_sysinfo - fill in sysinfo struct
1224 asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1227 unsigned long mem_total, sav_total;
1228 unsigned int mem_unit, bitcount;
1231 memset((char *)&val, 0, sizeof(struct sysinfo));
1235 seq = read_seqbegin(&xtime_lock);
1238 * This is annoying. The below is the same thing
1239 * posix_get_clock_monotonic() does, but it wants to
1240 * take the lock which we want to cover the loads stuff too.
1244 do_gettimeofday((struct timeval *)&tp);
1245 tp.tv_nsec *= NSEC_PER_USEC;
1246 tp.tv_sec += wall_to_monotonic.tv_sec;
1247 tp.tv_nsec += wall_to_monotonic.tv_nsec;
1248 if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1249 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1252 val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1254 val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1255 val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1256 val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1258 val.procs = nr_threads;
1259 } while (read_seqretry(&xtime_lock, seq));
1265 * If the sum of all the available memory (i.e. ram + swap)
1266 * is less than can be stored in a 32 bit unsigned long then
1267 * we can be binary compatible with 2.2.x kernels. If not,
1268 * well, in that case 2.2.x was broken anyways...
1270 * -Erik Andersen <andersee@debian.org>
1273 mem_total = val.totalram + val.totalswap;
1274 if (mem_total < val.totalram || mem_total < val.totalswap)
1277 mem_unit = val.mem_unit;
1278 while (mem_unit > 1) {
1281 sav_total = mem_total;
1283 if (mem_total < sav_total)
1288 * If mem_total did not overflow, multiply all memory values by
1289 * val.mem_unit and set it to 1. This leaves things compatible
1290 * with 2.2.x, and also retains compatibility with earlier 2.4.x kernels.
1295 val.totalram <<= bitcount;
1296 val.freeram <<= bitcount;
1297 val.sharedram <<= bitcount;
1298 val.bufferram <<= bitcount;
1299 val.totalswap <<= bitcount;
1300 val.freeswap <<= bitcount;
1301 val.totalhigh <<= bitcount;
1302 val.freehigh <<= bitcount;
1305 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1311 static void __devinit init_timers_cpu(int cpu)
1316 base = &per_cpu(tvec_bases, cpu);
1317 spin_lock_init(&base->lock);
1318 for (j = 0; j < TVN_SIZE; j++) {
1319 INIT_LIST_HEAD(base->tv5.vec + j);
1320 INIT_LIST_HEAD(base->tv4.vec + j);
1321 INIT_LIST_HEAD(base->tv3.vec + j);
1322 INIT_LIST_HEAD(base->tv2.vec + j);
1324 for (j = 0; j < TVR_SIZE; j++)
1325 INIT_LIST_HEAD(base->tv1.vec + j);
1327 base->timer_jiffies = jiffies;
1330 #ifdef CONFIG_HOTPLUG_CPU
1331 static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
1333 struct timer_list *timer;
1335 while (!list_empty(head)) {
1336 timer = list_entry(head->next, struct timer_list, entry);
1337 /* We're locking backwards from __mod_timer order here, beware deadlock. */
1339 if (!spin_trylock(&timer->lock))
1341 list_del(&timer->entry);
1342 internal_add_timer(new_base, timer);
1343 timer->base = new_base;
1344 spin_unlock(&timer->lock);
1349 static void __devinit migrate_timers(int cpu)
1351 tvec_base_t *old_base;
1352 tvec_base_t *new_base;
1355 BUG_ON(cpu_online(cpu));
1356 old_base = &per_cpu(tvec_bases, cpu);
1357 new_base = &get_cpu_var(tvec_bases);
1359 local_irq_disable();
1361 /* Prevent deadlocks via ordering by old_base < new_base. */
1362 if (old_base < new_base) {
1363 spin_lock(&new_base->lock);
1364 spin_lock(&old_base->lock);
1366 spin_lock(&old_base->lock);
1367 spin_lock(&new_base->lock);
1370 if (old_base->running_timer)
1372 for (i = 0; i < TVR_SIZE; i++)
1373 if (!migrate_timer_list(new_base, old_base->tv1.vec + i))
1375 for (i = 0; i < TVN_SIZE; i++)
1376 if (!migrate_timer_list(new_base, old_base->tv2.vec + i)
1377 || !migrate_timer_list(new_base, old_base->tv3.vec + i)
1378 || !migrate_timer_list(new_base, old_base->tv4.vec + i)
1379 || !migrate_timer_list(new_base, old_base->tv5.vec + i))
1381 spin_unlock(&old_base->lock);
1382 spin_unlock(&new_base->lock);
1384 put_cpu_var(tvec_bases);
1388 /* Avoid deadlock with __mod_timer, by backing off. */
1389 spin_unlock(&old_base->lock);
1390 spin_unlock(&new_base->lock);
1394 #endif /* CONFIG_HOTPLUG_CPU */
1396 static int __devinit timer_cpu_notify(struct notifier_block *self,
1397 unsigned long action, void *hcpu)
1399 long cpu = (long)hcpu;
1401 case CPU_UP_PREPARE:
1402 init_timers_cpu(cpu);
1404 #ifdef CONFIG_HOTPLUG_CPU
1406 migrate_timers(cpu);
1415 static struct notifier_block __devinitdata timers_nb = {
1416 .notifier_call = timer_cpu_notify,
1420 void __init init_timers(void)
1422 timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1423 (void *)(long)smp_processor_id());
1424 register_cpu_notifier(&timers_nb);
1425 open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
1428 #ifdef CONFIG_TIME_INTERPOLATION
1429 volatile unsigned long last_nsec_offset;
1430 #ifndef __HAVE_ARCH_CMPXCHG
1431 spinlock_t last_nsec_offset_lock = SPIN_LOCK_UNLOCKED;
1434 struct time_interpolator *time_interpolator;
1435 static struct time_interpolator *time_interpolator_list;
1436 static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
1439 is_better_time_interpolator(struct time_interpolator *new)
1441 if (!time_interpolator)
1443 return new->frequency > 2*time_interpolator->frequency ||
1444 (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
1448 register_time_interpolator(struct time_interpolator *ti)
1450 spin_lock(&time_interpolator_lock);
1451 write_seqlock_irq(&xtime_lock);
1452 if (is_better_time_interpolator(ti))
1453 time_interpolator = ti;
1454 write_sequnlock_irq(&xtime_lock);
1456 ti->next = time_interpolator_list;
1457 time_interpolator_list = ti;
1458 spin_unlock(&time_interpolator_lock);
1462 unregister_time_interpolator(struct time_interpolator *ti)
1464 struct time_interpolator *curr, **prev;
1466 spin_lock(&time_interpolator_lock);
1467 prev = &time_interpolator_list;
1468 for (curr = *prev; curr; curr = curr->next) {
1476 write_seqlock_irq(&xtime_lock);
1477 if (ti == time_interpolator) {
1478 /* we lost the best time-interpolator: */
1479 time_interpolator = NULL;
1480 /* find the next-best interpolator */
1481 for (curr = time_interpolator_list; curr; curr = curr->next)
1482 if (is_better_time_interpolator(curr))
1483 time_interpolator = curr;
1485 write_sequnlock_irq(&xtime_lock);
1486 spin_unlock(&time_interpolator_lock);
1488 #endif /* CONFIG_TIME_INTERPOLATION */
1491 * msleep - sleep safely even with waitqueue interruptions
1492 * @msecs: Time in milliseconds to sleep for
1494 void msleep(unsigned int msecs)
1496 unsigned long timeout = msecs_to_jiffies(msecs);
1499 set_current_state(TASK_UNINTERRUPTIBLE);
1500 timeout = schedule_timeout(timeout);
1504 EXPORT_SYMBOL(msleep);
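/*
 * Illustrative sketch (not part of the original file): a bounded polling
 * loop built on msleep().  example_device_ready() is a hypothetical
 * placeholder for a real hardware check.
 */
static int example_device_ready(void)
{
	return 1;	/* placeholder */
}

static int example_wait_for_device(void)
{
	unsigned long deadline = jiffies + 2 * HZ;

	while (!example_device_ready()) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}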