Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index bf7e7b5..ecc904b 100644
 #include <linux/time.h>
 #include <linux/interrupt.h>
 #include <linux/efi.h>
-#include <linux/profile.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
+#include <linux/platform_device.h>
 
 #include <asm/machvec.h>
 #include <asm/delay.h>
 #include <asm/hw_irq.h>
+#include <asm/paravirt.h>
 #include <asm/ptrace.h>
 #include <asm/sal.h>
 #include <asm/sections.h>
-#include <asm/system.h>
+
+#include "fsyscall_gtod_data.h"
+
+static cycle_t itc_get_cycles(struct clocksource *cs);
+
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
+
+struct itc_jitter_data_t itc_jitter_data;
 
 volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
 
@@ -38,18 +47,123 @@ EXPORT_SYMBOL(last_cli_ip);
 
 #endif
 
-static struct time_interpolator itc_interpolator = {
-       .shift = 16,
-       .mask = 0xffffffffffffffffLL,
-       .source = TIME_SOURCE_CPU
+#ifdef CONFIG_PARAVIRT
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+unsigned long long sched_clock(void)
+{
+        return paravirt_sched_clock();
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT
+static void
+paravirt_clocksource_resume(struct clocksource *cs)
+{
+       if (pv_time_ops.clocksource_resume)
+               pv_time_ops.clocksource_resume();
+}
+#endif
+
+static struct clocksource clocksource_itc = {
+       .name           = "itc",
+       .rating         = 350,
+       .read           = itc_get_cycles,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+#ifdef CONFIG_PARAVIRT
+       .resume         = paravirt_clocksource_resume,
+#endif
 };
+static struct clocksource *itc_clocksource;
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#include <linux/kernel_stat.h>
+
+extern cputime_t cycle_to_cputime(u64 cyc);
+
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+{
+       struct thread_info *pi = task_thread_info(prev);
+       struct thread_info *ni = task_thread_info(next);
+       cputime_t delta_stime, delta_utime;
+       __u64 now;
+
+       now = ia64_get_itc();
+
+       delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
+       if (idle_task(smp_processor_id()) != prev)
+               account_system_time(prev, 0, delta_stime, delta_stime);
+       else
+               account_idle_time(delta_stime);
+
+       if (pi->ac_utime) {
+               delta_utime = cycle_to_cputime(pi->ac_utime);
+               account_user_time(prev, delta_utime, delta_utime);
+       }
+
+       pi->ac_stamp = ni->ac_stamp = now;
+       ni->ac_stime = ni->ac_utime = 0;
+}
+
+/*
+ * Account time for a transition between system, hard irq or soft irq state.
+ * Note that this function is called with interrupts enabled.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+       struct thread_info *ti = task_thread_info(tsk);
+       unsigned long flags;
+       cputime_t delta_stime;
+       __u64 now;
+
+       local_irq_save(flags);
+
+       now = ia64_get_itc();
+
+       delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
+       if (irq_count() || idle_task(smp_processor_id()) != tsk)
+               account_system_time(tsk, 0, delta_stime, delta_stime);
+       else
+               account_idle_time(delta_stime);
+       ti->ac_stime = 0;
+
+       ti->ac_stamp = now;
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+/*
+ * Called from the timer interrupt handler to charge accumulated user time
+ * to the current process.  Must be called with interrupts disabled.
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+       struct thread_info *ti = task_thread_info(p);
+       cputime_t delta_utime;
+
+       if (ti->ac_utime) {
+               delta_utime = cycle_to_cputime(ti->ac_utime);
+               account_user_time(p, delta_utime, delta_utime);
+               ti->ac_utime = 0;
+       }
+}
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
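The CONFIG_VIRT_CPU_ACCOUNTING code above keeps raw ITC cycle counts in thread_info
(ac_stamp, ac_stime, ac_utime) and only converts them to cputime when they are charged.
A minimal user-space model of the context-switch bookkeeping, with made-up names and
values rather than the kernel's types, could look like this:

        #include <stdint.h>
        #include <stdio.h>

        struct acct {
                uint64_t ac_stamp;      /* counter value at the last accounting point */
                uint64_t ac_stime;      /* accumulated system-time cycles */
                uint64_t ac_utime;      /* accumulated user-time cycles */
        };

        /* Charge everything 'prev' accumulated up to 'now', then restart both
         * tasks' accounting from 'now' (mirrors ia64_account_on_switch above). */
        static void account_on_switch(struct acct *prev, struct acct *next,
                                      uint64_t now, uint64_t *stime, uint64_t *utime)
        {
                *stime = prev->ac_stime + (now - prev->ac_stamp);
                *utime = prev->ac_utime;

                prev->ac_stamp = next->ac_stamp = now;
                next->ac_stime = next->ac_utime = 0;
        }

        int main(void)
        {
                struct acct prev = { .ac_stamp = 1000, .ac_stime = 50, .ac_utime = 200 };
                struct acct next = { 0 };
                uint64_t stime, utime;

                account_on_switch(&prev, &next, 1500, &stime, &utime);
                printf("charge %llu system cycles, %llu user cycles\n",
                       (unsigned long long)stime, (unsigned long long)utime);
                return 0;
        }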
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
        unsigned long new_itm;
 
-       if (unlikely(cpu_is_offline(smp_processor_id()))) {
+       if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }
 
@@ -63,24 +177,18 @@ timer_interrupt (int irq, void *dev_id)
 
        profile_tick(CPU_PROFILING);
 
+       if (paravirt_do_steal_accounting(&new_itm))
+               goto skip_process_time_accounting;
+
        while (1) {
                update_process_times(user_mode(get_irq_regs()));
 
                new_itm += local_cpu_data->itm_delta;
 
-               if (smp_processor_id() == time_keeper_id) {
-                       /*
-                        * Here we are in the timer irq handler. We have irqs locally
-                        * disabled, but we don't know if the timer_bh is running on
-                        * another CPU. We need to avoid to SMP race by acquiring the
-                        * xtime_lock.
-                        */
-                       write_seqlock(&xtime_lock);
-                       do_timer(1);
-                       local_cpu_data->itm_next = new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else
-                       local_cpu_data->itm_next = new_itm;
+               if (smp_processor_id() == time_keeper_id)
+                       xtime_update(1);
+
+               local_cpu_data->itm_next = new_itm;
 
                if (time_after(new_itm, ia64_get_itc()))
                        break;
@@ -92,13 +200,15 @@ timer_interrupt (int irq, void *dev_id)
                local_irq_disable();
        }
 
+skip_process_time_accounting:
+
        do {
                /*
                 * If we're too close to the next clock tick for
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
-                * do_timer() which in turn would let our clock run
+                * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotony of time).
                 */
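As a rough sketch of the tick-dropping policy described in the comment above, the next
match value is pushed forward by whole tick periods until it is comfortably ahead of the
current counter. The half-tick safety margin and the constants below are illustrative
only, not taken from this file:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t now   = 1000000;       /* current counter reading */
                uint64_t delta = 4000;          /* cycles per tick (itm_delta) */
                uint64_t next  = 1000500;       /* next programmed match value */

                /* Skip whole ticks until the match is at least half a tick ahead. */
                while (next <= now + delta / 2)
                        next += delta;

                printf("program match at %llu (now %llu)\n",
                       (unsigned long long)next, (unsigned long long)now);
                return 0;
        }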
@@ -211,12 +321,10 @@ ia64_init_itm (void)
                                        + itc_freq/2)/itc_freq;
 
        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-               itc_interpolator.frequency = local_cpu_data->itc_freq;
-               itc_interpolator.drift = itc_drift;
 #ifdef CONFIG_SMP
                /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
                 * Jitter compensation requires a cmpxchg which may limit
-                * the scalibility of the syscalls for retrieving time.
+                * the scalability of the syscalls for retrieving time.
                 * The ITC synchronization is usually successful to within a few
                 * ITC ticks but this is not a sure thing. If you need to improve
                 * timer performance in SMP situations then boot the kernel with the
@@ -224,38 +332,97 @@ ia64_init_itm (void)
                 * even going backward) if the ITC offsets between the individual CPUs
                 * are too large.
                 */
-               if (!nojitter) itc_interpolator.jitter = 1;
+               if (!nojitter)
+                       itc_jitter_data.itc_jitter = 1;
 #endif
-               register_time_interpolator(&itc_interpolator);
-       }
+       } else
+               /*
+                * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
+                * ITC values may fluctuate significantly between processors.
+                * The clock should not be used for hrtimers. Mark the ITC as
+                * useful only for boot and testing.
+                *
+                * Note that jitter compensation is off! There is no point in
+                * compensating, since the ITC offsets may be large and may
+                * change over time.
+                *
+                * The only way to fix this would be to repeatedly sync the
+                * ITCs. Until that time we have to avoid ITC.
+                */
+               clocksource_itc.rating = 50;
+
+       paravirt_init_missing_ticks_accounting(smp_processor_id());
+
+       /* avoid a softlockup warning when a cpu is unplugged and plugged back in */
+       touch_softlockup_watchdog();
 
        /* Setup the CPU local timer tick */
        ia64_cpu_local_tick();
+
+       if (!itc_clocksource) {
+               clocksource_register_hz(&clocksource_itc,
+                                               local_cpu_data->itc_freq);
+               itc_clocksource = &clocksource_itc;
+       }
+}
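clocksource_register_hz() derives a mult/shift pair from itc_freq so that the
timekeeping core can convert cycle deltas to nanoseconds as (cycles * mult) >> shift.
A back-of-the-envelope version of that conversion, with an assumed 400 MHz ITC and an
arbitrary shift (the kernel computes both values itself):

        #include <stdint.h>
        #include <stdio.h>

        #define NSEC_PER_SEC 1000000000ull

        int main(void)
        {
                uint64_t itc_freq = 400000000ull;       /* assumed 400 MHz ITC */
                uint32_t shift    = 20;                 /* example shift value */
                uint32_t mult     = (uint32_t)((NSEC_PER_SEC << shift) / itc_freq);

                uint64_t cycles = 4000000;              /* 10 ms worth of cycles at 400 MHz */
                uint64_t ns     = (cycles * mult) >> shift;

                printf("mult=%u shift=%u: %llu cycles -> %llu ns\n",
                       mult, shift, (unsigned long long)cycles, (unsigned long long)ns);
                return 0;
        }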
+
+static cycle_t itc_get_cycles(struct clocksource *cs)
+{
+       unsigned long lcycle, now, ret;
+
+       if (!itc_jitter_data.itc_jitter)
+               return get_cycles();
+
+       lcycle = itc_jitter_data.itc_lastcycle;
+       now = get_cycles();
+       if (lcycle && time_after(lcycle, now))
+               return lcycle;
+
+       /*
+        * Keep track of the last timer value returned.
+        * In an SMP environment the cmpxchg below may lose a race with
+        * another CPU.  If it does, it returns the value stored by the
+        * winner, which is at least as recent as ours, so use that value.
+        */
+       ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
+       if (unlikely(ret != lcycle))
+               return ret;
+
+       return now;
 }
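A stand-alone sketch of the same lock-free "never go backwards" scheme, using C11
atomics in place of the kernel's cmpxchg; read_raw_counter() is a hypothetical
stand-in for get_cycles():

        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <time.h>

        static _Atomic uint64_t last_cycle;     /* plays the role of itc_lastcycle */

        static uint64_t read_raw_counter(void)  /* hypothetical raw counter read */
        {
                struct timespec ts;
                clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
                return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
        }

        uint64_t monotonic_cycles(void)
        {
                uint64_t last = atomic_load(&last_cycle);
                uint64_t now  = read_raw_counter();

                /* Another CPU may already have published a later value. */
                if (last && (int64_t)(now - last) < 0)
                        return last;

                /* Try to publish 'now'.  On failure 'last' is updated to the
                 * winner's value, which is at least as recent, so return that. */
                if (!atomic_compare_exchange_strong(&last_cycle, &last, now))
                        return last;

                return now;
        }

        int main(void)
        {
                printf("%llu\n%llu\n", (unsigned long long)monotonic_cycles(),
                       (unsigned long long)monotonic_cycles());
                return 0;
        }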
 
+
 static struct irqaction timer_irqaction = {
        .handler =      timer_interrupt,
        .flags =        IRQF_DISABLED | IRQF_IRQPOLL,
        .name =         "timer"
 };
 
-void __devinit ia64_disable_timer(void)
+static struct platform_device rtc_efi_dev = {
+       .name = "rtc-efi",
+       .id = -1,
+};
+
+static int __init rtc_init(void)
+{
+       if (platform_device_register(&rtc_efi_dev) < 0)
+               printk(KERN_ERR "unable to register rtc device...\n");
+
+       /* not necessarily an error */
+       return 0;
+}
+module_init(rtc_init);
+
+void read_persistent_clock(struct timespec *ts)
 {
-       ia64_set_itv(1 << 16);
+       efi_gettimeofday(ts);
 }
 
 void __init
 time_init (void)
 {
        register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-       efi_gettimeofday(&xtime);
        ia64_init_itm();
-
-       /*
-        * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
-        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
-        */
-       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
 /*
@@ -282,29 +449,37 @@ udelay (unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static unsigned long long ia64_itc_printk_clock(void)
+/* IA64 doesn't cache the timezone */
+void update_vsyscall_tz(void)
 {
-       if (ia64_get_kr(IA64_KR_PER_CPU_DATA))
-               return sched_clock();
-       return 0;
 }
 
-static unsigned long long ia64_default_printk_clock(void)
+void update_vsyscall(struct timespec *wall, struct timespec *wtm,
+                       struct clocksource *c, u32 mult)
 {
-       return (unsigned long long)(jiffies_64 - INITIAL_JIFFIES) *
-               (1000000000/HZ);
-}
-
-unsigned long long (*ia64_printk_clock)(void) = &ia64_default_printk_clock;
+       write_seqcount_begin(&fsyscall_gtod_data.seq);
+
+       /* copy fsyscall clock data */
+       fsyscall_gtod_data.clk_mask = c->mask;
+       fsyscall_gtod_data.clk_mult = mult;
+       fsyscall_gtod_data.clk_shift = c->shift;
+       fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
+       fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+
+       /* copy kernel time structures */
+       fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
+       fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
+       fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
+                                                       + wall->tv_sec;
+       fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
+                                                       + wall->tv_nsec;
+
+       /* normalize */
+       while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
+               fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
+               fsyscall_gtod_data.monotonic_time.tv_sec++;
+       }
 
-unsigned long long printk_clock(void)
-{
-       return ia64_printk_clock();
+       write_seqcount_end(&fsyscall_gtod_data.seq);
 }
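The monotonic value published above is just wall time plus wall_to_monotonic with
tv_nsec folded back into range; a stand-alone version of that normalization, with
made-up inputs, is:

        #include <stdio.h>
        #include <time.h>

        #define NSEC_PER_SEC 1000000000L

        static struct timespec timespec_add_norm(struct timespec a, struct timespec b)
        {
                struct timespec r = { .tv_sec  = a.tv_sec + b.tv_sec,
                                      .tv_nsec = a.tv_nsec + b.tv_nsec };

                while (r.tv_nsec >= NSEC_PER_SEC) {     /* at most one pass here */
                        r.tv_nsec -= NSEC_PER_SEC;
                        r.tv_sec++;
                }
                return r;
        }

        int main(void)
        {
                struct timespec wall = { .tv_sec = 1000, .tv_nsec = 900000000 };
                struct timespec wtm  = { .tv_sec = -7,   .tv_nsec = 300000000 };
                struct timespec mono = timespec_add_norm(wall, wtm);

                printf("monotonic = %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
                return 0;
        }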
 
-void __init
-ia64_setup_printk_clock(void)
-{
-       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
-               ia64_printk_clock = ia64_itc_printk_clock;
-}