/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */
#include <linux/config.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>		/* ia64_sal_freq_base(), ia64_sal_strerror() */
#include <asm/system.h>
extern unsigned long wall_jiffies;
extern unsigned long last_nsec_offset;

u64 jiffies_64 = INITIAL_JIFFIES;

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;

#endif
static void
do_profile (unsigned long ip)
{
	extern unsigned long prof_cpu_mask;
	extern char _stext;

	if (!prof_buffer)
		return;

	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
		return;

	ip -= (unsigned long) &_stext;
	ip >>= prof_shift;
	/*
	 * Don't ignore out-of-bounds IP values silently; put them into the last
	 * histogram slot, so if present, they will show up as a sharp peak.
	 */
	if (ip > prof_len - 1)
		ip = prof_len - 1;

	atomic_inc((atomic_t *) &prof_buffer[ip]);
}
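/*
 * For illustration (hypothetical values, not defined in this file): with
 * prof_shift == 2, an IP 0x40c bytes past _stext lands in histogram slot
 * 0x40c >> 2 == 0x103, i.e. each slot covers 4 bytes of text -- fine enough
 * for the per-instruction-slot addresses that timer_interrupt() passes in.
 */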
/*
 * Return the number of nano-seconds that elapsed since the last update of jiffies.
 * The xtime_lock must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
	unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
	unsigned long now, last_tick;
#	define time_keeper_id	0	/* smp_processor_id() of time-keeper */

	last_tick = (cpu_data(time_keeper_id)->itm_next
		     - (lost + 1)*cpu_data(time_keeper_id)->itm_delta);
	now = ia64_get_itc();
	if (unlikely((long) (now - last_tick) < 0)) {
		printk(KERN_ERR "CPU %d: now < last_tick (now=0x%lx,last_tick=0x%lx)!\n",
		       smp_processor_id(), now, last_tick);
		return last_nsec_offset;
	}
	elapsed_cycles = now - last_tick;
	return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
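/*
 * Worked example of the fixed-point conversion above (illustrative numbers):
 * with a 400MHz ITC, nsec_per_cyc holds ~2.5 scaled by 2^IA64_NSEC_PER_CYC_SHIFT,
 * so 1000 elapsed cycles give (1000*2.5*2^SHIFT) >> SHIFT = 2500ns, with no
 * division on this hot path.
 */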
static inline void
set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
void
do_settimeofday (struct timeval *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_usec * 1000;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting.  We need to set "xtime" correctly.  However, the
		 * value in this location is the value at the most recent update of wall
		 * time.  Discover what correction gettimeofday would have done, and then
		 * undo it!
		 */
		nsec -= gettimeoffset();

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
	}
	write_sequnlock_irq(&xtime_lock);
}
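/*
 * Example of the "undo" above (illustrative numbers): if the call happens 3ms
 * after the last tick, gettimeoffset() returns ~3000000ns, so we store tv
 * minus 3ms into xtime.  A gettimeofday() issued immediately afterwards adds
 * the same ~3ms back and thus returns approximately tv, as intended.
 */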
void
do_gettimeofday (struct timeval *tv)
{
	unsigned long seq, nsec, usec, sec, old, offset;

	while (1) {
		seq = read_seqbegin(&xtime_lock);
		{
			old = last_nsec_offset;
			offset = gettimeoffset();
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec;
		}
		if (unlikely(read_seqretry(&xtime_lock, seq)))
			continue;
		/*
		 * Ensure that for any pair of causally ordered gettimeofday() calls, time
		 * never goes backwards (even when ITC on different CPUs are not perfectly
		 * synchronized).  (A pair of concurrent calls to gettimeofday() is by
		 * definition non-causal and hence it makes no sense to talk about
		 * time-continuity for such calls.)
		 *
		 * Doing this in a lock-free and race-free manner is tricky.  Here is why
		 * it works (most of the time): read_seqretry() just succeeded, which
		 * implies we calculated a consistent (valid) value for "offset".  If the
		 * cmpxchg() below succeeds, we further know that last_nsec_offset still
		 * has the same value as at the beginning of the loop, so there was
		 * presumably no timer-tick or other update to last_nsec_offset in the
		 * meantime.  This isn't 100% true though: there _is_ a possibility of a
		 * timer-tick occurring right after read_seqretry() and then getting zero
		 * or more other readers which will set last_nsec_offset to the same
		 * value as the one we read at the beginning of the loop.  If this
		 * happens, we'll end up returning a slightly newer time than we ought to
		 * (the jump forward is at most "offset" nano-seconds).  There is no
		 * danger of causing time to go backwards, though, so we are safe in that
		 * sense.  We could make the probability of this unlucky case occurring
		 * arbitrarily small by encoding a version number in last_nsec_offset, but
		 * even without versioning, the probability of this unlucky case should be
		 * so small that we won't worry about it.
		 */
		if (offset <= old) {
			offset = old;
			break;
		} else if (likely(cmpxchg(&last_nsec_offset, old, offset) == old))
			break;

		/* someone else beat us to updating last_nsec_offset; try again */
	}

	usec = (nsec + offset) / 1000;

	while (unlikely(usec >= USEC_PER_SEC)) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
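/*
 * Illustrative scenario for the cmpxchg() logic above: CPU A computes
 * offset=1000ns and publishes it in last_nsec_offset.  CPU B, whose ITC lags
 * slightly, then computes offset=990ns; since 990 <= 1000 it returns the
 * published 1000ns instead, so a causally later call never reports an
 * earlier time.
 */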
static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long new_itm;

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	while (1) {
		/*
		 * Do kernel PC profiling here.  We multiply the instruction number by
		 * four so that we can use a prof_shift of 2 to get instruction-level
		 * instead of just bundle-level accuracy.
		 */
		if (!user_mode(regs))
			do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);

#ifdef CONFIG_SMP
		smp_do_timer(regs);
#endif
		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == 0) {
			/*
			 * Here we are in the timer irq handler.  We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU.  We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(regs);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;
	}

	do {
		/*
		 * If we're too close to the next clock tick for comfort, we increase the
		 * safety margin by intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call do_timer() which in
		 * turn would let our clock run too fast (with the potentially devastating
		 * effect of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
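/*
 * On the 4*ri scaling above (illustrative): an IA-64 bundle is 16 bytes and
 * holds three instruction slots that share one cr_iip, so slot ri=2 is
 * recorded as cr_iip+8.  Combined with prof_shift == 2 in do_profile(), each
 * 4-byte step lands in its own histogram slot, giving per-slot rather than
 * per-bundle resolution.
 */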
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
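/*
 * Stagger formula worked through (hypothetical CPU numbers): for cpu=5,
 * hi = 1UL << ia64_fls(5) = 4, so shift = (2*(5-4)+1)*delta/4/2 = 3*delta/8.
 * CPUs 4..7 thus fire at offsets delta/8, 3*delta/8, 5*delta/8 and 7*delta/8,
 * spreading each power-of-two group of CPUs evenly across one tick interval.
 */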
void __init
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq, drift;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform
	 * base frequency and then a PAL call to determine the frequency ratio between
	 * the ITC and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &platform_base_freq, &drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, 0, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_INFO "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
	       "ITC freq=%lu.%03luMHz\n", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();
}
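/*
 * The arithmetic above with illustrative firmware values: a 100MHz platform
 * base clock and an ITC ratio of 4/1 give itc_freq = 400000000.  Assuming
 * HZ=1024, itm_delta = (400000000 + 512)/1024 = 390625 cycles per tick, and
 * cyc_per_usec = (400000000 + 500000)/1000000 = 400.
 */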
static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	SA_INTERRUPT,
	.name =		"timer"
};
void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	efi_gettimeofday(&xtime);
	ia64_init_itm();

	/*
	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero;
	 * the tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
	 */
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
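/*
 * Normalization example for the call above (illustrative boot time): if EFI
 * reported xtime = { 1000, 500000000 }, passing (-1000, -500000000) yields
 * wall_to_monotonic = { -1001, 500000000 }; adding it to xtime (with the
 * nsec carry) gives exactly 0s/0ns, as required.
 */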