/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
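
/*
 * vsyscall_gtod_data lives in the vvar page, which the kernel maps
 * read-only into every process, so dereferencing gtod below is an
 * ordinary memory load: no mode switch is needed to see the
 * timekeeper's latest snapshot.
 */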
#define gtod (&VVAR(vsyscall_gtod_data))

#ifndef CONFIG_XEN
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");	/* compiler-only barrier: defeats if-conversion */
	return last;
}

static notrace cycle_t vread_hpet(void)
{
	/* 0xf0 is HPET_COUNTER, the MMIO offset of the main counter */
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif /* CONFIG_XEN */
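
/*
 * Fallback paths: when no usable vclock is available (for instance an
 * unstable TSC), fall back to a real syscall.  In the x86-64 syscall
 * ABI, rax ("=a", tied to input "0") carries the syscall number in and
 * the return value out, while rdi ("D") and rsi ("S") hold the first
 * two arguments; "memory" is clobbered because the kernel writes
 * through the pointers.
 */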
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

#ifndef CONFIG_XEN
notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;

	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}
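
/*
 * The mult/shift pair above is a fixed-point scale factor maintained by
 * the timekeeping core: ns = (cycles * mult) >> shift.  As a worked
 * example with illustrative numbers (not the live calibration): with
 * mult = 5368709 and shift = 22, one cycle is worth
 * 5368709 >> 22 ~= 1.28 ns (a ~781 MHz clock), so 1000 elapsed cycles
 * convert to (1000 * 5368709) >> 22 = 1280 ns.
 */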

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}
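
/*
 * The do/while above is the reader side of the seqcount protocol:
 * sample the sequence, copy the data, then retry if the sequence moved,
 * which means the kernel updated vsyscall_gtod_data concurrently.
 * Readers never block the timekeeper; they just loop until they have
 * observed a consistent snapshot.
 */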

notrace static int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ts->tv_nsec = gtod->monotonic_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}
#endif /* CONFIG_XEN */
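
/*
 * The _COARSE variants skip the cycle-counter read entirely and return
 * the timestamp recorded at the last timekeeper update: resolution is
 * limited to a tick, but the read is cheaper and needs no vclock
 * hardware.
 */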
notrace static int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}

notrace static int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	int ret = VCLOCK_NONE;

	switch (clock) {
	case CLOCK_REALTIME:
		ret = do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gettime(clock, ts);
	return 0;
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
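
/*
 * Userspace reaches these entry points through the vDSO image
 * (linux-vdso.so.1); libc wrappers typically resolve clock_gettime to
 * __vdso_clock_gettime at startup, so a plain call such as
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * normally completes without entering the kernel.
 */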

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;
	}
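
	/*
	 * The cast above relies on struct timeval and struct timespec
	 * having identical layout, which the BUILD_BUG_ON verifies at
	 * compile time: do_realtime() stores nanoseconds where tv_usec
	 * lives, and the division by 1000 turns them into microseconds
	 * in place.
	 */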

	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it. */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));