author     Haibo Xu <haibo.xu@arm.com>        2019-04-18 16:20:45 -0700
committer  Shentubot <shentubot@google.com>   2019-04-18 16:22:08 -0700
commit     f4d434c18002c96511decf8ff1ebdbede46ca6a1 (patch)
tree       cc781d877ef0ea64ce33c46d6a30b76eefa4753a /vdso
parent     c931c8e0829914718a729e20d7db0c2bf4e73f0b (diff)
Enable vDSO support on arm64.
Signed-off-by: Haibo Xu <haibo.xu@arm.com>
Change-Id: I20103cd6d193431ab7e8120005da1f567b9bc2eb
PiperOrigin-RevId: 244280119
Diffstat (limited to 'vdso')
-rw-r--r--  vdso/BUILD                                        | 16
-rw-r--r--  vdso/barrier.h                                    | 14
-rw-r--r--  vdso/cycle_clock.h                                |  9
-rw-r--r--  vdso/syscalls.h                                   | 49
-rw-r--r--  vdso/vdso.cc                                      | 65
-rw-r--r--  vdso/vdso_amd64.lds (renamed from vdso/vdso.lds)  |  0
-rw-r--r--  vdso/vdso_arm64.lds                               | 99
-rw-r--r--  vdso/vdso_time.cc                                 | 16
8 files changed, 257 insertions(+), 11 deletions(-)
diff --git a/vdso/BUILD b/vdso/BUILD
index 3df569233..f95f690eb 100644
--- a/vdso/BUILD
+++ b/vdso/BUILD
@@ -12,6 +12,11 @@ config_setting(
constraint_values = ["@bazel_tools//platforms:x86_64"],
)
+config_setting(
+ name = "aarch64",
+ constraint_values = ["@bazel_tools//platforms:aarch64"],
+)
+
genrule(
name = "vdso",
srcs = [
@@ -21,7 +26,8 @@ genrule(
"seqlock.h",
"syscalls.h",
"vdso.cc",
- "vdso.lds",
+ "vdso_amd64.lds",
+ "vdso_arm64.lds",
"vdso_time.h",
"vdso_time.cc",
],
@@ -49,7 +55,13 @@ genrule(
"-Wl,-Bsymbolic " +
"-Wl,-z,max-page-size=4096 " +
"-Wl,-z,common-page-size=4096 " +
- "-Wl,-T$(location vdso.lds) " +
+ select(
+ {
+ ":x86_64": "-Wl,-T$(location vdso_amd64.lds) ",
+ ":aarch64": "-Wl,-T$(location vdso_arm64.lds) ",
+ },
+ no_match_error = "Unsupported architecture",
+ ) +
"-o $(location vdso.so) " +
"$(location vdso.cc) " +
"$(location vdso_time.cc) " +
diff --git a/vdso/barrier.h b/vdso/barrier.h
index 7866af414..5b6c763f6 100644
--- a/vdso/barrier.h
+++ b/vdso/barrier.h
@@ -21,11 +21,25 @@ namespace vdso {
inline void barrier(void) { __asm__ __volatile__("" ::: "memory"); }
#if __x86_64__
+
inline void memory_barrier(void) {
__asm__ __volatile__("mfence" ::: "memory");
}
inline void read_barrier(void) { barrier(); }
inline void write_barrier(void) { barrier(); }
+
+#elif __aarch64__
+
+inline void memory_barrier(void) {
+ __asm__ __volatile__("dmb ish" ::: "memory");
+}
+inline void read_barrier(void) {
+ __asm__ __volatile__("dmb ishld" ::: "memory");
+}
+inline void write_barrier(void) {
+ __asm__ __volatile__("dmb ishst" ::: "memory");
+}
+
#else
#error "unsupported architecture"
#endif
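The new dmb-based barriers are what a seqlock-style reader (the build already lists seqlock.h among the vDSO sources) presumably pairs with. Below is a minimal, hypothetical sketch, not the project's seqlock.h, of how a reader would use a load barrier like the one added above on aarch64:

#include <stdint.h>

// Mirrors the aarch64 read_barrier() added above (assumes an aarch64 target).
static inline void read_barrier(void) {
  __asm__ __volatile__("dmb ishld" ::: "memory");
}

struct Sample {
  uint64_t seq;    // even = stable, odd = writer in progress
  uint64_t value;  // data protected by the sequence counter
};

// Returns a consistent snapshot of `value`, retrying while a write is in
// flight or the sequence changed between the two reads.
static inline uint64_t seqlock_read(const volatile struct Sample* s) {
  uint64_t start, v;
  do {
    start = s->seq;
    read_barrier();  // order the seq load before the data load
    v = s->value;
    read_barrier();  // order the data load before the seq re-check
  } while ((start & 1) != 0 || s->seq != start);
  return v;
}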
diff --git a/vdso/cycle_clock.h b/vdso/cycle_clock.h
index dfb5b427d..26d6690c0 100644
--- a/vdso/cycle_clock.h
+++ b/vdso/cycle_clock.h
@@ -33,6 +33,15 @@ static inline uint64_t cycle_clock(void) {
asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
return ((uint64_t)hi << 32) | lo;
}
+
+#elif __aarch64__
+
+static inline uint64_t cycle_clock(void) {
+ uint64_t val;
+ asm volatile("mrs %0, CNTVCT_EL0" : "=r"(val)::"memory");
+ return val;
+}
+
#else
#error "unsupported architecture"
#endif
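CNTVCT_EL0 is the aarch64 virtual counter, and its tick rate is advertised in CNTFRQ_EL0. The vDSO itself scales ticks using the parameters the sentry publishes in the parameter page, but as a standalone, hypothetical illustration of the counter (not part of this commit), ticks can be converted to nanoseconds like this:

#include <stdint.h>

static inline uint64_t counter_ticks(void) {
  uint64_t val;
  asm volatile("mrs %0, CNTVCT_EL0" : "=r"(val) :: "memory");
  return val;
}

static inline uint64_t counter_freq_hz(void) {
  uint64_t hz;
  asm volatile("mrs %0, CNTFRQ_EL0" : "=r"(hz));  // counter frequency in Hz
  return hz;
}

// Elapsed nanoseconds between two nearby counter samples (the multiply can
// overflow for very long intervals; fine for short deltas).
static inline uint64_t ticks_to_ns(uint64_t start, uint64_t end) {
  return (end - start) * 1000000000ull / counter_freq_hz();
}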
diff --git a/vdso/syscalls.h b/vdso/syscalls.h
index 0be8a7f9b..90fb424ce 100644
--- a/vdso/syscalls.h
+++ b/vdso/syscalls.h
@@ -26,10 +26,12 @@
#include <stddef.h>
#include <sys/types.h>
-struct getcpu_cache;
-
namespace vdso {
+#if __x86_64__
+
+struct getcpu_cache;
+
static inline int sys_clock_gettime(clockid_t clock, struct timespec* ts) {
int num = __NR_clock_gettime;
asm volatile("syscall\n"
@@ -49,6 +51,49 @@ static inline int sys_getcpu(unsigned* cpu, unsigned* node,
return num;
}
+#elif __aarch64__
+
+static inline int sys_rt_sigreturn(void) {
+ int num = __NR_rt_sigreturn;
+
+ asm volatile(
+ "mov x8, %0\n"
+ "svc #0 \n"
+ : "+r"(num)
+ :
+ :);
+ return num;
+}
+
+static inline int sys_clock_gettime(clockid_t _clkid, struct timespec *_ts) {
+ register struct timespec *ts asm("x1") = _ts;
+ register clockid_t clkid asm("x0") = _clkid;
+ register long ret asm("x0");
+ register long nr asm("x8") = __NR_clock_gettime;
+
+ asm volatile("svc #0\n"
+ : "=r"(ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+ return ret;
+}
+
+static inline int sys_clock_getres(clockid_t _clkid, struct timespec *_ts) {
+ register struct timespec *ts asm("x1") = _ts;
+ register clockid_t clkid asm("x0") = _clkid;
+ register long ret asm("x0");
+ register long nr asm("x8") = __NR_clock_getres;
+
+ asm volatile("svc #0\n"
+ : "=r"(ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+ return ret;
+}
+
+#else
+#error "unsupported architecture"
+#endif
} // namespace vdso
#endif // VDSO_SYSCALLS_H_
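The aarch64 wrappers above all follow the same kernel calling convention: the syscall number goes in x8, arguments in x0..x5, the trap is "svc #0", and the result comes back in x0. A generic three-argument sketch of that pattern, hypothetical and not part of the commit:

static inline long svc3(long nr, long a0, long a1, long a2) {
  register long x0 asm("x0") = a0;
  register long x1 asm("x1") = a1;
  register long x2 asm("x2") = a2;
  register long x8 asm("x8") = nr;
  asm volatile("svc #0"
               : "+r"(x0)                      // x0 carries arg0 in, result out
               : "r"(x1), "r"(x2), "r"(x8)
               : "memory");
  return x0;
}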
diff --git a/vdso/vdso.cc b/vdso/vdso.cc
index f30dc26a2..550729035 100644
--- a/vdso/vdso.cc
+++ b/vdso/vdso.cc
@@ -23,9 +23,9 @@
#include "vdso/vdso_time.h"
namespace vdso {
+namespace {
-// __vdso_clock_gettime() implements clock_gettime()
-extern "C" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {
+int __common_clock_gettime(clockid_t clock, struct timespec* ts) {
int ret;
switch (clock) {
@@ -44,11 +44,8 @@ extern "C" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {
return ret;
}
-extern "C" int clock_gettime(clockid_t clock, struct timespec* ts)
- __attribute__((weak, alias("__vdso_clock_gettime")));
-// __vdso_gettimeofday() implements gettimeofday()
-extern "C" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {
+int __common_gettimeofday(struct timeval* tv, struct timezone* tz) {
if (tv) {
struct timespec ts;
int ret = ClockRealtime(&ts);
@@ -68,6 +65,21 @@ extern "C" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {
return 0;
}
+} // namespace
+
+#if __x86_64__
+
+// __vdso_clock_gettime() implements clock_gettime()
+extern "C" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {
+ return __common_clock_gettime(clock, ts);
+}
+extern "C" int clock_gettime(clockid_t clock, struct timespec* ts)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
+
+// __vdso_gettimeofday() implements gettimeofday()
+extern "C" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {
+ return __common_gettimeofday(tv, tz);
+}
extern "C" int gettimeofday(struct timeval* tv, struct timezone* tz)
__attribute__((weak, alias("__vdso_gettimeofday")));
@@ -92,4 +104,45 @@ extern "C" long getcpu(unsigned* cpu, unsigned* node,
struct getcpu_cache* cache)
__attribute__((weak, alias("__vdso_getcpu")));
+#elif __aarch64__
+
+// __kernel_clock_gettime() implements clock_gettime()
+extern "C" int __kernel_clock_gettime(clockid_t clock, struct timespec* ts) {
+ return __common_clock_gettime(clock, ts);
+}
+
+// __kernel_gettimeofday() implements gettimeofday()
+extern "C" int __kernel_gettimeofday(struct timeval* tv, struct timezone* tz) {
+ return __common_gettimeofday(tv, tz);
+}
+
+// __kernel_clock_getres() implements clock_getres()
+extern "C" int __kernel_clock_getres(clockid_t clock, struct timespec* res) {
+ int ret = 0;
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC: {
+ res->tv_sec = 0;
+ res->tv_nsec = 1;
+ break;
+ }
+
+ default:
+ ret = sys_clock_getres(clock, res);
+ break;
+ }
+
+ return ret;
+}
+
+// __kernel_rt_sigreturn() implements rt_sigreturn()
+extern "C" int __kernel_rt_sigreturn(unsigned long unused) {
+ // No optimizations yet, just make the real system call.
+ return sys_rt_sigreturn();
+}
+
+#else
+#error "unsupported architecture"
+#endif
} // namespace vdso
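The arm64 entry points use the __kernel_* names that the Linux arm64 vDSO exports, while x86_64 keeps the __vdso_* names. Callers normally never spell these out; libc resolves them at startup and routes the ordinary POSIX calls through them whenever a vDSO is mapped. A small usage sketch (hypothetical, plain userspace code):

#include <stdio.h>
#include <time.h>

int main(void) {
  struct timespec now, res;
  // These take the vDSO fast path when libc has resolved one;
  // otherwise they fall back to real system calls.
  if (clock_gettime(CLOCK_MONOTONIC, &now) == 0 &&
      clock_getres(CLOCK_MONOTONIC, &res) == 0) {
    printf("monotonic: %lld.%09ld s, resolution: %ld ns\n",
           (long long)now.tv_sec, now.tv_nsec, res.tv_nsec);
  }
  return 0;
}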
diff --git a/vdso/vdso.lds b/vdso/vdso_amd64.lds
index 166779931..166779931 100644
--- a/vdso/vdso.lds
+++ b/vdso/vdso_amd64.lds
diff --git a/vdso/vdso_arm64.lds b/vdso/vdso_arm64.lds
new file mode 100644
index 000000000..19f8efa01
--- /dev/null
+++ b/vdso/vdso_arm64.lds
@@ -0,0 +1,99 @@
+/*
+ * Linker script for the VDSO.
+ *
+ * The VDSO is essentially a normal ELF shared library that is mapped into the
+ * address space of the process that is going to use it. The address of the
+ * VDSO is passed to the runtime linker in the AT_SYSINFO_EHDR entry of the aux
+ * vector.
+ *
+ * There are, however, three ways in which the VDSO differs from a normal
+ * shared library:
+ *
+ * - The runtime linker does not attempt to process any relocations for the
+ * VDSO so it is the responsibility of whoever loads the VDSO into the
+ * address space to do this if necessary. Because of this restriction we are
+ * careful to ensure that the VDSO does not need to have any relocations
+ * applied to it.
+ *
+ * - Although the VDSO is position independent and would normally be linked at
+ * virtual address 0, the Linux kernel VDSO is actually linked at a non zero
+ * virtual address and the code in the system runtime linker that handles the
+ * VDSO expects this to be the case so we have to explicitly link this VDSO
+ * at a non zero address. The actual address is arbitrary, but we use the
+ * same one as the Linux kernel VDSO.
+ *
+ * - The VDSO will be directly mmapped by the sentry, rather than going through
+ * a normal ELF loading process. The VDSO must be carefully constructed such
+ * that the layout in the ELF file is identical to the layout in memory.
+ */
+
+VDSO_PRELINK = 0xffffffffff700000;
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+
+SECTIONS {
+ /* The parameter page is mapped just before the VDSO. */
+ _params = VDSO_PRELINK - 0x1000;
+
+ . = VDSO_PRELINK + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ .altinstructions : { *(.altinstructions) }
+ .altinstr_replacement : { *(.altinstr_replacement) }
+
+ /*
+ * TODO: Remove this alignment? Then the VDSO would fit
+ * in a single page.
+ */
+ . = ALIGN(0x1000);
+ .text : { *(.text*) } :text =0xd503201f
+
+ /*
+ * N.B. There is no data/bss section. This VDSO neither needs nor uses a data
+ * section. We omit it entirely because some gcc/clang and gold/bfd version
+ * combinations struggle to handle an empty data PHDR segment (internal
+ * linker assertion failures result).
+ *
+ * If the VDSO does incorrectly include a data section, the linker will
+ * include it in the text segment. check_vdso.py looks for this degenerate
+ * case.
+ */
+}
+
+PHDRS {
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * Define the symbols that are to be exported.
+ */
+VERSION {
+ LINUX_2.6.39 {
+ global:
+ __kernel_clock_getres;
+ __kernel_clock_gettime;
+ __kernel_gettimeofday;
+ __kernel_rt_sigreturn;
+ local: *;
+ };
+}
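As the header comment in the script explains, the loaded vDSO is announced to userspace through the AT_SYSINFO_EHDR auxiliary-vector entry, which points at the ELF header of the mapped image. A hypothetical sketch of reading that entry directly (libc does this internally during startup):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void) {
  // Base address of the mapped vDSO ELF image, or 0 if none is present.
  unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);
  if (vdso_base == 0) {
    puts("no vDSO mapped");
  } else {
    printf("vDSO ELF header at %#lx\n", vdso_base);
  }
  return 0;
}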
diff --git a/vdso/vdso_time.cc b/vdso/vdso_time.cc
index a59771bff..9fc262f60 100644
--- a/vdso/vdso_time.cc
+++ b/vdso/vdso_time.cc
@@ -55,12 +55,26 @@ struct params {
//
// So instead, we use inline assembly with a construct that seems to have wide
// compatibility across many toolchains.
+#if __x86_64__
+
+inline struct params* get_params() {
+ struct params* p = nullptr;
+ asm("leaq _params(%%rip), %0" : "=r"(p) : :);
+ return p;
+}
+
+#elif __aarch64__
+
inline struct params* get_params() {
struct params* p = nullptr;
- asm volatile("leaq _params(%%rip), %0" : "=r"(p) : :);
+ asm("adr %0, _params" : "=r"(p) : :);
return p;
}
+#else
+#error "unsupported architecture"
+#endif
+
namespace vdso {
const uint64_t kNsecsPerSec = 1000000000UL;