From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: by sourceware.org (Postfix, from userid 7856) id 3A0C43858412; Mon, 11 Jul 2022 11:01:20 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 3A0C43858412
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Xi Ruoyao 
To: gcc-cvs@gcc.gnu.org
Subject: [gcc(refs/users/xry111/heads/loongarch-sanitizer-test)] cherry-pick https://reviews.llvm.org/D129371
X-Act-Checkin: gcc
X-Git-Author: Xi Ruoyao 
X-Git-Refname: refs/users/xry111/heads/loongarch-sanitizer-test
X-Git-Oldrev: 415d2c38edadf97950eb14b8d7e6b1491c98cdd5
X-Git-Newrev: bb82537128e17d84776c41101c3e1b02a9460a93
Message-Id: <20220711110120.3A0C43858412@sourceware.org>
Date: Mon, 11 Jul 2022 11:01:20 +0000 (GMT)
X-BeenThere: gcc-cvs@gcc.gnu.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Gcc-cvs mailing list
List-Unsubscribe: ,
List-Archive: 
List-Help: 
List-Subscribe: ,
X-List-Received-Date: Mon, 11 Jul 2022 11:01:20 -0000

https://gcc.gnu.org/g:bb82537128e17d84776c41101c3e1b02a9460a93

commit bb82537128e17d84776c41101c3e1b02a9460a93
Author: Xi Ruoyao 
Date:   Mon Jul 11 18:20:09 2022 +0800

    cherry-pick https://reviews.llvm.org/D129371

Diff:
---
 libsanitizer/sanitizer_common/sanitizer_linux.cpp  | 64 +++++++-
 libsanitizer/sanitizer_common/sanitizer_platform.h |  6 +
 .../sanitizer_platform_limits_linux.cpp            |  5 +-
 .../sanitizer_platform_limits_posix.cpp            |  4 +
 .../sanitizer_platform_limits_posix.h              |  5 +-
 .../sanitizer_syscall_linux_loongarch64.inc        | 166 +++++++++++++++++++++
 6 files changed, 243 insertions(+), 7 deletions(-)

diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
index 5ba033492e7..a3f12b0add6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
@@ -78,6 +78,10 @@
 #include 
 #endif
 
+#if SANITIZER_LINUX && defined(__loongarch__)
+#include 
+#endif
+
 #if SANITIZER_FREEBSD
 #include 
 #include 
@@ -188,6 +192,8 @@ ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
 # include "sanitizer_syscall_linux_arm.inc"
 # elif SANITIZER_LINUX && defined(__hexagon__)
 # include "sanitizer_syscall_linux_hexagon.inc"
+# elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+# include "sanitizer_syscall_linux_loongarch64.inc"
 # else
 # include "sanitizer_syscall_generic.inc"
 # endif
@@ -290,6 +296,28 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
 }
 #endif
 
+#if SANITIZER_LINUX && defined(__loongarch__)
+static void statx_to_stat(struct statx *in, struct stat *out) {
+  internal_memset(out, 0, sizeof(*out));
+  out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
+  out->st_ino = in->stx_ino;
+  out->st_mode = in->stx_mode;
+  out->st_nlink = in->stx_nlink;
+  out->st_uid = in->stx_uid;
+  out->st_gid = in->stx_gid;
+  out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
+  out->st_size = in->stx_size;
+  out->st_blksize = in->stx_blksize;
+  out->st_blocks = in->stx_blocks;
+  out->st_atime = in->stx_atime.tv_sec;
+  out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
+  out->st_mtime = in->stx_mtime.tv_sec;
+  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
+  out->st_ctime = in->stx_ctime.tv_sec;
+  out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
+}
+#endif
+
 #if SANITIZER_MIPS64
 // Undefine compatibility macros from 
 // so that they would not clash with the kernel_stat
@@ -344,8 +372,15 @@ uptr internal_stat(const char *path, void *buf) {
 #if SANITIZER_FREEBSD
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
 # elif SANITIZER_LINUX
-# if SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
-     (defined(__mips__) && _MIPS_SIM == _ABIN32)
+# if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+        (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+       !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
 # else
@@ -368,8 +403,16 @@ uptr internal_lstat(const char *path, void *buf) {
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
 # elif SANITIZER_LINUX
-# if defined(_LP64) || SANITIZER_X32 || \
-     (defined(__mips__) && _MIPS_SIM == _ABIN32)
+# if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+# elif (defined(_LP64) || SANITIZER_X32 || \
+        (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+       !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
 # else
@@ -395,6 +438,12 @@ uptr internal_fstat(fd_t fd, void *buf) {
   int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
   kernel_stat_to_stat(&kbuf, (struct stat *)buf);
   return res;
+# elif SANITIZER_LINUX && defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), fd, 0, AT_EMPTY_PATH,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
 # else
   return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
 # endif
@@ -443,7 +492,7 @@ uptr internal_unlink(const char *path) {
 }
 
 uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv) && defined(__linux__)
+#if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
   return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                           (uptr)newpath, 0);
 # elif SANITIZER_LINUX
@@ -2151,6 +2200,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
   *pc = ucontext->uc_mcontext.pc;
   *bp = ucontext->uc_mcontext.r30;
   *sp = ucontext->uc_mcontext.r29;
+# elif defined(__loongarch__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.__pc;
+  *bp = ucontext->uc_mcontext.__gregs[22];
+  *sp = ucontext->uc_mcontext.__gregs[3];
 # else
 # error "Unsupported arch"
 # endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 8bd9a327623..1473bb5371c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -244,6 +244,12 @@
 # define SANITIZER_RISCV64 0
 #endif
 
+#if defined(__loongarch__) && defined(__loongarch_lp64)
+# define SANITIZER_LOONGARCH64 1
+#else
+# define SANITIZER_LOONGARCH64 0
+#endif
+
 // By default we allow to use SizeClassAllocator64 on 64-bit platform.
 // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
 // does not work well and we need to fallback to SizeClassAllocator32.
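Background note on the statx() calls in the sanitizer_linux.cpp hunks above: LoongArch uses the kernel's generic syscall list, which on this port does not wire up the older stat-family syscalls (fstat/newfstatat), so internal_stat(), internal_lstat() and internal_fstat() issue statx(2) and convert the result back to struct stat via statx_to_stat(). The snippet below is an editorial, standalone illustration of that same conversion through the glibc statx() wrapper; it is not part of the patch, and it assumes glibc >= 2.28 plus <sys/sysmacros.h> for makedev().

  // Editorial illustration only: statx(2) -> struct stat, mirroring
  // statx_to_stat() above. Assumes glibc >= 2.28 for the statx() wrapper.
  #define _GNU_SOURCE
  #include <fcntl.h>          // AT_FDCWD, AT_NO_AUTOMOUNT
  #include <stdio.h>
  #include <string.h>
  #include <sys/stat.h>       // statx(), struct statx, STATX_BASIC_STATS
  #include <sys/sysmacros.h>  // makedev()

  int main(void) {
    struct statx sx;
    if (statx(AT_FDCWD, "/etc/passwd", AT_NO_AUTOMOUNT, STATX_BASIC_STATS,
              &sx) != 0) {
      perror("statx");
      return 1;
    }
    struct stat st;
    memset(&st, 0, sizeof(st));  // the patch uses internal_memset() here
    st.st_dev = makedev(sx.stx_dev_major, sx.stx_dev_minor);
    st.st_ino = sx.stx_ino;
    st.st_mode = sx.stx_mode;
    st.st_nlink = sx.stx_nlink;
    st.st_size = sx.stx_size;
    st.st_atim.tv_sec = sx.stx_atime.tv_sec;
    st.st_atim.tv_nsec = sx.stx_atime.tv_nsec;
    printf("ino=%llu mode=%o size=%lld\n", (unsigned long long)st.st_ino,
           (unsigned)st.st_mode, (long long)st.st_size);
    return 0;
  }

The internal_fstat() hunk uses the same syscall with an empty path and AT_EMPTY_PATH, which is how statx(2) describes an already-open file descriptor.
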
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
index 2b1a2f7932c..0ec01376275 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -68,11 +68,14 @@ namespace __sanitizer {
 
 # if !defined(__powerpc64__) && !defined(__x86_64__) && \
      !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
-     !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__)
+     !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
+     !defined(__loongarch__)
 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
 #endif
 
+#if !defined(__loongarch__)
 COMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));
+#endif
 
 #if defined(__i386__)
 COMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 8ed3e92d270..3795e4102aa 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -265,6 +265,10 @@ namespace __sanitizer {
       defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
       defined(__hexagon__)
 # define SIZEOF_STRUCT_USTAT 20
+# elif defined(__loongarch__)
+  // Not used. The minimum Glibc version available for LoongArch is 2.36,
+  // so the ustat() wrapper is already gone.
+# define SIZEOF_STRUCT_USTAT 0
 # else
 # error Unknown size of struct ustat
 # endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index 89772a7e5c0..75a62bfecb7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -105,6 +105,9 @@ const unsigned struct_kernel_stat64_sz = 0;  // RISCV64 does not use stat64
 # elif defined(__hexagon__)
 const unsigned struct_kernel_stat_sz = 128;
 const unsigned struct_kernel_stat64_sz = 0;
+# elif defined(__loongarch__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
 # endif
 struct __sanitizer_perf_event_attr {
   unsigned type;
@@ -125,7 +128,7 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
 
 #if SANITIZER_LINUX
 
-#if defined(__powerpc64__) || defined(__s390__)
+#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
 const unsigned struct___old_kernel_stat_sz = 0;
 #elif !defined(__sparc__)
 const unsigned struct___old_kernel_stat_sz = 32;
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
new file mode 100644
index 00000000000..502f24d7717
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -0,0 +1,166 @@
+//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for
+// Linux/loongarch64.
+//
+//===----------------------------------------------------------------------===//
+
+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI...
+// The syscall number is passed in a7, results are returned in a0 and a1, and
+// arguments are passed in a0-a7 (confirmed by inspecting glibc sources).
+// (http://man7.org/linux/man-pages/man2/syscall.2.html)
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS "memory"
+
+static uptr __internal_syscall(u64 nr) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0");
+  __asm__ volatile("syscall 0\n\t"
+                   : "=r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4, long arg5) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4, long arg5, long arg6) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4, long arg5, long arg6, long arg7) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  register u64 a6 asm("a6") = arg7;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+                     "r"(a6)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *internal_errno) {
+  if (retval >= (uptr)-4095) {
+    if (internal_errno)
+      *internal_errno = -retval;
+    return true;
+  }
+  return false;
+}