Subject: Re: [PATCH 17/29] [AARCH64] Syscalls for ILP32 are passed always via 64bit values.
From: Will Newton
To: Andrew Pinski
Cc: libc-alpha
Date: Tue, 18 Nov 2014 12:50:00 -0000
In-Reply-To: <1414396793-9005-18-git-send-email-apinski@cavium.com>
References: <1414396793-9005-1-git-send-email-apinski@cavium.com>
 <1414396793-9005-18-git-send-email-apinski@cavium.com>

On 27 October 2014 07:59, Andrew Pinski wrote:
> This patch adds support for ILP32 syscalls, sign and zero extending
> where needed.  Unlike LP64, pointers are 32-bit and need to be zero
> extended rather than the standard sign extension that the code would
> otherwise do.  We take advantage of ssize_t being long rather than
> int for ILP32 to get this correct.
>
> 	* sysdeps/unix/sysv/linux/aarch64/sysdep.h
> 	(INLINE_VSYSCALL): Use long long instead of long.
> 	(INTERNAL_VSYSCALL): Likewise.
> 	(INLINE_SYSCALL): Likewise.
> 	(INTERNAL_SYSCALL_RAW): Likewise.
> 	(ARGIFY): New macro.
> 	(LOAD_ARGS_0): Use long long instead of long.
> 	(LOAD_ARGS_1): Use long long instead of long and use ARGIFY.
> 	(LOAD_ARGS_2): Likewise.
> 	(LOAD_ARGS_3): Likewise.
> 	(LOAD_ARGS_4): Likewise.
> 	(LOAD_ARGS_5): Likewise.
> 	(LOAD_ARGS_6): Likewise.
> 	(LOAD_ARGS_7): Likewise.
> ---
>  sysdeps/unix/sysv/linux/aarch64/sysdep.h | 52 ++++++++++++++++++-----------
>  1 files changed, 32 insertions(+), 20 deletions(-)
>
> diff --git a/sysdeps/unix/sysv/linux/aarch64/sysdep.h b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> index fc31661..0d9fa8a 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> @@ -156,7 +156,7 @@
>    ({ \
>      __label__ out; \
>      __label__ iserr; \
> -    long sc_ret; \
> +    long long sc_ret; \
>      INTERNAL_SYSCALL_DECL (sc_err); \
>      \
>      if (__vdso_##name != NULL) \
> @@ -187,7 +187,7 @@
>  # define INTERNAL_VSYSCALL(name, err, nr, args...) \
>    ({ \
>      __label__ out; \
> -    long v_ret; \
> +    long long v_ret; \
>      \
>      if (__vdso_##name != NULL) \
>        { \
> @@ -224,11 +224,11 @@
>     call.  */
>  # undef INLINE_SYSCALL
>  # define INLINE_SYSCALL(name, nr, args...) \
> -  ({ unsigned long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
> +  ({ unsigned long long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
>      if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_sys_result, ), 0)) \
>        { \
>          __set_errno (INTERNAL_SYSCALL_ERRNO (_sys_result, )); \
> -        _sys_result = (unsigned long) -1; \
> +        _sys_result = (unsigned long long) -1; \
>        } \
>      (long) _sys_result; })
>
> @@ -237,10 +237,10 @@
>
>  # undef INTERNAL_SYSCALL_RAW
>  # define INTERNAL_SYSCALL_RAW(name, err, nr, args...) \
> -  ({ long _sys_result; \
> +  ({ long long _sys_result; \
>      { \
>        LOAD_ARGS_##nr (args) \
> -      register long _x8 asm ("x8") = (name); \
> +      register long long _x8 asm ("x8") = (name); \
>        asm volatile ("svc 0 // syscall " # name \
>                      : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory"); \
>        _sys_result = _x0; \
>      } \
> @@ -262,36 +262,48 @@
>  # undef INTERNAL_SYSCALL_ERRNO
>  # define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
>
> +/* Convert X to a long long, without losing any bits if it is one
> +   already or warning if it is a 32-bit pointer.  This zero extends
> +   32-bit pointers and sign extends other signed types.  Note this only
> +   works because ssize_t is long and short-short is promoted to int.  */
> +#define ARGIFY(X) \
> +  ((unsigned long long) \
> +   __builtin_choose_expr(__builtin_types_compatible_p(__typeof__(X), __typeof__((X) - (X))), \
> +     (X), \
> +     __builtin_choose_expr(__builtin_types_compatible_p(int, __typeof__((X) - (X))), \
> +       (X), \
> +       (unsigned long)(X))))
> +

This could do with spaces before the parentheses, and the line wrapping could be made more readable, but otherwise this looks OK to me.
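Untested, but something along these lines is the shape I had in mind; the logic is unchanged, only the whitespace and wrapping differ:

#define ARGIFY(X) \
  ((unsigned long long) \
   __builtin_choose_expr ( \
     __builtin_types_compatible_p (__typeof__ (X), \
                                   __typeof__ ((X) - (X))), \
     (X), \
     __builtin_choose_expr ( \
       __builtin_types_compatible_p (int, __typeof__ ((X) - (X))), \
       (X), \
       (unsigned long) (X))))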
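For the archives: the zero- versus sign-extension issue the cover text
describes is easy to demonstrate by simulating the 32-bit pointer bits on
any host. This is a standalone sketch, not part of the patch (the
unsigned-to-signed conversion here relies on GCC's modulo behaviour):

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* Stand-in for the bit pattern of an ILP32 pointer with the high
     bit set.  */
  uint32_t addr = 0x80000000u;

  /* Widening through an unsigned 32-bit type zero extends; this is
     the value the kernel must see for a pointer argument.  */
  unsigned long long good = (unsigned long long) addr;

  /* Widening through a signed 32-bit type sign extends, smearing
     ones across the top half of the 64-bit register.  */
  unsigned long long bad = (unsigned long long) (int32_t) addr;

  printf ("zero extended: %#llx\n", good);  /* 0x80000000 */
  printf ("sign extended: %#llx\n", bad);   /* 0xffffffff80000000 */
  return 0;
}

This is why ARGIFY routes pointers through (unsigned long) first: under
ILP32 that type is 32 bits wide, so the final widening to unsigned long
long is a zero extension, while signed integer arguments still get the
sign extension the kernel expects.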
>  # define LOAD_ARGS_0() \
> -  register long _x0 asm ("x0");
> +  register long long _x0 asm ("x0");
>  # define LOAD_ARGS_1(x0) \
> -  long _x0tmp = (long) (x0); \
> +  long long _x0tmp = ARGIFY (x0); \
>    LOAD_ARGS_0 () \
>    _x0 = _x0tmp;
>  # define LOAD_ARGS_2(x0, x1) \
> -  long _x1tmp = (long) (x1); \
> +  long long _x1tmp = ARGIFY (x1); \
>    LOAD_ARGS_1 (x0) \
> -  register long _x1 asm ("x1") = _x1tmp;
> +  register long long _x1 asm ("x1") = _x1tmp;
>  # define LOAD_ARGS_3(x0, x1, x2) \
> -  long _x2tmp = (long) (x2); \
> +  long long _x2tmp = ARGIFY (x2); \
>    LOAD_ARGS_2 (x0, x1) \
> -  register long _x2 asm ("x2") = _x2tmp;
> +  register long long _x2 asm ("x2") = _x2tmp;
>  # define LOAD_ARGS_4(x0, x1, x2, x3) \
> -  long _x3tmp = (long) (x3); \
> +  long long _x3tmp = ARGIFY (x3); \
>    LOAD_ARGS_3 (x0, x1, x2) \
> -  register long _x3 asm ("x3") = _x3tmp;
> +  register long long _x3 asm ("x3") = _x3tmp;
>  # define LOAD_ARGS_5(x0, x1, x2, x3, x4) \
> -  long _x4tmp = (long) (x4); \
> +  long long _x4tmp = ARGIFY (x4); \
>    LOAD_ARGS_4 (x0, x1, x2, x3) \
> -  register long _x4 asm ("x4") = _x4tmp;
> +  register long long _x4 asm ("x4") = _x4tmp;
>  # define LOAD_ARGS_6(x0, x1, x2, x3, x4, x5) \
> -  long _x5tmp = (long) (x5); \
> +  long long _x5tmp = ARGIFY (x5); \
>    LOAD_ARGS_5 (x0, x1, x2, x3, x4) \
> -  register long _x5 asm ("x5") = _x5tmp;
> +  register long long _x5 asm ("x5") = _x5tmp;
>  # define LOAD_ARGS_7(x0, x1, x2, x3, x4, x5, x6)\
> -  long _x6tmp = (long) (x6); \
> +  long long _x6tmp = ARGIFY (x6); \
>    LOAD_ARGS_6 (x0, x1, x2, x3, x4, x5) \
> -  register long _x6 asm ("x6") = _x6tmp;
> +  register long long _x6 asm ("x6") = _x6tmp;
>
>  # define ASM_ARGS_0
>  # define ASM_ARGS_1 , "r" (_x0)
> --
> 1.7.2.5
>

-- 
Will Newton
Toolchain Working Group, Linaro