Re: [PATCH 17/29] [AARCH64] Syscalls for ILP32 are passed always via 64bit values.
- From: Will Newton <will.newton@linaro.org>
- To: Andrew Pinski <apinski@cavium.com>
- Cc: libc-alpha <libc-alpha@sourceware.org>
- Date: Tue, 18 Nov 2014 12:50:22 +0000
- Subject: Re: [PATCH 17/29] [AARCH64] Syscalls for ILP32 are passed always via 64bit values.
- References: <1414396793-9005-1-git-send-email-apinski@cavium.com> <1414396793-9005-18-git-send-email-apinski@cavium.com>
On 27 October 2014 07:59, Andrew Pinski <apinski@cavium.com> wrote:
> This patch adds support for ILP32 syscalls, sign- and zero-extending
> arguments where needed. Unlike LP64, pointers are 32-bit and must be
> zero-extended rather than sign-extended, which is what the code would
> otherwise do. We take advantage of ssize_t being long rather than int
> for ILP32 to get this correct.
>
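For readers less steeped in ILP32: the point of the description above
is that a 32-bit pointer must land in a 64-bit argument register with
its top half clear, while a negative signed integer must fill the top
half with ones. A stand-alone illustration (my own, not from the
patch; int32_t/uint32_t stand in for the 32-bit ILP32 types so it
prints the same on any host):

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* Model the two extension cases an ILP32 syscall shim must get
     right: a negative int argument wants sign extension into the
     64-bit register, a pointer wants zero extension.  */
  int32_t fd = -1;               /* e.g. an error return used as an argument */
  uint32_t addr = 0x80000000u;   /* a pointer value with the top bit set */

  unsigned long long good_fd  = (long long) fd;            /* ffffffffffffffff */
  unsigned long long good_ptr = (unsigned long long) addr; /* 0000000080000000 */
  unsigned long long bad_ptr  = (long long)(int32_t) addr; /* ffffffff80000000 */

  printf ("%016llx\n%016llx\n%016llx\n", good_fd, good_ptr, bad_ptr);
  return 0;
}

The last value shows what goes wrong if a pointer is pushed through a
signed 32-bit type on its way into the register: the top bit smears
into the upper half and the kernel sees a bogus address.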
> * sysdeps/unix/sysv/linux/aarch64/sysdep.h
> (INLINE_VSYSCALL): Use long long instead of long.
> (INTERNAL_VSYSCALL): Likewise.
> (INLINE_SYSCALL): Likewise.
> (INTERNAL_SYSCALL_RAW): Likewise.
> (ARGIFY): New macro.
> (LOAD_ARGS_0): Use long long instead of long.
> (LOAD_ARGS_1): Use long long instead of long
> and use ARGIFY.
> (LOAD_ARGS_2): Likewise.
> (LOAD_ARGS_3): Likewise.
> (LOAD_ARGS_4): Likewise.
> (LOAD_ARGS_5): Likewise.
> (LOAD_ARGS_6): Likewise.
> (LOAD_ARGS_7): Likewise.
> ---
> sysdeps/unix/sysv/linux/aarch64/sysdep.h | 52 ++++++++++++++++++-----------
> 1 files changed, 32 insertions(+), 20 deletions(-)
>
> diff --git a/sysdeps/unix/sysv/linux/aarch64/sysdep.h b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> index fc31661..0d9fa8a 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> @@ -156,7 +156,7 @@
> ({ \
> __label__ out; \
> __label__ iserr; \
> - long sc_ret; \
> + long long sc_ret; \
> INTERNAL_SYSCALL_DECL (sc_err); \
> \
> if (__vdso_##name != NULL) \
> @@ -187,7 +187,7 @@
> # define INTERNAL_VSYSCALL(name, err, nr, args...) \
> ({ \
> __label__ out; \
> - long v_ret; \
> + long long v_ret; \
> \
> if (__vdso_##name != NULL) \
> { \
> @@ -224,11 +224,11 @@
> call. */
> # undef INLINE_SYSCALL
> # define INLINE_SYSCALL(name, nr, args...) \
> - ({ unsigned long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
> + ({ unsigned long long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
> if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_sys_result, ), 0))\
> { \
> __set_errno (INTERNAL_SYSCALL_ERRNO (_sys_result, )); \
> - _sys_result = (unsigned long) -1; \
> + _sys_result = (unsigned long long) -1; \
> } \
> (long) _sys_result; })
>
> @@ -237,10 +237,10 @@
>
> # undef INTERNAL_SYSCALL_RAW
> # define INTERNAL_SYSCALL_RAW(name, err, nr, args...) \
> - ({ long _sys_result; \
> + ({ long long _sys_result; \
> { \
> LOAD_ARGS_##nr (args) \
> - register long _x8 asm ("x8") = (name); \
> + register long long _x8 asm ("x8") = (name); \
> asm volatile ("svc 0 // syscall " # name \
> : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory"); \
> _sys_result = _x0; \
> @@ -262,36 +262,48 @@
> # undef INTERNAL_SYSCALL_ERRNO
> # define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
>
> +/* Convert X to a long long, without losing any bits if it is one
> + already or warning if it is a 32-bit pointer. This zero extends
> + 32-bit pointers and sign extends other signed types. Note this only
> + works because ssize_t is long and short-short is promoted to int. */
> +#define ARGIFY(X) \
> + ((unsigned long long) \
> + __builtin_choose_expr(__builtin_types_compatible_p(__typeof__(X), __typeof__((X) - (X))), \
> + (X), \
> + __builtin_choose_expr(__builtin_types_compatible_p(int, __typeof__((X) - (X))), \
> + (X), \
> + (unsigned long)(X))))
> +
This could do with spaces before parentheses and the line wrapping
could be made more readable, but otherwise this looks ok.
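For anyone puzzling over the __builtin_choose_expr chain: the
(X) - (X) trick yields the type X has after the usual arithmetic
conversions, without evaluating X. A little harness I used to
convince myself which branch each type selects (mine, not part of the
patch):

#include <stdio.h>

/* 1: typeof (X) survives subtraction unchanged (int, long, size_t,
      ...), so plain conversion to unsigned long long already does the
      right extension.
   2: (X) - (X) was promoted to int (char, short), likewise fine.
   3: the fallback, reached by pointers on targets where ptrdiff_t is
      not int: the (unsigned long) cast is what zero-extends them.  */
#define ARGIFY_BRANCH(X) \
  (__builtin_types_compatible_p (__typeof__ (X), __typeof__ ((X) - (X))) ? 1 \
   : __builtin_types_compatible_p (int, __typeof__ ((X) - (X))) ? 2 : 3)

int
main (void)
{
  char c = 0; short s = 0; int i = 0; long l = 0; char *p = 0;

  /* On an LP64 host this prints 2 2 1 1 3: char * - char * is
     ptrdiff_t == long there, so pointers miss both tests and take the
     zero-extending fallback.  */
  printf ("%d %d %d %d %d\n", ARGIFY_BRANCH (c), ARGIFY_BRANCH (s),
	  ARGIFY_BRANCH (i), ARGIFY_BRANCH (l), ARGIFY_BRANCH (p));
  return 0;
}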
> # define LOAD_ARGS_0() \
> - register long _x0 asm ("x0");
> + register long long _x0 asm ("x0");
> # define LOAD_ARGS_1(x0) \
> - long _x0tmp = (long) (x0); \
> + long long _x0tmp = ARGIFY (x0); \
> LOAD_ARGS_0 () \
> _x0 = _x0tmp;
> # define LOAD_ARGS_2(x0, x1) \
> - long _x1tmp = (long) (x1); \
> + long long _x1tmp = ARGIFY (x1); \
> LOAD_ARGS_1 (x0) \
> - register long _x1 asm ("x1") = _x1tmp;
> + register long long _x1 asm ("x1") = _x1tmp;
> # define LOAD_ARGS_3(x0, x1, x2) \
> - long _x2tmp = (long) (x2); \
> + long long _x2tmp = ARGIFY (x2); \
> LOAD_ARGS_2 (x0, x1) \
> - register long _x2 asm ("x2") = _x2tmp;
> + register long long _x2 asm ("x2") = _x2tmp;
> # define LOAD_ARGS_4(x0, x1, x2, x3) \
> - long _x3tmp = (long) (x3); \
> + long long _x3tmp = ARGIFY (x3); \
> LOAD_ARGS_3 (x0, x1, x2) \
> - register long _x3 asm ("x3") = _x3tmp;
> + register long long _x3 asm ("x3") = _x3tmp;
> # define LOAD_ARGS_5(x0, x1, x2, x3, x4) \
> - long _x4tmp = (long) (x4); \
> + long long _x4tmp = ARGIFY (x4); \
> LOAD_ARGS_4 (x0, x1, x2, x3) \
> - register long _x4 asm ("x4") = _x4tmp;
> + register long long _x4 asm ("x4") = _x4tmp;
> # define LOAD_ARGS_6(x0, x1, x2, x3, x4, x5) \
> - long _x5tmp = (long) (x5); \
> + long long _x5tmp = ARGIFY (x5); \
> LOAD_ARGS_5 (x0, x1, x2, x3, x4) \
> - register long _x5 asm ("x5") = _x5tmp;
> + register long long _x5 asm ("x5") = _x5tmp;
> # define LOAD_ARGS_7(x0, x1, x2, x3, x4, x5, x6)\
> - long _x6tmp = (long) (x6); \
> + long long _x6tmp = ARGIFY (x6); \
> LOAD_ARGS_6 (x0, x1, x2, x3, x4, x5) \
> - register long _x6 asm ("x6") = _x6tmp;
> + register long long _x6 asm ("x6") = _x6tmp;
>
> # define ASM_ARGS_0
> # define ASM_ARGS_1 , "r" (_x0)
> --
> 1.7.2.5
>
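To close the loop, here is roughly what the rewritten macros produce
for a three-argument call. This is a hand expansion I compiled
separately (it only builds for an AArch64 Linux target, of course);
the function name and simplifications are mine, with ARGIFY copied
from the patch:

#include <sys/syscall.h>   /* __NR_write */

#define ARGIFY(X) \
  ((unsigned long long) \
   __builtin_choose_expr (__builtin_types_compatible_p (__typeof__ (X), __typeof__ ((X) - (X))), \
                          (X), \
                          __builtin_choose_expr (__builtin_types_compatible_p (int, __typeof__ ((X) - (X))), \
                                                 (X), \
                                                 (unsigned long) (X))))

/* Approximately INTERNAL_SYSCALL_RAW (__NR_write, err, 3, fd, buf,
   count) after the patch: every argument is evaluated into a
   temporary before any register variable is bound, so evaluating one
   argument cannot clobber a register already holding another.  */
static long long
raw_write (int fd, const void *buf, unsigned long count)
{
  long long _x2tmp = ARGIFY (count);            /* LOAD_ARGS_3 */
  long long _x1tmp = ARGIFY (buf);              /* LOAD_ARGS_2 */
  long long _x0tmp = ARGIFY (fd);               /* LOAD_ARGS_1 */
  register long long _x0 asm ("x0") = _x0tmp;   /* LOAD_ARGS_0 */
  register long long _x1 asm ("x1") = _x1tmp;
  register long long _x2 asm ("x2") = _x2tmp;
  register long long _x8 asm ("x8") = __NR_write;

  asm volatile ("svc 0"
                : "=r" (_x0)
                : "r" (_x8), "r" (_x0), "r" (_x1), "r" (_x2)
                : "memory");
  return _x0;
}

int
main (void)
{
  raw_write (1, "hello\n", 6);
  return 0;
}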
--
Will Newton
Toolchain Working Group, Linaro