
Re: [PATCH 1/2] Hoist ZVA check out of the function


On 19/09/2017 08:23, Siddhesh Poyarekar wrote:

> The DZP bit in the dczid_el0 register does not change dynamically, so
> it is safe to read once during program startup.  Hoist the zva check
> into an ifunc resolver and store the result into a static variable,
> which can be read in case of non-standard zva sizes.  This effectively
> adds 3 ifunc variants for memset - one for cases where zva is
> disabled, one for 64 byte zva and another for 128 byte zva.  I have
> retained the older memset as __memset_generic for internal libc.so use
> so that the change impact is minimal.  We should eventually have a
> discussion on what is more expensive, reading dczid_el0 on every
> memset invocation or the indirection due to PLT.
>
> The gains due to this are significant for falkor, with gains as high
> as 80% in some cases.  Likewise for mustang, although the numbers are
> slightly lower.  Here's a sample from the falkor tests:
>
I would use a more compact ChangeLog entry, such as:

	* sysdeps/aarch64/memset.S (do_no_zva, do_zva_64,
	do_zva_128, do_zva_default): New macros.

Same for the other entries where it applies.

> 	(MEMSET): Use the new macros.
> 	(MEMSET)[INTERNAL_MEMSET]: Retain old memset.
> 	(MEMSET)[!INTERNAL_MEMSET]: Remove zva check.
> 	* sysdeps/aarch64/multiarch/Makefile (sysdep_routines):
> 	Add memset_generic, memset_nozva and memset_zva.
> 	* sysdeps/aarch64/multiarch/ifunc-impl-list.c
> 	(__libc_ifunc_impl_list): Add memset ifuncs.
> 	* sysdeps/aarch64/multiarch/init-arch.h (INIT_ARCH): New
> 	static variable __aarch64_zva_size and local variable
> 	zva_size.
> 	* sysdeps/aarch64/multiarch/memset.c: New file.
> 	* sysdeps/aarch64/multiarch/memset_generic.S: New file.
> 	* sysdeps/aarch64/multiarch/memset_nozva.S: New file.
> 	* sysdeps/aarch64/multiarch/memset_zva.S: New file.
> 	* sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> 	(DCZID_DZP_MASK): New macro.
> 	(DCZID_BS_MASK): Likewise.
> 	(init_cpu_features): Read and set zva_size.
> 	* sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> 	(struct cpu_features): New member zva_size.
>
>
> diff --git a/sysdeps/aarch64/memset.S b/sysdeps/aarch64/memset.S
> index 110fd22..8cff3a4 100644
> --- a/sysdeps/aarch64/memset.S
> +++ b/sysdeps/aarch64/memset.S
> @@ -37,7 +37,108 @@
>  #define zva_len x7
>  #define zva_lenw w7
>  
> -ENTRY_ALIGN (__memset, 6)
> +/* Macros that do the critical loops for either no zva or zva of 64 bytes, 128
> +   bytes and higher sizes.  */
> +
> +#ifndef ZVA_MACROS
> +# define ZVA_MACROS
> +/* No ZVA.  */
> +.macro do_no_zva
> +	sub	count, dstend, dst	/* Count is 16 too large.  */
> +	add	dst, dst, 16
> +	sub	count, count, 64 + 16	/* Adjust count and bias for loop.  */
> +1:	stp	q0, q0, [dst], 64
> +	stp	q0, q0, [dst, -32]
> +	subs	count, count, 64
> +	b.hi	1b
> +	stp	q0, q0, [dstend, -64]
> +	stp	q0, q0, [dstend, -32]
> +	ret
> +.endm
> +
> +/* Write the first and last 64 byte aligned block using stp rather
> +   than using DC ZVA.  This is faster on some cores.  */
> +.macro do_zva_64
> +	str	q0, [dst, 16]
> +	stp	q0, q0, [dst, 32]
> +	bic	dst, dst, 63
> +	stp	q0, q0, [dst, 64]
> +	stp	q0, q0, [dst, 96]
> +	sub	count, dstend, dst	/* Count is now 128 too large.	*/
> +	sub	count, count, 128+64+64	/* Adjust count and bias for loop.  */
> +	add	dst, dst, 128
> +	nop
> +1:	dc	zva, dst
> +	add	dst, dst, 64
> +	subs	count, count, 64
> +	b.hi	1b
> +	stp	q0, q0, [dst, 0]
> +	stp	q0, q0, [dst, 32]
> +	stp	q0, q0, [dstend, -64]
> +	stp	q0, q0, [dstend, -32]
> +	ret
> +.endm
> +
> +/* ZVA size of 128 bytes.  */
> +.macro do_zva_128
> +	str	q0, [dst, 16]
> +	stp	q0, q0, [dst, 32]
> +	stp	q0, q0, [dst, 64]
> +	stp	q0, q0, [dst, 96]
> +	bic	dst, dst, 127
> +	sub	count, dstend, dst	/* Count is now 128 too large.	*/
> +	sub	count, count, 128+128	/* Adjust count and bias for loop.  */
> +	add	dst, dst, 128
> +1:	dc	zva, dst
> +	add	dst, dst, 128
> +	subs	count, count, 128
> +	b.hi	1b
> +	stp	q0, q0, [dstend, -128]
> +	stp	q0, q0, [dstend, -96]
> +	stp	q0, q0, [dstend, -64]
> +	stp	q0, q0, [dstend, -32]
> +	ret
> +.endm
> +
> +/* ZVA size of more than 128 bytes.  */
> +.macro do_zva_default
> +	add	tmp1, zva_len, 64	/* Max alignment bytes written.	 */
> +	cmp	count, tmp1
> +	blo	MEMSET_L(no_zva)
> +
> +	sub	tmp2, zva_len, 1
> +	add	tmp1, dst, zva_len
> +	add	dst, dst, 16
> +	subs	count, tmp1, dst	/* Actual alignment bytes to write.  */
> +	bic	tmp1, tmp1, tmp2	/* Aligned dc zva start address.  */
> +	beq	2f
> +1:	stp	q0, q0, [dst], 64
> +	stp	q0, q0, [dst, -32]
> +	subs	count, count, 64
> +	b.hi	1b
> +2:	mov	dst, tmp1
> +	sub	count, dstend, tmp1	/* Remaining bytes to write.  */
> +	subs	count, count, zva_len
> +	b.lo	4f
> +3:	dc	zva, dst
> +	add	dst, dst, zva_len
> +	subs	count, count, zva_len
> +	b.hs	3b
> +4:	add	count, count, zva_len
> +	subs	count, count, 64
> +	b.ls	6f
> +5:	stp	q0, q0, [dst], 64
> +	stp	q0, q0, [dst, -32]
> +	subs	count, count, 64
> +	b.hi	5b
> +6:	stp	q0, q0, [dstend, -64]
> +	stp	q0, q0, [dstend, -32]
> +	ret
> +.endm
> +#endif
> +
> +/* Memset entry point.  */
> +ENTRY_ALIGN (MEMSET, 6)
>  
>  	DELOUSE (0)
>  	DELOUSE (2)
> @@ -46,9 +147,9 @@ ENTRY_ALIGN (__memset, 6)
>  	add	dstend, dstin, count
>  
>  	cmp	count, 96
> -	b.hi	L(set_long)
> +	b.hi	MEMSET_L(set_long)
>  	cmp	count, 16
> -	b.hs	L(set_medium)
> +	b.hs	MEMSET_L(set_medium)
>  	mov	val, v0.D[0]
>  
>  	/* Set 0..15 bytes.  */
> @@ -68,9 +169,9 @@ ENTRY_ALIGN (__memset, 6)
>  3:	ret
>  
>  	/* Set 17..96 bytes.  */
> -L(set_medium):
> +MEMSET_L(set_medium):
>  	str	q0, [dstin]
> -	tbnz	count, 6, L(set96)
> +	tbnz	count, 6, MEMSET_L(set96)
>  	str	q0, [dstend, -16]
>  	tbz	count, 5, 1f
>  	str	q0, [dstin, 16]
> @@ -80,7 +181,7 @@ L(set_medium):
>  	.p2align 4
>  	/* Set 64..96 bytes.  Write 64 bytes from the start and
>  	   32 bytes from the end.  */
> -L(set96):
> +MEMSET_L(set96):
>  	str	q0, [dstin, 16]
>  	stp	q0, q0, [dstin, 32]
>  	stp	q0, q0, [dstend, -32]
> @@ -88,108 +189,63 @@ L(set96):
>  
>  	.p2align 3
>  	nop
> -L(set_long):
> +MEMSET_L(set_long):
> +#ifdef INTERNAL_MEMSET
>  	and	valw, valw, 255
>  	bic	dst, dstin, 15
>  	str	q0, [dstin]
>  	cmp	count, 256
>  	ccmp	valw, 0, 0, cs
> -	b.eq	L(try_zva)
> -L(no_zva):
> -	sub	count, dstend, dst	/* Count is 16 too large.  */
> -	add	dst, dst, 16
> -	sub	count, count, 64 + 16	/* Adjust count and bias for loop.  */
> -1:	stp	q0, q0, [dst], 64
> -	stp	q0, q0, [dst, -32]
> -L(tail64):
> -	subs	count, count, 64
> -	b.hi	1b
> -2:	stp	q0, q0, [dstend, -64]
> -	stp	q0, q0, [dstend, -32]
> -	ret
> +	b.eq	MEMSET_L(try_zva)
>  
> -	.p2align 3
> -L(try_zva):
> +MEMSET_L(no_zva):
> +	do_no_zva
> +
> +	.p2align 4
> +MEMSET_L(try_zva):
>  	mrs	tmp1, dczid_el0
> -	tbnz	tmp1w, 4, L(no_zva)
>  	and	tmp1w, tmp1w, 15
>  	cmp	tmp1w, 4	/* ZVA size is 64 bytes.  */
> -	b.ne	 L(zva_128)
> +	b.ne	 MEMSET_L(zva_128)
> +	do_zva_64
>  
> -	/* Write the first and last 64 byte aligned block using stp rather
> -	   than using DC ZVA.  This is faster on some cores.
> -	 */
> -L(zva_64):
> -	str	q0, [dst, 16]
> -	stp	q0, q0, [dst, 32]
> -	bic	dst, dst, 63
> -	stp	q0, q0, [dst, 64]
> -	stp	q0, q0, [dst, 96]
> -	sub	count, dstend, dst	/* Count is now 128 too large.	*/
> -	sub	count, count, 128+64+64	/* Adjust count and bias for loop.  */
> -	add	dst, dst, 128
> -	nop
> -1:	dc	zva, dst
> -	add	dst, dst, 64
> -	subs	count, count, 64
> -	b.hi	1b
> -	stp	q0, q0, [dst, 0]
> -	stp	q0, q0, [dst, 32]
> -	stp	q0, q0, [dstend, -64]
> -	stp	q0, q0, [dstend, -32]
> -	ret
> -
> -	.p2align 3
> -L(zva_128):
> +MEMSET_L(zva_128):
>  	cmp	tmp1w, 5	/* ZVA size is 128 bytes.  */
> -	b.ne	L(zva_other)
> +	b.ne	MEMSET_L(zva_other)
> +	do_zva_128
>  
> -	str	q0, [dst, 16]
> -	stp	q0, q0, [dst, 32]
> -	stp	q0, q0, [dst, 64]
> -	stp	q0, q0, [dst, 96]
> -	bic	dst, dst, 127
> -	sub	count, dstend, dst	/* Count is now 128 too large.	*/
> -	sub	count, count, 128+128	/* Adjust count and bias for loop.  */
> -	add	dst, dst, 128
> -1:	dc	zva, dst
> -	add	dst, dst, 128
> -	subs	count, count, 128
> -	b.hi	1b
> -	stp	q0, q0, [dstend, -128]
> -	stp	q0, q0, [dstend, -96]
> -	stp	q0, q0, [dstend, -64]
> -	stp	q0, q0, [dstend, -32]
> -	ret
> -
> -L(zva_other):
> +MEMSET_L(zva_other):
>  	mov	tmp2w, 4
>  	lsl	zva_lenw, tmp2w, tmp1w
> -	add	tmp1, zva_len, 64	/* Max alignment bytes written.	 */
> -	cmp	count, tmp1
> -	blo	L(no_zva)
> +	do_zva_default
> +#else
> +	/* Memset called through PLT, so we need only one of the ZVA
> +	   variants.  */
> +# ifdef MEMSET_ZVA
> +	and	valw, valw, 255
> +# endif
> +	bic	dst, dstin, 15
> +	str	q0, [dstin]
> +# ifdef MEMSET_ZVA
> +	cmp	count, 256
> +	ccmp	valw, 0, 0, cs
> +	b.eq	MEMSET_L(try_zva)
> +# endif
> +MEMSET_L(no_zva):
> +	do_no_zva
> +# if defined MEMSET_ZVA
> +MEMSET_L(try_zva):
> +#  if MEMSET_ZVA == 64
> +	do_zva_64
> +#  elif MEMSET_ZVA == 128
> +	do_zva_128
> +#  else
> +	adrp	zva_len, __aarch64_zva_size
> +	ldr	zva_len, [zva_len, #:lo12:__aarch64_zva_size]
> +	do_zva_default
> +#  endif
> +# endif
> +#endif
>  
> -	sub	tmp2, zva_len, 1
> -	add	tmp1, dst, zva_len
> -	add	dst, dst, 16
> -	subs	count, tmp1, dst	/* Actual alignment bytes to write.  */
> -	bic	tmp1, tmp1, tmp2	/* Aligned dc zva start address.  */
> -	beq	2f
> -1:	stp	q0, q0, [dst], 64
> -	stp	q0, q0, [dst, -32]
> -	subs	count, count, 64
> -	b.hi	1b
> -2:	mov	dst, tmp1
> -	sub	count, dstend, tmp1	/* Remaining bytes to write.  */
> -	subs	count, count, zva_len
> -	b.lo	4f
> -3:	dc	zva, dst
> -	add	dst, dst, zva_len
> -	subs	count, count, zva_len
> -	b.hs	3b
> -4:	add	count, count, zva_len
> -	b	L(tail64)
> -
> -END (__memset)
> -weak_alias (__memset, memset)
> -libc_hidden_builtin_def (memset)
> +END (MEMSET)
> +libc_hidden_builtin_def (MEMSET)
> diff --git a/sysdeps/aarch64/multiarch/Makefile b/sysdeps/aarch64/multiarch/Makefile
> index 9aa1e79..f611182 100644
> --- a/sysdeps/aarch64/multiarch/Makefile
> +++ b/sysdeps/aarch64/multiarch/Makefile
> @@ -1,4 +1,4 @@
>  ifeq ($(subdir),string)
>  sysdep_routines += memcpy_generic memcpy_thunderx memcpy_falkor \
> -		   memmove_falkor
> +		   memmove_falkor memset_generic memset_nozva memset_zva
>  endif
> diff --git a/sysdeps/aarch64/multiarch/ifunc-impl-list.c b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
> index 2cb74d5..29148ac 100644
> --- a/sysdeps/aarch64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
> @@ -46,6 +46,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>  	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_thunderx)
>  	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_falkor)
>  	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_generic))
> +  IFUNC_IMPL (i, name, memset,
> +	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_nozva)
> +	      IFUNC_IMPL_ADD (array, i, memset, (zva_size == 64), __memset_zva_64)
> +	      IFUNC_IMPL_ADD (array, i, memset, (zva_size == 128), __memset_zva_128)
> +	      IFUNC_IMPL_ADD (array, i, memset, (zva_size > 0), __memset_zva_default)
> +	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))
>  
>    return i;
>  }
> diff --git a/sysdeps/aarch64/multiarch/init-arch.h b/sysdeps/aarch64/multiarch/init-arch.h
> index 3af442c..541c27e 100644
> --- a/sysdeps/aarch64/multiarch/init-arch.h
> +++ b/sysdeps/aarch64/multiarch/init-arch.h
> @@ -18,6 +18,9 @@
>  
>  #include <ldsodefs.h>
>  
> -#define INIT_ARCH()				\
> -  uint64_t __attribute__((unused)) midr =	\
> -    GLRO(dl_aarch64_cpu_features).midr_el1;
> +#define INIT_ARCH()							      \
> +  uint64_t __attribute__((unused)) midr =				      \
> +    GLRO(dl_aarch64_cpu_features).midr_el1;				      \
> +  extern unsigned __aarch64_zva_size;					      \
> +  unsigned __attribute__((unused)) zva_size = __aarch64_zva_size =	      \
> +    GLRO(dl_aarch64_cpu_features).zva_size;
> diff --git a/sysdeps/aarch64/multiarch/memset.c b/sysdeps/aarch64/multiarch/memset.c
> new file mode 100644
> index 0000000..58e669a
> --- /dev/null
> +++ b/sysdeps/aarch64/multiarch/memset.c
> @@ -0,0 +1,47 @@
> +/* Multiple versions of memset. AARCH64 version.
> +   Copyright (C) 2017 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +/* Define multiple versions only for the definition in libc.  */
> +
> +#if IS_IN (libc)
> +/* Redefine memset so that the compiler won't complain about the type
> +   mismatch with the IFUNC selector in strong_alias, below.  */
> +# undef memset
> +# define memset __redirect_memset
> +# include <string.h>
> +# include <init-arch.h>
> +
> +unsigned __aarch64_zva_size;
> +
> +extern __typeof (__redirect_memset) __libc_memset;
> +
> +extern __typeof (__redirect_memset) __memset_nozva attribute_hidden;
> +extern __typeof (__redirect_memset) __memset_zva_64 attribute_hidden;
> +extern __typeof (__redirect_memset) __memset_zva_128 attribute_hidden;
> +extern __typeof (__redirect_memset) __memset_zva_default attribute_hidden;
> +
> +libc_ifunc (__libc_memset, (zva_size == 0 ? __memset_nozva
> +			    : (zva_size == 64 ? __memset_zva_64
> +			       : (zva_size == 128 ? __memset_zva_128
> +				  : __memset_zva_default))));
> +
> +# undef memset
> +strong_alias (__libc_memset, memset);
> +#else
> +#include <string/memset.c>
> +#endif

You don't need to use the default version for the loader; you can use the
generic sysdeps/aarch64/memset.S by creating an rtld-memset.S in
multiarch and defining the required macros.
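Something like the following untested sketch should do (macro names taken
from this patch; the INTERNAL_MEMSET variant keeps the runtime dczid_el0
check, which is safe for the loader since no ifunc resolution is needed):

	/* rtld-memset.S: reuse the generic memset for ld.so.  */
	#define MEMSET memset
	#define INTERNAL_MEMSET
	#define MEMSET_L(label) L(label)
	#include <sysdeps/aarch64/memset.S>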


> diff --git a/sysdeps/aarch64/multiarch/memset_generic.S b/sysdeps/aarch64/multiarch/memset_generic.S
> new file mode 100644
> index 0000000..56f1e02
> --- /dev/null
> +++ b/sysdeps/aarch64/multiarch/memset_generic.S
> @@ -0,0 +1,27 @@
> +/* Memset for aarch64, default version for internal use.
> +   Copyright (C) 2017 Free Software Foundation, Inc.
> +
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library.  If not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define MEMSET __memset_generic
> +#define INTERNAL_MEMSET
> +#define MEMSET_L(label) L(label)
> +#ifdef SHARED
> +	.globl __GI_memset; __GI_memset = __memset_generic
> +#endif

I would add a comment stating that this is essentially doing
libc_hidden_def (memset) and redirecting the internal implementation to
__memset_generic.
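Something like:

	/* In effect this is libc_hidden_def (memset): define __GI_memset
	   so that internal libc.so callers of memset go directly to
	   __memset_generic instead of through the PLT.  */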

> +
> +#include <sysdeps/aarch64/memset.S>
> diff --git a/sysdeps/aarch64/multiarch/memset_nozva.S b/sysdeps/aarch64/multiarch/memset_nozva.S
> new file mode 100644
> index 0000000..98045ac
> --- /dev/null
> +++ b/sysdeps/aarch64/multiarch/memset_nozva.S
> @@ -0,0 +1,22 @@
> +/* Memset for aarch64, ZVA disabled.
> +   Copyright (C) 2017 Free Software Foundation, Inc.
> +
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library.  If not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define MEMSET __memset_nozva
> +#define MEMSET_L(label) L(label)
> +#include <sysdeps/aarch64/memset.S>

Although not strictly required, I think we should avoid building these
for !IS_IN (libc), as memset_zva.S already does.  The same applies to
memset_generic.S.
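I.e. guard the whole file the same way, e.g. (sketch):

	#if IS_IN (libc)
	# define MEMSET __memset_nozva
	# define MEMSET_L(label) L(label)
	# include <sysdeps/aarch64/memset.S>
	#endif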

> diff --git a/sysdeps/aarch64/multiarch/memset_zva.S b/sysdeps/aarch64/multiarch/memset_zva.S
> new file mode 100644
> index 0000000..5d02b89
> --- /dev/null
> +++ b/sysdeps/aarch64/multiarch/memset_zva.S
> @@ -0,0 +1,41 @@
> +/* Memset for aarch64, ZVA enabled.
> +   Copyright (C) 2017 Free Software Foundation, Inc.
> +
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library.  If not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#if IS_IN (libc)
> +# define MEMSET __memset_zva_64
> +# define MEMSET_ZVA 64
> +# define MEMSET_L(label) L(label ## _zva64)
> +# include <sysdeps/aarch64/memset.S>
> +
> +# undef MEMSET
> +# undef MEMSET_ZVA
> +# undef MEMSET_L
> +# define MEMSET __memset_zva_128
> +# define MEMSET_ZVA 128
> +# define MEMSET_L(label) L(label ## _zva128)
> +# include <sysdeps/aarch64/memset.S>
> +
> +# undef MEMSET
> +# undef MEMSET_ZVA
> +# undef MEMSET_L
> +# define MEMSET __memset_zva_default
> +# define MEMSET_ZVA 1
> +# define MEMSET_L(label) L(label ## _zvadef)
> +# include <sysdeps/aarch64/memset.S>
> +#endif
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> index e769eeb..092ee81 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> @@ -20,6 +20,9 @@
>  #include <sys/auxv.h>
>  #include <elf/dl-hwcaps.h>
>  
> +#define DCZID_DZP_MASK (1 << 4)
> +#define DCZID_BS_MASK (0xf)
> +
>  #if HAVE_TUNABLES
>  struct cpu_list
>  {
> @@ -72,4 +75,11 @@ init_cpu_features (struct cpu_features *cpu_features)
>      }
>  
>    cpu_features->midr_el1 = midr;
> +
> +  /* Check if ZVA is enabled.  */
> +  unsigned dczid;
> +  asm volatile ("mrs %0, dczid_el0" : "=r"(dczid));
> +
> +  if ((dczid & DCZID_DZP_MASK) == 0)
> +    cpu_features->zva_size = 4 << (dczid & DCZID_BS_MASK);
>  }
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> index 73cb53d..f2b6afd 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> @@ -47,6 +47,7 @@
>  struct cpu_features
>  {
>    uint64_t midr_el1;
> +  unsigned zva_size;
>  };
>  
>  #endif /* _CPU_FEATURES_AARCH64_H  */

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>


