


Re: PATCH: Add AVX support to x86-64 _dl_runtime_profile


On Wed, Jul 01, 2009 at 06:39:42AM -0700, H.J. Lu wrote:
> AVX extends the xmm registers to 256 bits.  The x86-64
> _dl_runtime_profile needs to save and restore all 256 bits when
> running on an AVX processor.  This patch does a few things:
> 
> 1. Add a new 32-byte vector type, La_x86_64_vector (a sketch of the
> type follows below).
> 2. Use La_x86_64_vector instead of La_x86_64_xmm in La_x86_64_regs
> and La_x86_64_retval.
> 3. Move the vector register fields first since they need the largest
> alignment.
> 4. Add a configure check for gcc AVX support.
> 5. Align the stack to 32 bytes and use larger buffers for
> La_x86_64_regs and La_x86_64_retval.
> 6. Use a mini STT_GNU_IFUNC scheme with cpuid to properly save and
> restore the full vector registers if gcc supports AVX (a second
> sketch below shows the dispatch).
> 
> I also added a testcase.  Tested on Linux/x86-64 with gcc 4.3 and 4.4
> as well as on the AVX emulator.
> 

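For item 1, the new type boils down to the following condensed sketch
of the sysdeps/x86_64/bits/link.h change (the 32-byte vector needs
GCC >= 4.0; the two compile-time checks are illustration only, not
part of the patch):

/* Condensed from the patch: a 32-byte union that overlays one ymm
   register with two xmm halves, so the same structure layout works
   with and without AVX.  */
typedef float La_x86_64_xmm __attribute__ ((__vector_size__ (16)));
typedef float La_x86_64_ymm __attribute__ ((__vector_size__ (32)));

typedef union
{
  La_x86_64_ymm ymm;
  La_x86_64_xmm xmm[2];
} La_x86_64_vector;

/* The trampoline relies on this size and alignment (illustrative
   checks, not in the patch; a negative array size fails to compile).  */
typedef char vector_size_check[sizeof (La_x86_64_vector) == 32 ? 1 : -1];
typedef char vector_align_check[__alignof__ (La_x86_64_vector) == 32 ? 1 : -1];

With lr_vector[8] placed first, sizeof (La_x86_64_regs) grows from 192
to 320 bytes (8 * 32 for the vectors plus 8 * 8 for the integer
registers) and sizeof (La_x86_64_retval) from 80 to 112, which is
where the subq $320 and subq $112 in the new trampoline come from.
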
This is the updated patch against today's git.  I fixed some whitespace
and added a vector register value check for audit_test.

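For item 6, the dispatch amounts to a self-patching code pointer.
Here is a rough C analogue of the assembly in dl-trampoline.S (the
names match the patch's labels; the stub bodies are placeholders):

#include <cpuid.h>

static void save_and_restore_vector_sse (void) { /* movaps/%xmm path */ }
static void save_and_restore_vector_avx (void) { /* vmovaps/%ymm path */ }

static void check_avx (void);

/* Starts out pointing at the resolver, like the
   .quad L(check_avx) in the .data.rel.local section.  */
static void (*save_and_restore_vector) (void) = check_avx;

static void
check_avx (void)
{
  unsigned int eax, ebx, ecx, edx;
  void (*fn) (void) = save_and_restore_vector_sse;

  /* CPUID leaf 1: bit 28 of ECX indicates AVX.  */
  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) && (ecx & bit_AVX))
    fn = save_and_restore_vector_avx;

  save_and_restore_vector = fn;	/* Patch the pointer once ...  */
  fn ();			/* ... and continue on the chosen path.  */
}

The first jmp *L(save_and_restore_vector)(%rip) lands in the resolver;
every later one goes straight to the right path without re-running
cpuid.
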
Thanks.


H.J.
---
2009-07-02  H.J. Lu  <hongjiu.lu@intel.com>

	* config.h.in (HAVE_AVX_SUPPORT): New.

	* config.make.in (config-cflags-avx): New.

	* configure.in: Substitute libc_cv_cc_avx.
	* configure: Regenerated.

	* elf/Makefile (distribute): Add tst-audit4.c tst-auditmod4a.c
	tst-auditmod4b.c.
	(tests): Add tst-audit4 for x86_64.
	(modules-names): Add tst-auditmod4a tst-auditmod4b.
	($(objpfx)tst-audit4): New.
	($(objpfx)tst-audit4.out): Likewise.
	(tst-audit4-ENV): Likewise.
	(CFLAGS-tst-audit4.c): Likewise.
	(CFLAGS-tst-auditmod4a.c): Likewise.
	(CFLAGS-tst-auditmod4b.c): Likewise.

	* elf/tst-audit4.c: New.
	* elf/tst-auditmod4a.c: Likewise.
	* elf/tst-auditmod4b.c: Likewise.

	* elf/tst-auditmod3b.c (pltenter): Check vector register values
	for audit_test.
	(pltexit): Likewise.

	* sysdeps/x86_64/bits/link.h (La_x86_64_ymm): New.
	(La_x86_64_vector): Likewise.
	(La_x86_64_regs): Replace lr_xmm with lr_vector.  Move
	lr_vector first.
	(La_x86_64_retval): Replace lrv_xmm0/lrv_xmm1 with
	lrv_vector0/lrv_vector1.  Move lrv_vector0 first.

	* sysdeps/x86_64/dl-trampoline.S (_dl_runtime_profile): Move
	saving and restoring SSE registers to ...
	* sysdeps/x86_64/dl-trampoline.h: This.  New.

	* sysdeps/x86_64/dl-trampoline.S: Include <config.h>.
	(_dl_runtime_profile): Align the stack and allocate space for
	256-bit AVX registers.  Jump indirectly through
	save_and_restore_vector if HAVE_AVX_SUPPORT is defined.
	(VECTOR_SIZE): New.
	(RDX_OFFSET): Likewise.
	(RAX_OFFSET): Likewise.
	(MOVAPS): Likewise.
	(XMM0): Likewise.
	(XMM1): Likewise.
	(XMM2): Likewise.
	(XMM3): Likewise.
	(XMM4): Likewise.
	(XMM5): Likewise.
	(XMM6): Likewise.
	(XMM7): Likewise.
	(save_and_restore_vector_sse): Likewise.
	(save_and_restore_vector_avx): Likewise.
	(check_avx): Likewise.
	(save_and_restore_vector): Likewise.

	* sysdeps/x86_64/elf/configure.in: Set libc_cv_cc_avx and
	HAVE_AVX_SUPPORT.
	* sysdeps/x86_64/elf/configure: Regenerated.

diff --git a/config.h.in b/config.h.in
index 8dbc224..495599d 100644
--- a/config.h.in
+++ b/config.h.in
@@ -129,6 +129,9 @@
 /* Define if binutils support TLS handling.  */
 #undef	HAVE_TLS_SUPPORT
 
+/* Define if gcc supports AVX.  */
+#undef	HAVE_AVX_SUPPORT
+
 /* Define if the compiler's exception support is based on libunwind.  */
 #undef	HAVE_CC_WITH_LIBUNWIND
 
diff --git a/config.make.in b/config.make.in
index e48ea26..9fa8616 100644
--- a/config.make.in
+++ b/config.make.in
@@ -34,6 +34,8 @@ config-sysdirs = @sysnames@
 cflags-cpu = @libc_cv_cc_submachine@
 asflags-cpu = @libc_cv_cc_submachine@
 
+config-cflags-avx = @libc_cv_cc_avx@
+
 defines = @DEFINES@
 sysincludes = @SYSINCLUDES@
 c++-sysincludes = @CXX_SYSINCLUDES@
diff --git a/configure b/configure
index 88cf4fd..a6101c4 100755
--- a/configure
+++ b/configure
@@ -657,6 +657,7 @@ xcoff
 elf
 ldd_rewrite_script
 use_ldconfig
+libc_cv_cc_avx
 libc_cv_cpp_asm_debuginfo
 libc_cv_forced_unwind
 libc_cv_rootsbindir
@@ -8744,6 +8745,7 @@ fi
 
 
 
+
 if test $elf = yes; then
   cat >>confdefs.h <<\_ACEOF
 #define HAVE_ELF 1
diff --git a/configure.in b/configure.in
index 6a92bd8..efa9d0e 100644
--- a/configure.in
+++ b/configure.in
@@ -2259,6 +2259,7 @@ AC_SUBST(libc_cv_forced_unwind)
 
 dnl sysdeps/CPU/configure.in checks set this via arch-specific asm tests
 AC_SUBST(libc_cv_cpp_asm_debuginfo)
+AC_SUBST(libc_cv_cc_avx)
 
 AC_SUBST(use_ldconfig)
 AC_SUBST(ldd_rewrite_script)
diff --git a/elf/Makefile b/elf/Makefile
index 57febea..d40e211 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -89,8 +89,9 @@ distribute	:= rtld-Rules \
 		   unload4mod1.c unload4mod2.c unload4mod3.c unload4mod4.c \
 		   unload6mod1.c unload6mod2.c unload6mod3.c \
 		   unload7mod1.c unload7mod2.c \
-		   tst-audit1.c tst-audit2.c tst-audit3.c \
+		   tst-audit1.c tst-audit2.c tst-audit3.c tst-audit4.c \
 		   tst-auditmod1.c tst-auditmod3a.c tst-auditmod3b.c \
+		   tst-auditmod4a.c tst-auditmod4b.c \
 		   order2mod1.c order2mod2.c order2mod3.c order2mod4.c \
 		   tst-stackguard1.c tst-stackguard1-static.c \
 		   tst-array5.c tst-array5-static.c tst-array5dep.c \
@@ -195,7 +196,7 @@ tests += loadtest restest1 preloadtest loadfail multiload origtest resolvfail \
 test-srcs = tst-pathopt
 tests-execstack-yes = tst-execstack tst-execstack-needed tst-execstack-prog
 ifeq (x86_64,$(config-machine))
-tests += tst-audit3
+tests += tst-audit3 tst-audit4
 endif
 endif
 ifeq (yesyes,$(have-fpie)$(build-shared))
@@ -235,6 +236,7 @@ modules-names = testobj1 testobj2 testobj3 testobj4 testobj5 testobj6 \
 		tst-dlopenrpathmod tst-deep1mod1 tst-deep1mod2 tst-deep1mod3 \
 		tst-dlmopen1mod tst-auditmod1 \
 		tst-auditmod3a tst-auditmod3b \
+		tst-auditmod4a tst-auditmod4b \
 		unload3mod1 unload3mod2 unload3mod3 unload3mod4 \
 		unload4mod1 unload4mod2 unload4mod3 unload4mod4 \
 		unload6mod1 unload6mod2 unload6mod3 \
@@ -968,6 +970,10 @@ $(objpfx)tst-audit3: $(objpfx)tst-auditmod3a.so
 $(objpfx)tst-audit3.out: $(objpfx)tst-auditmod3b.so
 tst-audit3-ENV = LD_AUDIT=$(objpfx)tst-auditmod3b.so
 
+$(objpfx)tst-audit4: $(objpfx)tst-auditmod4a.so
+$(objpfx)tst-audit4.out: $(objpfx)tst-auditmod4b.so
+tst-audit4-ENV = LD_AUDIT=$(objpfx)tst-auditmod4b.so
+
 $(objpfx)tst-global1: $(libdl)
 $(objpfx)tst-global1.out: $(objpfx)testobj6.so $(objpfx)testobj2.so
 
@@ -1103,3 +1109,9 @@ $(objpfx)ifuncmain5pic: $(addprefix $(objpfx),ifuncmod5.so)
 $(objpfx)ifuncmain5static: $(addprefix $(objpfx),ifuncdep5.o)
 $(objpfx)ifuncmain5staticpic: $(addprefix $(objpfx),ifuncdep5pic.o)
 $(objpfx)ifuncmain5picstatic: $(addprefix $(objpfx),ifuncdep5pic.o)
+
+ifeq (yes,$(config-cflags-avx))
+CFLAGS-tst-audit4.c += -mavx
+CFLAGS-tst-auditmod4a.c += -mavx
+CFLAGS-tst-auditmod4b.c += -mavx
+endif
diff --git a/elf/tst-audit4.c b/elf/tst-audit4.c
new file mode 100644
index 0000000..37ef17c
--- /dev/null
+++ b/elf/tst-audit4.c
@@ -0,0 +1,34 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#ifdef __AVX__
+#include <stdlib.h>
+#include <string.h>
+#include <cpuid.h>
+#include <immintrin.h>
+
+extern __m256i audit_test (__m256i, __m256i, __m256i, __m256i,
+			   __m256i, __m256i, __m256i, __m256i);
+int
+main (void)
+{
+  unsigned int eax, ebx, ecx, edx;
+
+  /* Run AVX test only if AVX is supported.  */
+  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+      && (ecx & bit_AVX))
+    {
+      __m256i ymm = _mm256_setzero_si256 ();
+      __m256i ret = audit_test (ymm, ymm, ymm, ymm, ymm, ymm, ymm, ymm);
+
+      if (memcmp (&ymm, &ret, sizeof (ret)))
+	abort ();
+    }
+  return 0;
+}
+#else
+int
+main (void)
+{
+  return 0;
+}
+#endif
diff --git a/elf/tst-auditmod3b.c b/elf/tst-auditmod3b.c
index 388ed6e..481f2c5 100644
--- a/elf/tst-auditmod3b.c
+++ b/elf/tst-auditmod3b.c
@@ -121,6 +121,20 @@ pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
   printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
 	  symname, (long int) sym->st_value, ndx, *flags);
 
+  if (strcmp (symname, "audit_test") == 0)
+    {
+      __m128i zero = _mm_setzero_si128 ();
+      if (memcmp (&regs->lr_vector[0], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[1], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[2], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[3], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[4], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[5], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[6], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[7], &zero, sizeof (zero)))
+	abort ();
+    }
+
   __m128i xmm = _mm_set1_epi32 (-1);
   asm volatile ("movdqa %0, %%xmm0" : : "x" (xmm) : "xmm0" );
   asm volatile ("movdqa %0, %%xmm1" : : "x" (xmm) : "xmm1" );
@@ -142,15 +156,24 @@ pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
   printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
 	  symname, (long int) sym->st_value, ndx, outregs->int_retval);
 
+  if (strcmp (symname, "audit_test") == 0)
+    {
+      __m128i zero = _mm_setzero_si128 ();
+      if (memcmp (&outregs->lrv_vector0, &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[0], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[1], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[2], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[3], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[4], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[5], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[6], &zero, sizeof (zero))
+	  || memcmp (&inregs->lr_vector[7], &zero, sizeof (zero)))
+	abort ();
+    }
+
   __m128i xmm = _mm_set1_epi32 (-1);
   asm volatile ("movdqa %0, %%xmm0" : : "x" (xmm) : "xmm0" );
   asm volatile ("movdqa %0, %%xmm1" : : "x" (xmm) : "xmm1" );
-  asm volatile ("movdqa %0, %%xmm2" : : "x" (xmm) : "xmm2" );
-  asm volatile ("movdqa %0, %%xmm3" : : "x" (xmm) : "xmm3" );
-  asm volatile ("movdqa %0, %%xmm4" : : "x" (xmm) : "xmm4" );
-  asm volatile ("movdqa %0, %%xmm5" : : "x" (xmm) : "xmm5" );
-  asm volatile ("movdqa %0, %%xmm6" : : "x" (xmm) : "xmm6" );
-  asm volatile ("movdqa %0, %%xmm7" : : "x" (xmm) : "xmm7" );
 
   return 0;
 }
diff --git a/elf/tst-auditmod4a.c b/elf/tst-auditmod4a.c
new file mode 100644
index 0000000..014f395
--- /dev/null
+++ b/elf/tst-auditmod4a.c
@@ -0,0 +1,26 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#ifdef __AVX__
+#include <stdlib.h>
+#include <string.h>
+#include <immintrin.h>
+
+__m256i
+audit_test (__m256i x0, __m256i x1, __m256i x2, __m256i x3,
+	    __m256i x4, __m256i x5, __m256i x6, __m256i x7)
+{
+  __m256i ymm = _mm256_setzero_si256 ();
+
+  if (memcmp (&ymm, &x0, sizeof (ymm))
+      || memcmp (&ymm, &x1, sizeof (ymm))
+      || memcmp (&ymm, &x2, sizeof (ymm))
+      || memcmp (&ymm, &x3, sizeof (ymm))
+      || memcmp (&ymm, &x4, sizeof (ymm))
+      || memcmp (&ymm, &x5, sizeof (ymm))
+      || memcmp (&ymm, &x6, sizeof (ymm))
+      || memcmp (&ymm, &x7, sizeof (ymm)))
+    abort ();
+
+  return ymm;
+}
+#endif
diff --git a/elf/tst-auditmod4b.c b/elf/tst-auditmod4b.c
new file mode 100644
index 0000000..449aa0f
--- /dev/null
+++ b/elf/tst-auditmod4b.c
@@ -0,0 +1,212 @@
+/* Verify that changing AVX registers in audit library won't affect
+   function parameter passing/return.  */
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <gnu/lib-names.h>
+
+unsigned int
+la_version (unsigned int v)
+{
+  setlinebuf (stdout);
+
+  printf ("version: %u\n", v);
+
+  char buf[20];
+  sprintf (buf, "%u", v);
+
+  return v;
+}
+
+void
+la_activity (uintptr_t *cookie, unsigned int flag)
+{
+  if (flag == LA_ACT_CONSISTENT)
+    printf ("activity: consistent\n");
+  else if (flag == LA_ACT_ADD)
+    printf ("activity: add\n");
+  else if (flag == LA_ACT_DELETE)
+    printf ("activity: delete\n");
+  else
+    printf ("activity: unknown activity %u\n", flag);
+}
+
+char *
+la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
+{
+  char buf[100];
+  const char *flagstr;
+  if (flag == LA_SER_ORIG)
+    flagstr = "LA_SET_ORIG";
+  else if (flag == LA_SER_LIBPATH)
+    flagstr = "LA_SER_LIBPATH";
+  else if (flag == LA_SER_RUNPATH)
+    flagstr = "LA_SER_RUNPATH";
+  else if (flag == LA_SER_CONFIG)
+    flagstr = "LA_SER_CONFIG";
+  else if (flag == LA_SER_DEFAULT)
+    flagstr = "LA_SER_DEFAULT";
+  else if (flag == LA_SER_SECURE)
+    flagstr = "LA_SER_SECURE";
+  else
+    {
+       sprintf (buf, "unknown flag %d", flag);
+       flagstr = buf;
+    }
+  printf ("objsearch: %s, %s\n", name, flagstr);
+
+  return (char *) name;
+}
+
+unsigned int
+la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
+{
+  printf ("objopen: %ld, %s\n", lmid, l->l_name);
+
+  return 3;
+}
+
+void
+la_preinit (uintptr_t *cookie)
+{
+  printf ("preinit\n");
+}
+
+unsigned int
+la_objclose  (uintptr_t *cookie)
+{
+  printf ("objclose\n");
+  return 0;
+}
+
+uintptr_t
+la_symbind32 (Elf32_Sym *sym, unsigned int ndx, uintptr_t *refcook,
+	      uintptr_t *defcook, unsigned int *flags, const char *symname)
+{
+  printf ("symbind32: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+  return sym->st_value;
+}
+
+uintptr_t
+la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
+	      uintptr_t *defcook, unsigned int *flags, const char *symname)
+{
+  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+  return sym->st_value;
+}
+
+#define pltenter la_x86_64_gnu_pltenter
+#define pltexit la_x86_64_gnu_pltexit
+#define La_regs La_x86_64_regs
+#define La_retval La_x86_64_retval
+#define int_retval lrv_rax
+
+#include <tst-audit.h>
+
+#ifdef __AVX__
+#include <immintrin.h>
+#include <cpuid.h>
+
+static int avx = -1;
+
+static int
+__attribute ((always_inline))
+check_avx (void)
+{
+  if (avx == -1)
+    {
+      unsigned int eax, ebx, ecx, edx;
+
+      if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+	  && (ecx & bit_AVX))
+	avx = 1;
+      else
+	avx = 0;
+    }
+  return avx;
+}
+#endif
+
+ElfW(Addr)
+pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	  uintptr_t *defcook, La_regs *regs, unsigned int *flags,
+	  const char *symname, long int *framesizep)
+{
+  printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+#ifdef __AVX__
+  if (check_avx ())
+    {
+      if (strcmp (symname, "audit_test") == 0)
+	{
+	  __m256i zero = _mm256_setzero_si256 ();
+	  if (memcmp (&regs->lr_vector[0], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[1], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[2], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[3], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[4], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[5], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[6], &zero, sizeof (zero))
+	      || memcmp (&regs->lr_vector[7], &zero, sizeof (zero)))
+	    abort ();
+	}
+
+      __m256i ymm = _mm256_set1_epi32 (-1);
+      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+      asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
+      asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
+      asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
+      asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
+      asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
+      asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );
+    }
+#endif
+
+  return sym->st_value;
+}
+
+unsigned int
+pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	 uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
+	 const char *symname)
+{
+  printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
+	  symname, (long int) sym->st_value, ndx, outregs->int_retval);
+
+#ifdef __AVX__
+  if (check_avx ())
+    {
+      if (strcmp (symname, "audit_test") == 0)
+	{
+	  __m256i zero = _mm256_setzero_si256 ();
+	  if (memcmp (&outregs->lrv_vector0, &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[0], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[1], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[2], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[3], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[4], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[5], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[6], &zero, sizeof (zero))
+	      || memcmp (&inregs->lr_vector[7], &zero, sizeof (zero)))
+	    abort ();
+	}
+
+      __m256i ymm = _mm256_set1_epi32 (-1);
+      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+    }
+#endif
+
+  return 0;
+}
diff --git a/sysdeps/x86_64/bits/link.h b/sysdeps/x86_64/bits/link.h
index 5676b78..efa1a9f 100644
--- a/sysdeps/x86_64/bits/link.h
+++ b/sysdeps/x86_64/bits/link.h
@@ -65,12 +65,22 @@ __END_DECLS
 /* Registers for entry into PLT on x86-64.  */
 # if __GNUC_PREREQ (4,0)
 typedef float La_x86_64_xmm __attribute__ ((__vector_size__ (16)));
+typedef float La_x86_64_ymm __attribute__ ((__vector_size__ (32)));
 # else
 typedef float La_x86_64_xmm __attribute__ ((__mode__ (__V4SF__)));
 # endif
 
+typedef union
+{
+# if __GNUC_PREREQ (4,0)
+  La_x86_64_ymm ymm;
+# endif
+  La_x86_64_xmm xmm[2];
+} La_x86_64_vector;
+
 typedef struct La_x86_64_regs
 {
+  La_x86_64_vector lr_vector[8];
   uint64_t lr_rdx;
   uint64_t lr_r8;
   uint64_t lr_r9;
@@ -79,16 +89,15 @@ typedef struct La_x86_64_regs
   uint64_t lr_rdi;
   uint64_t lr_rbp;
   uint64_t lr_rsp;
-  La_x86_64_xmm lr_xmm[8];
 } La_x86_64_regs;
 
 /* Return values for calls from PLT on x86-64.  */
 typedef struct La_x86_64_retval
 {
+  La_x86_64_vector lrv_vector0;
+  La_x86_64_vector lrv_vector1;
   uint64_t lrv_rax;
   uint64_t lrv_rdx;
-  La_x86_64_xmm lrv_xmm0;
-  La_x86_64_xmm lrv_xmm1;
   long double lrv_st0;
   long double lrv_st1;
 } La_x86_64_retval;
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 33e6115..e534a1c 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -17,6 +17,7 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <config.h>
 #include <sysdep.h>
 
 	.text
@@ -60,7 +61,7 @@ _dl_runtime_resolve:
 
 _dl_runtime_profile:
 	/* The La_x86_64_regs data structure pointed to by the
-	   fourth paramater must be 16-byte aligned.  This must
+	   fourth parameter must be 32-byte aligned.  This must
 	   be explicitly enforced.  We have the set up a dynamically
 	   sized stack frame.  %rbx points to the top half which
 	   has a fixed size and preserves the original stack pointer.  */
@@ -88,136 +89,90 @@ _dl_runtime_profile:
 	cfi_def_cfa_register(%rbx)
 
 	/* Actively align the La_x86_64_regs structure.  */
-	andq $0xfffffffffffffff0, %rsp
-	subq $192, %rsp		# sizeof(La_x86_64_regs)
+	andq $0xffffffffffffffe0, %rsp
+	subq $320, %rsp		# sizeof(La_x86_64_regs)
 	movq %rsp, 24(%rbx)
 
-	movq %rdx,   (%rsp)	# Fill the La_x86_64_regs structure.
-	movq %r8,   8(%rsp)
-	movq %r9,  16(%rsp)
-	movq %rcx, 24(%rsp)
-	movq %rsi, 32(%rsp)
-	movq %rdi, 40(%rsp)
-	movq %rbp, 48(%rsp)
-	leaq 48(%rbx), %rax
-	movq %rax, 56(%rsp)
-	movaps %xmm0,  64(%rsp)
-	movaps %xmm1,  80(%rsp)
-	movaps %xmm2,  96(%rsp)
-	movaps %xmm3, 112(%rsp)
-	movaps %xmm4, 128(%rsp)
-	movaps %xmm5, 144(%rsp)
-	movaps %xmm6, 160(%rsp)
-	movaps %xmm7, 176(%rsp)
-
-	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
-	movq 48(%rbx), %rdx	# Load return address if needed.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	leaq 16(%rbx), %r8
-	call _dl_profile_fixup	# Call resolver.
-
-	movq %rax, %r11		# Save return value.
-
-	movq 8(%rbx), %rax	# Get back register content.
-	movq      (%rsp), %rdx
-	movq     8(%rsp), %r8
-	movq    16(%rsp), %r9
-	movaps  64(%rsp), %xmm0
-	movaps  80(%rsp), %xmm1
-	movaps  96(%rsp), %xmm2
-	movaps 112(%rsp), %xmm3
-	movaps 128(%rsp), %xmm4
-	movaps 144(%rsp), %xmm5
-	movaps 160(%rsp), %xmm6
-	movaps 176(%rsp), %xmm7
-
-	movq 16(%rbx), %r10	# Anything in framesize?
-	testq %r10, %r10
-	jns 1f
-
-	/* There's nothing in the frame size, so there
-	   will be no call to the _dl_call_pltexit. */
-
-	movq 24(%rsp), %rcx	# Get back registers content.
-	movq 32(%rsp), %rsi
-	movq 40(%rsp), %rdi
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	jmp *%r11		# Jump to function address.
+/* Size of La_x86_64_vector.  */
+# define VECTOR_SIZE	32
+/* Offset of lr_rdx in La_x86_64_regs.  */
+# define RDX_OFFSET	(VECTOR_SIZE * 8)
+/* Offset of lrv_rax in La_x86_64_retval.  */
+# define RAX_OFFSET	(VECTOR_SIZE * 2)
+
+	/* Fill the La_x86_64_regs structure.  */
+	movq %rdx,	(RDX_OFFSET)(%rsp)
+	movq %r8,  (RDX_OFFSET +  8)(%rsp)
+	movq %r9,  (RDX_OFFSET + 16)(%rsp)
+	movq %rcx, (RDX_OFFSET + 24)(%rsp)
+	movq %rsi, (RDX_OFFSET + 32)(%rsp)
+	movq %rdi, (RDX_OFFSET + 40)(%rsp)
+	movq %rbp, (RDX_OFFSET + 48)(%rsp)
+
+# define MOVAPS movaps
+# define XMM0 xmm0
+# define XMM1 xmm1
+# define XMM2 xmm2
+# define XMM3 xmm3
+# define XMM4 xmm4
+# define XMM5 xmm5
+# define XMM6 xmm6
+# define XMM7 xmm7
+
+# ifdef HAVE_AVX_SUPPORT
+	jmp *L(save_and_restore_vector)(%rip)
 
-1:
-	cfi_adjust_cfa_offset(48)
-	cfi_rel_offset(%rbx, 0)
-	cfi_def_cfa_register(%rbx)
+	.align 16
+L(save_and_restore_vector_sse):
+# endif
+
+# include "dl-trampoline.h"
+
+# ifdef HAVE_AVX_SUPPORT
+#  undef  MOVAPS
+#  define MOVAPS vmovaps
+#  undef  XMM0
+#  define XMM0 ymm0
+#  undef  XMM1
+#  define XMM1 ymm1
+#  undef  XMM2
+#  define XMM2 ymm2
+#  undef  XMM3
+#  define XMM3 ymm3
+#  undef  XMM4
+#  define XMM4 ymm4
+#  undef  XMM5
+#  define XMM5 ymm5
+#  undef  XMM6
+#  define XMM6 ymm6
+#  undef  XMM7
+#  define XMM7 ymm7
 
-	/* At this point we need to prepare new stack for the function
-	   which has to be called.  We copy the original stack to a
-	   temporary buffer of the size specified by the 'framesize'
-	   returned from _dl_profile_fixup */
-
-	leaq 56(%rbx), %rsi	# stack
-	addq $8, %r10
-	andq $0xfffffffffffffff0, %r10
-	movq %r10, %rcx
-	subq %r10, %rsp
-	movq %rsp, %rdi
-	shrq $3, %rcx
-	rep
-	movsq
-
-	movq 24(%rdi), %rcx	# Get back register content.
-	movq 32(%rdi), %rsi
-	movq 40(%rdi), %rdi
-
-	call *%r11
-
-	mov 24(%rbx), %rsp	# Drop the copied stack content
-
-	/* Now we have to prepare the La_x86_64_retval structure for the
-	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
-	   so we just need to allocate the sizeof(La_x86_64_retval) space on
-	   the stack, since the alignment has already been taken care of. */
-
-	subq $80, %rsp		# sizeof(La_x86_64_retval)
-	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
-
-	movq %rax, (%rcx)	# Fill in the La_x86_64_retval structure.
-	movq %rdx, 8(%rcx)
-	movaps %xmm0, 16(%rcx)
-	movaps %xmm1, 32(%rcx)
-	fstpt 48(%rcx)
-	fstpt 64(%rcx)
-
-	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-        movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	call _dl_call_pltexit
-
-	movq  (%rsp), %rax	# Restore return registers.
-	movq 8(%rsp), %rdx
-	movaps 16(%rsp), %xmm0
-	movaps 32(%rsp), %xmm1
-	fldt 64(%rsp)
-	fldt 48(%rsp)
-
-	movq %rbx, %rsp
-	movq  (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	retq
+	.align 16
+L(save_and_restore_vector_avx):
+#  include "dl-trampoline.h"
+# endif
 
 	cfi_endproc
 	.size _dl_runtime_profile, .-_dl_runtime_profile
+
+# ifdef HAVE_AVX_SUPPORT
+L(check_avx):
+	mov	%rbx,%r11		# Save rbx
+	movl	$1, %eax
+	cpuid
+	mov	%r11,%rbx		# Restore rbx
+	leaq    L(save_and_restore_vector_sse)(%rip), %rax
+	andl	$(1 << 28), %ecx	# Check if AVX is available.
+	jz	L(ret)
+	leaq    L(save_and_restore_vector_avx)(%rip), %rax
+L(ret):
+	movq	%rax,L(save_and_restore_vector)(%rip)
+	jmp	*%rax
+
+	.section .data.rel.local,"aw",@progbits
+	.align	8
+L(save_and_restore_vector):
+	.quad L(check_avx)
+# endif
 #endif
diff --git a/sysdeps/x86_64/dl-trampoline.h b/sysdeps/x86_64/dl-trampoline.h
new file mode 100644
index 0000000..38af47a
--- /dev/null
+++ b/sysdeps/x86_64/dl-trampoline.h
@@ -0,0 +1,146 @@
+/* Partial PLT profile trampoline to save and restore x86-64 vector
+   registers.
+   Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+	leaq 48(%rbx), %rax
+	movq %rax, (RDX_OFFSET + 56)(%rsp)
+
+	MOVAPS %XMM0,		     (%rsp)
+	MOVAPS %XMM1,   (VECTOR_SIZE)(%rsp)
+	MOVAPS %XMM2, (VECTOR_SIZE*2)(%rsp)
+	MOVAPS %XMM3, (VECTOR_SIZE*3)(%rsp)
+	MOVAPS %XMM4, (VECTOR_SIZE*4)(%rsp)
+	MOVAPS %XMM5, (VECTOR_SIZE*5)(%rsp)
+	MOVAPS %XMM6, (VECTOR_SIZE*6)(%rsp)
+	MOVAPS %XMM7, (VECTOR_SIZE*7)(%rsp)
+
+	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
+	movq 48(%rbx), %rdx	# Load return address if needed.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	leaq 16(%rbx), %r8
+	call _dl_profile_fixup	# Call resolver.
+
+	movq %rax, %r11		# Save return value.
+
+	movq 8(%rbx), %rax	# Get back register content.
+	movq	  (RDX_OFFSET)(%rsp), %rdx
+	movq (RDX_OFFSET +  8)(%rsp), %r8
+	movq (RDX_OFFSET + 16)(%rsp), %r9
+	MOVAPS		      (%rsp), %XMM0
+	MOVAPS   (VECTOR_SIZE)(%rsp), %XMM1
+	MOVAPS (VECTOR_SIZE*2)(%rsp), %XMM2
+	MOVAPS (VECTOR_SIZE*3)(%rsp), %XMM3
+	MOVAPS (VECTOR_SIZE*4)(%rsp), %XMM4
+	MOVAPS (VECTOR_SIZE*5)(%rsp), %XMM5
+	MOVAPS (VECTOR_SIZE*6)(%rsp), %XMM6
+	MOVAPS (VECTOR_SIZE*7)(%rsp), %XMM7
+
+	movq 16(%rbx), %r10	# Anything in framesize?
+	testq %r10, %r10
+	jns 1f
+
+	/* There's nothing in the frame size, so there
+	   will be no call to the _dl_call_pltexit. */
+
+	/* Get back registers content.  */
+	movq (RDX_OFFSET + 24)(%rsp), %rcx
+	movq (RDX_OFFSET + 32)(%rsp), %rsi
+	movq (RDX_OFFSET + 40)(%rsp), %rdi
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	jmp *%r11		# Jump to function address.
+
+1:
+	cfi_adjust_cfa_offset(48)
+	cfi_rel_offset(%rbx, 0)
+	cfi_def_cfa_register(%rbx)
+
+	/* At this point we need to prepare new stack for the function
+	   which has to be called.  We copy the original stack to a
+	   temporary buffer of the size specified by the 'framesize'
+	   returned from _dl_profile_fixup */
+
+	leaq (RDX_OFFSET + 56)(%rbx), %rsi	# stack
+	addq $8, %r10
+	andq $0xfffffffffffffff0, %r10
+	movq %r10, %rcx
+	subq %r10, %rsp
+	movq %rsp, %rdi
+	shrq $3, %rcx
+	rep
+	movsq
+
+	movq 24(%rdi), %rcx	# Get back register content.
+	movq 32(%rdi), %rsi
+	movq 40(%rdi), %rdi
+
+	call *%r11
+
+	mov 24(%rbx), %rsp	# Drop the copied stack content
+
+	/* Now we have to prepare the La_x86_64_retval structure for the
+	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
+	   so we just need to allocate the sizeof(La_x86_64_retval) space on
+	   the stack, since the alignment has already been taken care of. */
+
+	subq $112, %rsp		# sizeof(La_x86_64_retval)
+	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
+
+	/* Fill in the La_x86_64_retval structure.  */
+	movq %rax,     (RAX_OFFSET)(%rcx)
+	movq %rdx, (RAX_OFFSET + 8)(%rcx)
+
+	MOVAPS %XMM0,		   (%rcx)
+	MOVAPS %XMM1, (VECTOR_SIZE)(%rcx)
+
+	fstpt (RAX_OFFSET + 16)(%rcx)
+	fstpt (RAX_OFFSET + 32)(%rcx)
+
+	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+        movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	call _dl_call_pltexit
+
+	/* Restore return registers.  */
+	movq	 (RAX_OFFSET)(%rsp), %rax
+	movq (RAX_OFFSET + 8)(%rsp), %rdx
+
+	MOVAPS		    (%rsp), %XMM0
+	MOVAPS (VECTOR_SIZE)(%rsp), %XMM1
+
+	fldt (RAX_OFFSET + 16)(%rsp)
+	fldt (RAX_OFFSET + 32)(%rsp)
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	retq
diff --git a/sysdeps/x86_64/elf/configure b/sysdeps/x86_64/elf/configure
index 7746549..221e74c 100755
--- a/sysdeps/x86_64/elf/configure
+++ b/sysdeps/x86_64/elf/configure
@@ -79,3 +79,28 @@ cat >>confdefs.h <<\_ACEOF
 #define PI_STATIC_AND_HIDDEN 1
 _ACEOF
 
+
+{ $as_echo "$as_me:$LINENO: checking for AVX support" >&5
+$as_echo_n "checking for AVX support... " >&6; }
+if test "${libc_cv_cc_avx+set}" = set; then
+  $as_echo_n "(cached) " >&6
+else
+  if { ac_try='${CC-cc} -mavx -xc /dev/null -S -o /dev/null'
+  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $libc_cv_cc_avx" >&5
+$as_echo "$libc_cv_cc_avx" >&6; }
+if test $libc_cv_cc_avx = yes; then
+  cat >>confdefs.h <<\_ACEOF
+#define HAVE_AVX_SUPPORT 1
+_ACEOF
+
+fi
diff --git a/sysdeps/x86_64/elf/configure.in b/sysdeps/x86_64/elf/configure.in
index 9cb59d0..14d1875 100644
--- a/sysdeps/x86_64/elf/configure.in
+++ b/sysdeps/x86_64/elf/configure.in
@@ -32,3 +32,14 @@ fi
 dnl It is always possible to access static and hidden symbols in an
 dnl position independent way.
 AC_DEFINE(PI_STATIC_AND_HIDDEN)
+
+dnl Check if -mavx works.
+AC_CACHE_CHECK(for AVX support, libc_cv_cc_avx, [dnl
+if AC_TRY_COMMAND([${CC-cc} -mavx -xc /dev/null -S -o /dev/null]); then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi])
+if test $libc_cv_cc_avx = yes; then
+  AC_DEFINE(HAVE_AVX_SUPPORT)
+fi

