This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.



GNU C Library master sources branch master updated. glibc-2.17-482-gc2d9401


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, master has been updated
       via  c2d94018c6963b63e95e4666feac976a97816972 (commit)
       via  27ec37f185488a91521ddfbc42d74d8e783a29c9 (commit)
       via  a64d7e0efb34bde344bc1d4d17646a8ae6b6d588 (commit)
       via  d26dd3eb006e813d5f81bd7a5f02d70d480de8c4 (commit)
       via  0f6a8d4b0b5214cf07dce15881164c76298ffd0a (commit)
      from  e57b0c6100e63bfd816ae59339452eafc81f1d3a (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=c2d94018c6963b63e95e4666feac976a97816972

commit c2d94018c6963b63e95e4666feac976a97816972
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Fri Mar 29 16:40:36 2013 +0530

    Remove ONE and MONE
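
The change is mechanical: mpa.h defined ONE and MONE as plain aliases for the
literals 1.0 and -1.0, so every use site can spell the literal directly. A
minimal standalone sketch of the before/after pattern (illustrative only, with
made-up helper names, not the glibc sources):

#include <stdio.h>

/* Before: mpa.h-style aliases for plain double literals.  */
#define ONE   1.0
#define MONE -1.0

static double
reciprocal_old (double u)
{
  return ONE / u;
}

/* After: the literal is written directly.  The generated code is
   identical; the macros were only naming noise.  */
static double
reciprocal_new (double u)
{
  return 1 / u;
}

int
main (void)
{
  printf ("%g %g\n", reciprocal_old (4.0), reciprocal_new (4.0));
  printf ("%g\n", MONE * 2.0);	/* same as writing -1 * 2.0 */
  return 0;
}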

diff --git a/ChangeLog b/ChangeLog
index 345a8d6..59797b9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,14 @@
 2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
 
+	* sysdeps/ieee754/dbl-64/mpa.h (ONE, MONE): Remove defines.
+	(__pow_mp): Replace ONE and MONE with their values.
+	* sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+	* sysdeps/ieee754/dbl-64/mpatan.c (__mpatan): Likewise.
+	* sysdeps/ieee754/dbl-64/mpatan2.c (__mpatan2): Likewise.
+	* sysdeps/ieee754/dbl-64/mptan.c (__mptan): Likewise.
+	* sysdeps/ieee754/dbl-64/s_atan.c (atan): Likewise.
+	* sysdeps/ieee754/dbl-64/s_tan.c (tan): Likewise.
+
 	* sysdeps/ieee754/dbl-64/s_tan.c: Fix formatting.
 
 	* sysdeps/ieee754/dbl-64/mpa.h (ZERO, MZERO): Remove defines.
diff --git a/sysdeps/ieee754/dbl-64/e_log.c b/sysdeps/ieee754/dbl-64/e_log.c
index f8d5a19..f9300f9 100644
--- a/sysdeps/ieee754/dbl-64/e_log.c
+++ b/sysdeps/ieee754/dbl-64/e_log.c
@@ -91,7 +91,7 @@ __ieee754_log (double x)
 
   /* Regular values of x */
 
-  w = x - ONE;
+  w = x - 1;
   if (__builtin_expect (ABS (w) > U03, 1))
     goto case_03;
 
@@ -178,7 +178,7 @@ case_03:
   j = (num.i[HIGH_HALF] & 0x000fffff) >> 4;
 
   /* Compute w=(u-ui*vj)/(ui*vj) */
-  p0 = (ONE + (i - 75) * DEL_U) * (ONE + (j - 180) * DEL_V);
+  p0 = (1 + (i - 75) * DEL_U) * (1 + (j - 180) * DEL_V);
   q = u - p0;
   r0 = Iu[i].d * Iv[j].d;
   w = q * r0;
@@ -206,7 +206,7 @@ case_03:
 
   /* Improve the accuracy of r0 */
   EMULV (p0, r0, sa, sb, t1, t2, t3, t4, t5);
-  t = r0 * ((ONE - sa) - sb);
+  t = r0 * ((1 - sa) - sb);
   EADD (r0, t, ra, rb);
 
   /* Compute w */
diff --git a/sysdeps/ieee754/dbl-64/mpa.h b/sysdeps/ieee754/dbl-64/mpa.h
index 27bc7dc..ee81f5c 100644
--- a/sysdeps/ieee754/dbl-64/mpa.h
+++ b/sysdeps/ieee754/dbl-64/mpa.h
@@ -91,8 +91,6 @@ extern const mp_no mptwo;
 # define  TWO52     0x1.0p52		/* 2^52    */
 #endif
 
-#define  ONE       1.0			/* 1       */
-#define  MONE      -1.0			/* -1      */
 #define  TWO       2.0			/*  2      */
 
 #define  TWO5      TWOPOW (5)		/* 2^5     */
@@ -150,7 +148,7 @@ __pow_mp (int pow, mp_no *y, int p)
       rem += 24;
     }
   /* The sign of any 2^x is always positive.  */
-  Y[0] = ONE;
+  Y[0] = 1;
   Y[1] = 1 << rem;
 
   /* Everything else is 0.  */
diff --git a/sysdeps/ieee754/dbl-64/mpatan.c b/sysdeps/ieee754/dbl-64/mpatan.c
index cc879d8..33c6847 100644
--- a/sysdeps/ieee754/dbl-64/mpatan.c
+++ b/sysdeps/ieee754/dbl-64/mpatan.c
@@ -74,7 +74,7 @@ __mpatan (mp_no *x, mp_no *y, int p)
 	}
     }
   mptwoim1.e = 1;
-  mptwoim1.d[0] = ONE;
+  mptwoim1.d[0] = 1;
 
   /* Reduce x m times.  */
   __sqr (x, &mpsm, p);
diff --git a/sysdeps/ieee754/dbl-64/mpatan2.c b/sysdeps/ieee754/dbl-64/mpatan2.c
index f4aa9db..f66f9eb 100644
--- a/sysdeps/ieee754/dbl-64/mpatan2.c
+++ b/sysdeps/ieee754/dbl-64/mpatan2.c
@@ -51,7 +51,7 @@ __mpatan2 (mp_no *y, mp_no *x, mp_no *z, int p)
       __dvd (x, y, &mpt1, p);
       __mul (&mpt1, &mpt1, &mpt2, p);
       if (mpt1.d[0] != 0)
-	mpt1.d[0] = ONE;
+	mpt1.d[0] = 1;
       __add (&mpt2, &mpone, &mpt3, p);
       __mpsqrt (&mpt3, &mpt2, p);
       __add (&mpt1, &mpt2, &mpt3, p);
diff --git a/sysdeps/ieee754/dbl-64/mptan.c b/sysdeps/ieee754/dbl-64/mptan.c
index 51b5718..281bfca 100644
--- a/sysdeps/ieee754/dbl-64/mptan.c
+++ b/sysdeps/ieee754/dbl-64/mptan.c
@@ -56,7 +56,7 @@ __mptan (double x, mp_no *mpy, int p)
   if (n)
     {
       __dvd (&mpc, &mps, mpy, p);
-      mpy->d[0] *= MONE;
+      mpy->d[0] *= -1;
     }
   /* tan is negative in this area.  */
   else
diff --git a/sysdeps/ieee754/dbl-64/s_atan.c b/sysdeps/ieee754/dbl-64/s_atan.c
index e3c4be7..7b6c83f 100644
--- a/sysdeps/ieee754/dbl-64/s_atan.c
+++ b/sysdeps/ieee754/dbl-64/s_atan.c
@@ -179,9 +179,9 @@ atan (double x)
     {
       if (u < D)
 	{			/* C <= u < D */
-	  w = ONE / u;
+	  w = 1 / u;
 	  EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
-	  ww = w * ((ONE - t1) - t2);
+	  ww = w * ((1 - t1) - t2);
 	  i = (TWO52 + TWO8 * w) - TWO52;
 	  i -= 16;
 	  z = (w - cij[i][0].d) + ww;
@@ -200,7 +200,7 @@ atan (double x)
 	  if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
 	    return __signArctan (x, y);
 
-	  DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+	  DIV2 (1 , 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
 		t10);
 	  t1 = w - hij[i][0].d;
 	  EADD (t1, ww, z, zz);
@@ -230,7 +230,7 @@ atan (double x)
 	{
 	  if (u < E)
 	    {			/* D <= u < E */
-	      w = ONE / u;
+	      w = 1 / u;
 	      v = w * w;
 	      EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
 
@@ -241,13 +241,13 @@ atan (double x)
 	      yy = d3.d + v * yy;
 	      yy *= w * v;
 
-	      ww = w * ((ONE - t1) - t2);
+	      ww = w * ((1 - t1) - t2);
 	      ESUB (HPI, w, t3, cor);
 	      yy = ((HPI1 + cor) - ww) - yy;
 	      if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
 		return __signArctan (x, y);
 
-	      DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
+	      DIV2 (1 , 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
 		    t9, t10);
 	      MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
 
diff --git a/sysdeps/ieee754/dbl-64/s_tan.c b/sysdeps/ieee754/dbl-64/s_tan.c
index 91fe2b8..54f863e 100644
--- a/sysdeps/ieee754/dbl-64/s_tan.c
+++ b/sysdeps/ieee754/dbl-64/s_tan.c
@@ -154,7 +154,7 @@ tan (double x)
       i = ((int) (mfftnhf.d + TWO8 * w));
       z = w - xfg[i][0].d;
       z2 = z * z;
-      s = (x < 0.0) ? MONE : ONE;
+      s = (x < 0.0) ? -1 : 1;
       pz = z + z * z2 * (e0.d + z2 * e1.d);
       fi = xfg[i][1].d;
       gi = xfg[i][2].d;
@@ -214,13 +214,13 @@ tan (double x)
 	{
 	  ya = -a;
 	  yya = -da;
-	  sy = MONE;
+	  sy = -1;
 	}
       else
 	{
 	  ya = a;
 	  yya = da;
-	  sy = ONE;
+	  sy = 1;
 	}
 
       /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
@@ -438,13 +438,13 @@ tan (double x)
 	{
 	  ya = -a;
 	  yya = -da;
-	  sy = MONE;
+	  sy = -1;
 	}
       else
 	{
 	  ya = a;
 	  yya = da;
-	  sy = ONE;
+	  sy = 1;
 	}
 
       /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
@@ -631,13 +631,13 @@ tan (double x)
     {
       ya = -a;
       yya = -da;
-      sy = MONE;
+      sy = -1;
     }
   else
     {
       ya = a;
       yya = da;
-      sy = ONE;
+      sy = 1;
     }
 
   /* (+++) The case 1e8 < abs(x) < 2**1024,    abs(y) <= 1e-7 */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=27ec37f185488a91521ddfbc42d74d8e783a29c9

commit 27ec37f185488a91521ddfbc42d74d8e783a29c9
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Fri Mar 29 16:38:27 2013 +0530

    Format s_tan.c
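
Beyond whitespace and GNU-style braces, the reformat also rewrites the long
nested polynomial expressions as explicit Horner steps, one coefficient per
line. A sketch of that equivalence, using made-up coefficient values in place
of the d3.d ... d11.d table entries from utan.h (illustrative only):

#include <stdio.h>

/* Hypothetical stand-ins for the d3.d ... d11.d coefficients.  */
static const double d3 = 0.3333, d5 = 0.1333, d7 = 0.0539,
                    d9 = 0.0219, d11 = 0.0089;

int
main (void)
{
  double x = 0.05, x2 = x * x, t2;

  /* Old layout: one nested expression.  */
  double old = x * x2 * (d3 + x2 * (d5 + x2 * (d7 + x2 * (d9 + x2 * d11))));

  /* New layout: the same Horner evaluation written one step per line;
     the operations and rounding are unchanged.  */
  t2 = d9 + x2 * d11;
  t2 = d7 + x2 * t2;
  t2 = d5 + x2 * t2;
  t2 = d3 + x2 * t2;
  t2 *= x * x2;

  printf ("%.17g\n%.17g\n", old, t2);	/* identical values */
  return 0;
}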

diff --git a/ChangeLog b/ChangeLog
index 2ecbced..345a8d6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@
 2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
 
+	* sysdeps/ieee754/dbl-64/s_tan.c: Fix formatting.
+
 	* sysdeps/ieee754/dbl-64/mpa.h (ZERO, MZERO): Remove defines.
 	(__pow_mp): Replace ZERO and MZERO with their values.
 	* sysdeps/ieee754/dbl-64/e_atan2.c (__ieee754_atan2): Likewise.
diff --git a/sysdeps/ieee754/dbl-64/s_tan.c b/sysdeps/ieee754/dbl-64/s_tan.c
index faa5221..91fe2b8 100644
--- a/sysdeps/ieee754/dbl-64/s_tan.c
+++ b/sysdeps/ieee754/dbl-64/s_tan.c
@@ -46,459 +46,782 @@
 # define SECTION
 #endif
 
-static double tanMp(double);
-void __mptan(double, mp_no *, int);
+static double tanMp (double);
+void __mptan (double, mp_no *, int);
 
 double
 SECTION
-tan(double x) {
+tan (double x)
+{
 #include "utan.h"
 #include "utan.tbl"
 
-  int ux,i,n;
-  double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
-  t,t1,t2,t3,t4,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
+  int ux, i, n;
+  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
+    s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya, yya, z0,
+    z, zz, z2, zz2;
 #ifndef DLA_FMS
-  double t5,t6;
+  double t5, t6;
 #endif
   int p;
-  number num,v;
-  mp_no mpa,mpt1,mpt2;
+  number num, v;
+  mp_no mpa, mpt1, mpt2;
 
   double retval;
 
-  int __branred(double, double *, double *);
-  int __mpranred(double, mp_no *, int);
+  int __branred (double, double *, double *);
+  int __mpranred (double, mp_no *, int);
 
   SET_RESTORE_ROUND_53BIT (FE_TONEAREST);
 
   /* x=+-INF, x=NaN */
-  num.d = x;  ux = num.i[HIGH_HALF];
-  if ((ux&0x7ff00000)==0x7ff00000) {
-    if ((ux&0x7fffffff)==0x7ff00000)
-      __set_errno (EDOM);
-    retval = x-x;
-    goto ret;
-  }
+  num.d = x;
+  ux = num.i[HIGH_HALF];
+  if ((ux & 0x7ff00000) == 0x7ff00000)
+    {
+      if ((ux & 0x7fffffff) == 0x7ff00000)
+	__set_errno (EDOM);
+      retval = x - x;
+      goto ret;
+    }
 
-  w=(x<0.0) ? -x : x;
+  w = (x < 0.0) ? -x : x;
 
   /* (I) The case abs(x) <= 1.259e-8 */
-  if (w<=g1.d) { retval = x; goto ret; }
+  if (w <= g1.d)
+    {
+      retval = x;
+      goto ret;
+    }
 
   /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
-  if (w<=g2.d) {
-
-    /* First stage */
-    x2 = x*x;
-    t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
-    if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) { retval = y; goto ret; }
-
-    /* Second stage */
-    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
-	 x2*a27.d))))));
-    EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
-    ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    MUL2(x ,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(x    ,0.0,c2,cc2,c1,cc1,t1,t2)
-    if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) { retval = y; goto ret; }
-    retval = tanMp(x);
-    goto ret;
-  }
+  if (w <= g2.d)
+    {
 
-  /* (III) The case 0.0608 < abs(x) <= 0.787 */
-  if (w<=g3.d) {
-
-    /* First stage */
-    i = ((int) (mfftnhf.d+TWO8*w));
-    z = w-xfg[i][0].d;  z2 = z*z;   s = (x<0.0) ? MONE : ONE;
-    pz = z+z*z2*(e0.d+z2*e1.d);
-    fi = xfg[i][1].d;   gi = xfg[i][2].d;   t2 = pz*(gi+fi)/(gi-pz);
-    if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) { retval = (s*y); goto ret; }
-    t3 = (t2<0.0) ? -t2 : t2;
-    t4 = fi*ua3.d+t3*ub3.d;
-    if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (s*y); goto ret; }
-
-    /* Second stage */
-    ffi = xfg[i][3].d;
-    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
-    EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
-    ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    MUL2(z ,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(z ,0.0,c2,cc2,c1,cc1,t1,t2)
-
-    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
-    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
-    SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-
-      if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) { retval = (s*y); goto ret; }
-    retval = tanMp(x);
-    goto ret;
-  }
+      /* First stage */
+      x2 = x * x;
 
-  /* (---) The case 0.787 < abs(x) <= 25 */
-  if (w<=g4.d) {
-    /* Range reduction by algorithm i */
-    t = (x*hpinv.d + toint.d);
-    xn = t - toint.d;
-    v.d = t;
-    t1 = (x - xn*mp1.d) - xn*mp2.d;
-    n =v.i[LOW_HALF] & 0x00000001;
-    da = xn*mp3.d;
-    a=t1-da;
-    da = (t1-a)-da;
-    if (a<0.0)  {ya=-a;  yya=-da;  sy=MONE;}
-    else         {ya= a;  yya= da;  sy= ONE;}
-
-    /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
-    if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
-
-    /* (VI) The case 0.787 < abs(x) <= 25,    1e-7 < abs(y) <= 0.0608 */
-    if (ya<=gy2.d) {
-      a2 = a*a;
-      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
-      if (n) {
-	/* First stage -cot */
-	EADD(a,t2,b,db)
-	DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-	if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) { retval = (-y); goto ret; } }
-      else {
-	/* First stage tan */
-	if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) { retval = y; goto ret; } }
-      /* Second stage */
-      /* Range reduction by algorithm ii */
-      t = (x*hpinv.d + toint.d);
-      xn = t - toint.d;
-      v.d = t;
-      t1 = (x - xn*mp1.d) - xn*mp2.d;
-      n =v.i[LOW_HALF] & 0x00000001;
-      da = xn*pp3.d;
-      t=t1-da;
-      da = (t1-t)-da;
-      t1 = xn*pp4.d;
-      a = t - t1;
-      da = ((t-a)-t1)+da;
+      t2 = d9.d + x2 * d11.d;
+      t2 = d7.d + x2 * t2;
+      t2 = d5.d + x2 * t2;
+      t2 = d3.d + x2 * t2;
+      t2 *= x * x2;
+
+      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
+	{
+	  retval = y;
+	  goto ret;
+	}
 
       /* Second stage */
-      EADD(a,da,t1,t2)   a=t1;  da=t2;
-      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
-      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
-	   x2*a27.d))))));
-      ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a  ,da  ,c2,cc2,c1,cc1,t1,t2)
-
-      if (n) {
-	/* Second stage -cot */
-	DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-	if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) { retval = (-y); goto ret; } }
-      else {
-	/* Second stage tan */
-	if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) { retval = y; goto ret; } }
-      retval = tanMp(x);
+      c1 = a25.d + x2 * a27.d;
+      c1 = a23.d + x2 * c1;
+      c1 = a21.d + x2 * c1;
+      c1 = a19.d + x2 * c1;
+      c1 = a17.d + x2 * c1;
+      c1 = a15.d + x2 * c1;
+      c1 *= x2;
+
+      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
+      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
+      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
+	{
+	  retval = y;
+	  goto ret;
+	}
+      retval = tanMp (x);
       goto ret;
     }
 
-    /* (VII) The case 0.787 < abs(x) <= 25,    0.0608 < abs(y) <= 0.787 */
+  /* (III) The case 0.0608 < abs(x) <= 0.787 */
+  if (w <= g3.d)
+    {
+
+      /* First stage */
+      i = ((int) (mfftnhf.d + TWO8 * w));
+      z = w - xfg[i][0].d;
+      z2 = z * z;
+      s = (x < 0.0) ? MONE : ONE;
+      pz = z + z * z2 * (e0.d + z2 * e1.d);
+      fi = xfg[i][1].d;
+      gi = xfg[i][2].d;
+      t2 = pz * (gi + fi) / (gi - pz);
+      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
+	{
+	  retval = (s * y);
+	  goto ret;
+	}
+      t3 = (t2 < 0.0) ? -t2 : t2;
+      t4 = fi * ua3.d + t3 * ub3.d;
+      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+	{
+	  retval = (s * y);
+	  goto ret;
+	}
 
-    /* First stage */
-    i = ((int) (mfftnhf.d+TWO8*ya));
-    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
-    pz = z+z*z2*(e0.d+z2*e1.d);
-    fi = xfg[i][1].d;   gi = xfg[i][2].d;
+      /* Second stage */
+      ffi = xfg[i][3].d;
+      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
+      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);
+
+      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+	    t10);
+
+      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
+	{
+	  retval = (s * y);
+	  goto ret;
+	}
+      retval = tanMp (x);
+      goto ret;
+    }
 
-    if (n) {
-      /* -cot */
-      t2 = pz*(fi+gi)/(fi+pz);
-      if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) { retval = (-sy*y); goto ret; }
-      t3 = (t2<0.0) ? -t2 : t2;
-      t4 = gi*ua10.d+t3*ub10.d;
-      if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
-    else   {
-      /* tan */
-      t2 = pz*(gi+fi)/(gi-pz);
-      if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) { retval = (sy*y); goto ret; }
-      t3 = (t2<0.0) ? -t2 : t2;
-      t4 = fi*ua9.d+t3*ub9.d;
-      if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
-
-    /* Second stage */
-    ffi = xfg[i][3].d;
-    EADD(z0,yya,z,zz)
-    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
-    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
-    ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
-    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
-    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
-    SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
-    if (n) {
-      /* -cot */
-      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) { retval = (-sy*y); goto ret; } }
-    else {
-      /* tan */
-      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) { retval = (sy*y); goto ret; } }
+  /* (---) The case 0.787 < abs(x) <= 25 */
+  if (w <= g4.d)
+    {
+      /* Range reduction by algorithm i */
+      t = (x * hpinv.d + toint.d);
+      xn = t - toint.d;
+      v.d = t;
+      t1 = (x - xn * mp1.d) - xn * mp2.d;
+      n = v.i[LOW_HALF] & 0x00000001;
+      da = xn * mp3.d;
+      a = t1 - da;
+      da = (t1 - a) - da;
+      if (a < 0.0)
+	{
+	  ya = -a;
+	  yya = -da;
+	  sy = MONE;
+	}
+      else
+	{
+	  ya = a;
+	  yya = da;
+	  sy = ONE;
+	}
+
+      /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
+      if (ya <= gy1.d)
+	{
+	  retval = tanMp (x);
+	  goto ret;
+	}
+
+      /* (VI) The case 0.787 < abs(x) <= 25,    1e-7 < abs(y) <= 0.0608 */
+      if (ya <= gy2.d)
+	{
+	  a2 = a * a;
+	  t2 = d9.d + a2 * d11.d;
+	  t2 = d7.d + a2 * t2;
+	  t2 = d5.d + a2 * t2;
+	  t2 = d3.d + a2 * t2;
+	  t2 = da + a * a2 * t2;
+
+	  if (n)
+	    {
+	      /* First stage -cot */
+	      EADD (a, t2, b, db);
+	      DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
+		    t9, t10);
+	      if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
+		{
+		  retval = (-y);
+		  goto ret;
+		}
+	    }
+	  else
+	    {
+	      /* First stage tan */
+	      if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
+		{
+		  retval = y;
+		  goto ret;
+		}
+	    }
+	  /* Second stage */
+	  /* Range reduction by algorithm ii */
+	  t = (x * hpinv.d + toint.d);
+	  xn = t - toint.d;
+	  v.d = t;
+	  t1 = (x - xn * mp1.d) - xn * mp2.d;
+	  n = v.i[LOW_HALF] & 0x00000001;
+	  da = xn * pp3.d;
+	  t = t1 - da;
+	  da = (t1 - t) - da;
+	  t1 = xn * pp4.d;
+	  a = t - t1;
+	  da = ((t - a) - t1) + da;
+
+	  /* Second stage */
+	  EADD (a, da, t1, t2);
+	  a = t1;
+	  da = t2;
+	  MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+
+	  c1 = a25.d + x2 * a27.d;
+	  c1 = a23.d + x2 * c1;
+	  c1 = a21.d + x2 * c1;
+	  c1 = a19.d + x2 * c1;
+	  c1 = a17.d + x2 * c1;
+	  c1 = a15.d + x2 * c1;
+	  c1 *= x2;
+
+	  ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+	  if (n)
+	    {
+	      /* Second stage -cot */
+	      DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
+		    t8, t9, t10);
+	      if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
+		{
+		  retval = (-y);
+		  goto ret;
+		}
+	    }
+	  else
+	    {
+	      /* Second stage tan */
+	      if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
+		{
+		  retval = y;
+		  goto ret;
+		}
+	    }
+	  retval = tanMp (x);
+	  goto ret;
+	}
+
+      /* (VII) The case 0.787 < abs(x) <= 25,    0.0608 < abs(y) <= 0.787 */
+
+      /* First stage */
+      i = ((int) (mfftnhf.d + TWO8 * ya));
+      z = (z0 = (ya - xfg[i][0].d)) + yya;
+      z2 = z * z;
+      pz = z + z * z2 * (e0.d + z2 * e1.d);
+      fi = xfg[i][1].d;
+      gi = xfg[i][2].d;
+
+      if (n)
+	{
+	  /* -cot */
+	  t2 = pz * (fi + gi) / (fi + pz);
+	  if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	  t3 = (t2 < 0.0) ? -t2 : t2;
+	  t4 = gi * ua10.d + t3 * ub10.d;
+	  if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* tan */
+	  t2 = pz * (gi + fi) / (gi - pz);
+	  if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	  t3 = (t2 < 0.0) ? -t2 : t2;
+	  t4 = fi * ua9.d + t3 * ub9.d;
+	  if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	}
 
-    retval = tanMp(x);
-    goto ret;
-  }
+      /* Second stage */
+      ffi = xfg[i][3].d;
+      EADD (z0, yya, z, zz)
+	MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+      if (n)
+	{
+	  /* -cot */
+	  DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* tan */
+	  DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	}
+
+      retval = tanMp (x);
+      goto ret;
+    }
 
   /* (---) The case 25 < abs(x) <= 1e8 */
-  if (w<=g5.d) {
-    /* Range reduction by algorithm ii */
-    t = (x*hpinv.d + toint.d);
-    xn = t - toint.d;
-    v.d = t;
-    t1 = (x - xn*mp1.d) - xn*mp2.d;
-    n =v.i[LOW_HALF] & 0x00000001;
-    da = xn*pp3.d;
-    t=t1-da;
-    da = (t1-t)-da;
-    t1 = xn*pp4.d;
-    a = t - t1;
-    da = ((t-a)-t1)+da;
-    EADD(a,da,t1,t2)   a=t1;  da=t2;
-    if (a<0.0)  {ya=-a;  yya=-da;  sy=MONE;}
-    else         {ya= a;  yya= da;  sy= ONE;}
-
-    /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
-    if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
-
-    /* (VIII) The case 25 < abs(x) <= 1e8,    1e-7 < abs(y) <= 0.0608 */
-    if (ya<=gy2.d) {
-      a2 = a*a;
-      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
-      if (n) {
-	/* First stage -cot */
-	EADD(a,t2,b,db)
-	DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-	if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) { retval = (-y); goto ret; } }
-      else {
-	/* First stage tan */
-	if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) { retval = y; goto ret; } }
+  if (w <= g5.d)
+    {
+      /* Range reduction by algorithm ii */
+      t = (x * hpinv.d + toint.d);
+      xn = t - toint.d;
+      v.d = t;
+      t1 = (x - xn * mp1.d) - xn * mp2.d;
+      n = v.i[LOW_HALF] & 0x00000001;
+      da = xn * pp3.d;
+      t = t1 - da;
+      da = (t1 - t) - da;
+      t1 = xn * pp4.d;
+      a = t - t1;
+      da = ((t - a) - t1) + da;
+      EADD (a, da, t1, t2);
+      a = t1;
+      da = t2;
+      if (a < 0.0)
+	{
+	  ya = -a;
+	  yya = -da;
+	  sy = MONE;
+	}
+      else
+	{
+	  ya = a;
+	  yya = da;
+	  sy = ONE;
+	}
+
+      /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
+      if (ya <= gy1.d)
+	{
+	  retval = tanMp (x);
+	  goto ret;
+	}
+
+      /* (VIII) The case 25 < abs(x) <= 1e8,    1e-7 < abs(y) <= 0.0608 */
+      if (ya <= gy2.d)
+	{
+	  a2 = a * a;
+	  t2 = d9.d + a2 * d11.d;
+	  t2 = d7.d + a2 * t2;
+	  t2 = d5.d + a2 * t2;
+	  t2 = d3.d + a2 * t2;
+	  t2 = da + a * a2 * t2;
+
+	  if (n)
+	    {
+	      /* First stage -cot */
+	      EADD (a, t2, b, db);
+	      DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
+		    t9, t10);
+	      if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
+		{
+		  retval = (-y);
+		  goto ret;
+		}
+	    }
+	  else
+	    {
+	      /* First stage tan */
+	      if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
+		{
+		  retval = y;
+		  goto ret;
+		}
+	    }
+
+	  /* Second stage */
+	  MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+	  c1 = a25.d + x2 * a27.d;
+	  c1 = a23.d + x2 * c1;
+	  c1 = a21.d + x2 * c1;
+	  c1 = a19.d + x2 * c1;
+	  c1 = a17.d + x2 * c1;
+	  c1 = a15.d + x2 * c1;
+	  c1 *= x2;
+
+	  ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+	  if (n)
+	    {
+	      /* Second stage -cot */
+	      DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
+		    t8, t9, t10);
+	      if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
+		{
+		  retval = (-y);
+		  goto ret;
+		}
+	    }
+	  else
+	    {
+	      /* Second stage tan */
+	      if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
+		{
+		  retval = (y);
+		  goto ret;
+		}
+	    }
+	  retval = tanMp (x);
+	  goto ret;
+	}
+
+      /* (IX) The case 25 < abs(x) <= 1e8,    0.0608 < abs(y) <= 0.787 */
+      /* First stage */
+      i = ((int) (mfftnhf.d + TWO8 * ya));
+      z = (z0 = (ya - xfg[i][0].d)) + yya;
+      z2 = z * z;
+      pz = z + z * z2 * (e0.d + z2 * e1.d);
+      fi = xfg[i][1].d;
+      gi = xfg[i][2].d;
+
+      if (n)
+	{
+	  /* -cot */
+	  t2 = pz * (fi + gi) / (fi + pz);
+	  if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	  t3 = (t2 < 0.0) ? -t2 : t2;
+	  t4 = gi * ua18.d + t3 * ub18.d;
+	  if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* tan */
+	  t2 = pz * (gi + fi) / (gi - pz);
+	  if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	  t3 = (t2 < 0.0) ? -t2 : t2;
+	  t4 = fi * ua17.d + t3 * ub17.d;
+	  if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	}
 
       /* Second stage */
-      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
-      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
-	   x2*a27.d))))));
-      ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
-      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(a  ,da  ,c2,cc2,c1,cc1,t1,t2)
-
-      if (n) {
-	/* Second stage -cot */
-	DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-	if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) { retval = (-y); goto ret; } }
-      else {
-	/* Second stage tan */
-	if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) { retval = (y); goto ret; } }
-      retval = tanMp(x);
+      ffi = xfg[i][3].d;
+      EADD (z0, yya, z, zz);
+      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+      if (n)
+	{
+	  /* -cot */
+	  DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
+	    {
+	      retval = (-sy * y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* tan */
+	  DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
+	    {
+	      retval = (sy * y);
+	      goto ret;
+	    }
+	}
+      retval = tanMp (x);
       goto ret;
     }
 
-    /* (IX) The case 25 < abs(x) <= 1e8,    0.0608 < abs(y) <= 0.787 */
-    /* First stage */
-    i = ((int) (mfftnhf.d+TWO8*ya));
-    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
-    pz = z+z*z2*(e0.d+z2*e1.d);
-    fi = xfg[i][1].d;   gi = xfg[i][2].d;
-
-    if (n) {
-      /* -cot */
-      t2 = pz*(fi+gi)/(fi+pz);
-      if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) { retval = (-sy*y); goto ret; }
-      t3 = (t2<0.0) ? -t2 : t2;
-      t4 = gi*ua18.d+t3*ub18.d;
-      if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
-    else   {
-      /* tan */
-      t2 = pz*(gi+fi)/(gi-pz);
-      if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) { retval = (sy*y); goto ret; }
-      t3 = (t2<0.0) ? -t2 : t2;
-      t4 = fi*ua17.d+t3*ub17.d;
-      if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
-
-    /* Second stage */
-    ffi = xfg[i][3].d;
-    EADD(z0,yya,z,zz)
-    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
-    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
-    ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
-    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
-    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
-    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
-    SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
-    if (n) {
-      /* -cot */
-      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) { retval = (-sy*y); goto ret; } }
-    else {
-      /* tan */
-      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) { retval = (sy*y); goto ret; } }
-    retval = tanMp(x);
-    goto ret;
-  }
-
   /* (---) The case 1e8 < abs(x) < 2**1024 */
   /* Range reduction by algorithm iii */
-  n = (__branred(x,&a,&da)) & 0x00000001;
-  EADD(a,da,t1,t2)   a=t1;  da=t2;
-  if (a<0.0)  {ya=-a;  yya=-da;  sy=MONE;}
-  else         {ya= a;  yya= da;  sy= ONE;}
+  n = (__branred (x, &a, &da)) & 0x00000001;
+  EADD (a, da, t1, t2);
+  a = t1;
+  da = t2;
+  if (a < 0.0)
+    {
+      ya = -a;
+      yya = -da;
+      sy = MONE;
+    }
+  else
+    {
+      ya = a;
+      yya = da;
+      sy = ONE;
+    }
 
   /* (+++) The case 1e8 < abs(x) < 2**1024,    abs(y) <= 1e-7 */
-  if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
+  if (ya <= gy1.d)
+    {
+      retval = tanMp (x);
+      goto ret;
+    }
 
   /* (X) The case 1e8 < abs(x) < 2**1024,    1e-7 < abs(y) <= 0.0608 */
-  if (ya<=gy2.d) {
-    a2 = a*a;
-    t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
-    if (n) {
-      /* First stage -cot */
-      EADD(a,t2,b,db)
-      DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) { retval = (-y); goto ret; } }
-    else {
-      /* First stage tan */
-      if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) { retval = y; goto ret; } }
-
-    /* Second stage */
-    /* Reduction by algorithm iv */
-    p=10;    n = (__mpranred(x,&mpa,p)) & 0x00000001;
-    __mp_dbl(&mpa,&a,p);        __dbl_mp(a,&mpt1,p);
-    __sub(&mpa,&mpt1,&mpt2,p);  __mp_dbl(&mpt2,&da,p);
-
-    MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
-    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
-	 x2*a27.d))))));
-    ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
-    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-    MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-    ADD2(a    ,da    ,c2,cc2,c1,cc1,t1,t2)
-
-    if (n) {
-      /* Second stage -cot */
-      DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) { retval = (-y); goto ret; } }
-    else {
-      /* Second stage tan */
-      if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) { retval = y; goto ret; } }
-    retval = tanMp(x);
-    goto ret;
-  }
+  if (ya <= gy2.d)
+    {
+      a2 = a * a;
+      t2 = d9.d + a2 * d11.d;
+      t2 = d7.d + a2 * t2;
+      t2 = d5.d + a2 * t2;
+      t2 = d3.d + a2 * t2;
+      t2 = da + a * a2 * t2;
+      if (n)
+	{
+	  /* First stage -cot */
+	  EADD (a, t2, b, db);
+	  DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
+	    {
+	      retval = (-y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* First stage tan */
+	  if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
+	    {
+	      retval = y;
+	      goto ret;
+	    }
+	}
+
+      /* Second stage */
+      /* Reduction by algorithm iv */
+      p = 10;
+      n = (__mpranred (x, &mpa, p)) & 0x00000001;
+      __mp_dbl (&mpa, &a, p);
+      __dbl_mp (a, &mpt1, p);
+      __sub (&mpa, &mpt1, &mpt2, p);
+      __mp_dbl (&mpt2, &da, p);
+
+      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+
+      c1 = a25.d + x2 * a27.d;
+      c1 = a23.d + x2 * c1;
+      c1 = a21.d + x2 * c1;
+      c1 = a19.d + x2 * c1;
+      c1 = a17.d + x2 * c1;
+      c1 = a15.d + x2 * c1;
+      c1 *= x2;
+
+      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+      if (n)
+	{
+	  /* Second stage -cot */
+	  DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
+		t9, t10);
+	  if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
+	    {
+	      retval = (-y);
+	      goto ret;
+	    }
+	}
+      else
+	{
+	  /* Second stage tan */
+	  if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
+	    {
+	      retval = y;
+	      goto ret;
+	    }
+	}
+      retval = tanMp (x);
+      goto ret;
+    }
 
   /* (XI) The case 1e8 < abs(x) < 2**1024,    0.0608 < abs(y) <= 0.787 */
   /* First stage */
-  i = ((int) (mfftnhf.d+TWO8*ya));
-  z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
-  pz = z+z*z2*(e0.d+z2*e1.d);
-  fi = xfg[i][1].d;   gi = xfg[i][2].d;
-
-  if (n) {
-    /* -cot */
-    t2 = pz*(fi+gi)/(fi+pz);
-    if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) { retval = (-sy*y); goto ret; }
-    t3 = (t2<0.0) ? -t2 : t2;
-    t4 = gi*ua26.d+t3*ub26.d;
-    if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
-  else   {
-    /* tan */
-    t2 = pz*(gi+fi)/(gi-pz);
-    if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) { retval = (sy*y); goto ret; }
-    t3 = (t2<0.0) ? -t2 : t2;
-    t4 = fi*ua25.d+t3*ub25.d;
-    if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
+  i = ((int) (mfftnhf.d + TWO8 * ya));
+  z = (z0 = (ya - xfg[i][0].d)) + yya;
+  z2 = z * z;
+  pz = z + z * z2 * (e0.d + z2 * e1.d);
+  fi = xfg[i][1].d;
+  gi = xfg[i][2].d;
+
+  if (n)
+    {
+      /* -cot */
+      t2 = pz * (fi + gi) / (fi + pz);
+      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
+	{
+	  retval = (-sy * y);
+	  goto ret;
+	}
+      t3 = (t2 < 0.0) ? -t2 : t2;
+      t4 = gi * ua26.d + t3 * ub26.d;
+      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+	{
+	  retval = (-sy * y);
+	  goto ret;
+	}
+    }
+  else
+    {
+      /* tan */
+      t2 = pz * (gi + fi) / (gi - pz);
+      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
+	{
+	  retval = (sy * y);
+	  goto ret;
+	}
+      t3 = (t2 < 0.0) ? -t2 : t2;
+      t4 = fi * ua25.d + t3 * ub25.d;
+      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+	{
+	  retval = (sy * y);
+	  goto ret;
+	}
+    }
 
   /* Second stage */
   ffi = xfg[i][3].d;
-  EADD(z0,yya,z,zz)
-  MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
-  c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
-  ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
-  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
-  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
-  MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
-  ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
-  MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
-  SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
-  if (n) {
-    /* -cot */
-    DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-    if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) { retval = (-sy*y); goto ret; } }
-  else {
-    /* tan */
-    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-    if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) { retval = (sy*y); goto ret; } }
-  retval = tanMp(x);
+  EADD (z0, yya, z, zz);
+  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+  if (n)
+    {
+      /* -cot */
+      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+	    t10);
+      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
+	{
+	  retval = (-sy * y);
+	  goto ret;
+	}
+    }
+  else
+    {
+      /* tan */
+      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+	    t10);
+      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
+	{
+	  retval = (sy * y);
+	  goto ret;
+	}
+    }
+  retval = tanMp (x);
   goto ret;
 
- ret:
+ret:
   return retval;
 }
 
@@ -507,14 +830,14 @@ tan(double x) {
 /* and converts result back to double                                    */
 static double
 SECTION
-tanMp(double x)
+tanMp (double x)
 {
   int p;
   double y;
   mp_no mpy;
-  p=32;
-  __mptan(x, &mpy, p);
-  __mp_dbl(&mpy,&y,p);
+  p = 32;
+  __mptan (x, &mpy, p);
+  __mp_dbl (&mpy, &y, p);
   return y;
 }
 

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=a64d7e0efb34bde344bc1d4d17646a8ae6b6d588

commit a64d7e0efb34bde344bc1d4d17646a8ae6b6d588
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Fri Mar 29 16:37:26 2013 +0530

    Remove ZERO and MZERO macros
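
As with ONE and MONE, ZERO and MZERO were aliases for literals (0.0 and -0.0),
but the two replacements differ: comparisons treat positive and negative zero
as equal, so tests like x == ZERO become x == 0, while a returned MZERO must
stay -0.0 to keep the sign bit. A small illustrative sketch of that
distinction (not glibc code; the variable names are made up):

#include <stdio.h>
#include <math.h>

int
main (void)
{
  double pz = 0.0, mz = -0.0;

  /* IEEE 754 comparisons ignore the sign of zero, so a test written
     against ZERO can compare with the plain literal 0.  */
  printf ("%d %d\n", pz == 0, mz == 0);		/* prints: 1 1 */

  /* The sign still matters when the value itself is produced, so a
     returned MZERO becomes the literal -0.0, not 0.  */
  printf ("%d %d\n", signbit (pz) != 0, signbit (mz) != 0);	/* 0 1 */
  printf ("%g %g\n", 1.0 / pz, 1.0 / mz);	/* inf -inf */
  return 0;
}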

diff --git a/ChangeLog b/ChangeLog
index cf2cb41..2ecbced 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,15 @@
 2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
 
+	* sysdeps/ieee754/dbl-64/mpa.h (ZERO, MZERO): Remove defines.
+	(__pow_mp): Replace ZERO and MZERO with their values.
+	* sysdeps/ieee754/dbl-64/e_atan2.c (__ieee754_atan2): Likewise.
+	* sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+	* sysdeps/ieee754/dbl-64/mpatan2.c (__mpatan2): Likewise.
+	* sysdeps/ieee754/dbl-64/mpexp.c (__mpexp): Likewise.
+	* sysdeps/ieee754/dbl-64/s_atan.c (atan): Likewise.
+	* sysdeps/powerpc/power4/fpu/mpa.c (__mul): Likewise.
+	(__sqr): Likewise.
+
 	* sysdeps/ieee754/dbl-64/s_atan.c: Fix formatting.
 
 	* sysdeps/ieee754/dbl-64/e_log.c: Fix formatting.
diff --git a/sysdeps/ieee754/dbl-64/e_atan2.c b/sysdeps/ieee754/dbl-64/e_atan2.c
index bfe0b3b..4ebe9c0 100644
--- a/sysdeps/ieee754/dbl-64/e_atan2.c
+++ b/sysdeps/ieee754/dbl-64/e_atan2.c
@@ -104,7 +104,7 @@ __ieee754_atan2 (double y, double x)
       if (dy == 0x00000000)
 	{
 	  if ((ux & 0x80000000) == 0x00000000)
-	    return ZERO;
+	    return 0;
 	  else
 	    return opi.d;
 	}
@@ -114,14 +114,14 @@ __ieee754_atan2 (double y, double x)
       if (dy == 0x00000000)
 	{
 	  if ((ux & 0x80000000) == 0x00000000)
-	    return MZERO;
+	    return -0.0;
 	  else
 	    return mopi.d;
 	}
     }
 
   /* x=+-0 */
-  if (x == ZERO)
+  if (x == 0)
     {
       if ((uy & 0x80000000) == 0x00000000)
 	return hpi.d;
@@ -147,9 +147,9 @@ __ieee754_atan2 (double y, double x)
 	  else
 	    {
 	      if ((uy & 0x80000000) == 0x00000000)
-		return ZERO;
+		return 0;
 	      else
-		return MZERO;
+		return -0.0;
 	    }
 	}
     }
@@ -190,16 +190,16 @@ __ieee754_atan2 (double y, double x)
     }
 
   /* either x/y or y/x is very close to zero */
-  ax = (x < ZERO) ? -x : x;
-  ay = (y < ZERO) ? -y : y;
+  ax = (x < 0) ? -x : x;
+  ay = (y < 0) ? -y : y;
   de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
   if (de >= ep)
     {
-      return ((y > ZERO) ? hpi.d : mhpi.d);
+      return ((y > 0) ? hpi.d : mhpi.d);
     }
   else if (de <= em)
     {
-      if (x > ZERO)
+      if (x > 0)
 	{
 	  if ((z = ay / ax) < TWOM1022)
 	    return normalized (ax, ay, y, z);
@@ -208,7 +208,7 @@ __ieee754_atan2 (double y, double x)
 	}
       else
 	{
-	  return ((y > ZERO) ? opi.d : mopi.d);
+	  return ((y > 0) ? opi.d : mopi.d);
 	}
     }
 
@@ -240,7 +240,7 @@ __ieee754_atan2 (double y, double x)
       du = ((ax - v) - vv) / ay;
     }
 
-  if (x > ZERO)
+  if (x > 0)
     {
       /* (i)   x>0, abs(y)< abs(x):  atan(ay/ax) */
       if (ay < ax)
@@ -262,7 +262,7 @@ __ieee754_atan2 (double y, double x)
 	      MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
 	      s1 = v * (f11.d + v * (f13.d
 				     + v * (f15.d + v * (f17.d + v * f19.d))));
-	      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	      ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -314,7 +314,7 @@ __ieee754_atan2 (double y, double x)
 			   +  v * (hij[i][13].d
 				   + v * (hij[i][14].d
 					  + v * hij[i][15].d))));
-	  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+	  ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -348,7 +348,7 @@ __ieee754_atan2 (double y, double x)
 	  s1 = v * (f11.d
 		    + v * (f13.d
 			   + v * (f15.d + v * (f17.d + v * f19.d))));
-	  ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	  ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -391,7 +391,7 @@ __ieee754_atan2 (double y, double x)
 			      + v * (hij[i][14].d
 				     + v * hij[i][15].d))));
 
-      ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+      ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
       ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -426,7 +426,7 @@ __ieee754_atan2 (double y, double x)
 	  MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
 	  s1 = v * (f11.d
 		    + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
-	  ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	  ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
 	  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -466,7 +466,7 @@ __ieee754_atan2 (double y, double x)
 		       + v * (hij[i][13].d
 			      + v * (hij[i][14].d
 				     + v * hij[i][15].d))));
-      ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+      ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
       ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -497,7 +497,7 @@ __ieee754_atan2 (double y, double x)
 
       MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
       s1 = v * (f11.d + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
-      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+      ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
       ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
       MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -538,7 +538,7 @@ __ieee754_atan2 (double y, double x)
 		   + v * (hij[i][13].d
 			  + v * (hij[i][14].d + v * hij[i][15].d))));
 
-  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+  ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
   MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
   MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
diff --git a/sysdeps/ieee754/dbl-64/e_log.c b/sysdeps/ieee754/dbl-64/e_log.c
index d3b5e6e..f8d5a19 100644
--- a/sysdeps/ieee754/dbl-64/e_log.c
+++ b/sysdeps/ieee754/dbl-64/e_log.c
@@ -79,9 +79,9 @@ __ieee754_log (double x)
   if (__builtin_expect (ux < 0x00100000, 0))
     {
       if (__builtin_expect (((ux & 0x7fffffff) | dx) == 0, 0))
-	return MHALF / ZERO;	/* return -INF */
+	return MHALF / 0.0;	/* return -INF */
       if (__builtin_expect (ux < 0, 0))
-	return (x - x) / ZERO;	/* return NaN  */
+	return (x - x) / 0.0;	/* return NaN  */
       n -= 54;
       x *= two54.d;		/* scale x     */
       num.d = x;
@@ -130,25 +130,25 @@ __ieee754_log (double x)
 
   EMULV (w, a, s2, ss2, t1, t2, t3, t4, t5);
   ADD2 (d10.d, dd10.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d9.d, dd9.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d8.d, dd8.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d7.d, dd7.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d6.d, dd6.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d5.d, dd5.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d4.d, dd4.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d3.d, dd3.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
   ADD2 (d2.d, dd2.d, s2, ss2, s3, ss3, t1, t2);
-  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
-  MUL2 (w, ZERO, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
-  ADD2 (w, ZERO, s3, ss3, b, bb, t1, t2);
+  MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, 0, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (w, 0, s3, ss3, b, bb, t1, t2);
 
   /* End stage II, case abs(x-1) < 0.03 */
   if ((y = b + (bb + b * E4)) == b + (bb - b * E4))
@@ -210,7 +210,7 @@ case_03:
   EADD (r0, t, ra, rb);
 
   /* Compute w */
-  MUL2 (q, ZERO, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (q, 0, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
 
   EADD (A, B0, a0, aa0);
 
diff --git a/sysdeps/ieee754/dbl-64/mpa.h b/sysdeps/ieee754/dbl-64/mpa.h
index 54044a0..27bc7dc 100644
--- a/sysdeps/ieee754/dbl-64/mpa.h
+++ b/sysdeps/ieee754/dbl-64/mpa.h
@@ -91,8 +91,6 @@ extern const mp_no mptwo;
 # define  TWO52     0x1.0p52		/* 2^52    */
 #endif
 
-#define  ZERO      0.0			/* 0       */
-#define  MZERO     -0.0			/* 0 with the sign bit set */
 #define  ONE       1.0			/* 1       */
 #define  MONE      -1.0			/* -1      */
 #define  TWO       2.0			/*  2      */
@@ -155,7 +153,7 @@ __pow_mp (int pow, mp_no *y, int p)
   Y[0] = ONE;
   Y[1] = 1 << rem;
 
-  /* Everything else is ZERO.  */
+  /* Everything else is 0.  */
   for (i = 2; i <= p; i++)
-    Y[i] = ZERO;
+    Y[i] = 0;
 }
diff --git a/sysdeps/ieee754/dbl-64/mpatan2.c b/sysdeps/ieee754/dbl-64/mpatan2.c
index d29c2fb..f4aa9db 100644
--- a/sysdeps/ieee754/dbl-64/mpatan2.c
+++ b/sysdeps/ieee754/dbl-64/mpatan2.c
@@ -46,11 +46,11 @@ __mpatan2 (mp_no *y, mp_no *x, mp_no *z, int p)
 {
   mp_no mpt1, mpt2, mpt3;
 
-  if (X[0] <= ZERO)
+  if (X[0] <= 0)
     {
       __dvd (x, y, &mpt1, p);
       __mul (&mpt1, &mpt1, &mpt2, p);
-      if (mpt1.d[0] != ZERO)
+      if (mpt1.d[0] != 0)
 	mpt1.d[0] = ONE;
       __add (&mpt2, &mpone, &mpt3, p);
       __mpsqrt (&mpt3, &mpt2, p);
diff --git a/sysdeps/ieee754/dbl-64/mpexp.c b/sysdeps/ieee754/dbl-64/mpexp.c
index 565c6c8..a219401 100644
--- a/sysdeps/ieee754/dbl-64/mpexp.c
+++ b/sysdeps/ieee754/dbl-64/mpexp.c
@@ -85,7 +85,7 @@ __mpexp (mp_no *x, mp_no *y, int p)
     {
       for (i = 2; i <= p; i++)
 	{
-	  if (X[i] != ZERO)
+	  if (X[i] != 0)
 	    break;
 	}
       if (i == p + 1)
diff --git a/sysdeps/ieee754/dbl-64/s_atan.c b/sysdeps/ieee754/dbl-64/s_atan.c
index dc1716f..e3c4be7 100644
--- a/sysdeps/ieee754/dbl-64/s_atan.c
+++ b/sysdeps/ieee754/dbl-64/s_atan.c
@@ -78,7 +78,7 @@ atan (double x)
     return x + x;
 
   /* Regular values of x, including denormals +-0 and +-INF */
-  u = (x < ZERO) ? -x : x;
+  u = (x < 0) ? -x : x;
   if (u < C)
     {
       if (u < B)
@@ -106,7 +106,7 @@ atan (double x)
 	      s1 = f11.d + v * s1;
 	      s1 *= v;
 
-	      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	      ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -114,9 +114,9 @@ atan (double x)
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	      ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
-	      MUL2 (x, ZERO, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
+	      MUL2 (x, 0, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
 		    t8);
-	      ADD2 (x, ZERO, s2, ss2, s1, ss1, t1, t2);
+	      ADD2 (x, 0, s2, ss2, s1, ss1, t1, t2);
 	      if ((y = s1 + (ss1 - U5 * s1)) == s1 + (ss1 + U5 * s1))
 		return y;
 
@@ -160,14 +160,14 @@ atan (double x)
 	  s1 = hij[i][11].d + z * s1;
 	  s1 *= z;
 
-	  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
-	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
+	  MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
-	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
-	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
-	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
 	  if ((y = s2 + (ss2 - U6 * s2)) == s2 + (ss2 + U6 * s2))
 	    return __signArctan (x, y);
@@ -200,7 +200,7 @@ atan (double x)
 	  if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
 	    return __signArctan (x, y);
 
-	  DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+	  DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
 		t10);
 	  t1 = w - hij[i][0].d;
 	  EADD (t1, ww, z, zz);
@@ -211,7 +211,7 @@ atan (double x)
 	  s1 = hij[i][11].d + z * s1;
 	  s1 *= z;
 
-	  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+	  ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
 	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
 	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -247,7 +247,7 @@ atan (double x)
 	      if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
 		return __signArctan (x, y);
 
-	      DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
+	      DIV2 (ONE, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
 		    t9, t10);
 	      MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
 
@@ -257,7 +257,7 @@ atan (double x)
 	      s1 = f11.d + v * s1;
 	      s1 *= v;
 
-	      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	      ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
 	      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
 	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
diff --git a/sysdeps/powerpc/power4/fpu/mpa.c b/sysdeps/powerpc/power4/fpu/mpa.c
index 1858c97..9d4d644 100644
--- a/sysdeps/powerpc/power4/fpu/mpa.c
+++ b/sysdeps/powerpc/power4/fpu/mpa.c
@@ -35,15 +35,15 @@ __mul (const mp_no *x, const mp_no *y, mp_no *z, int p)
   double u, zk, zk2;
 
   /* Is z=0?  */
-  if (__glibc_unlikely (X[0] * Y[0] == ZERO))
+  if (__glibc_unlikely (X[0] * Y[0] == 0))
     {
-      Z[0] = ZERO;
+      Z[0] = 0;
       return;
     }
 
   /* Multiply, add and carry */
   k2 = (p2 < 3) ? p2 + p2 : p2 + 3;
-  zk = Z[k2] = ZERO;
+  zk = Z[k2] = 0;
   for (k = k2; k > 1;)
     {
       if (k > p2)
@@ -101,7 +101,7 @@ __mul (const mp_no *x, const mp_no *y, mp_no *z, int p)
 
   int e = EX + EY;
   /* Is there a carry beyond the most significant digit?  */
-  if (Z[1] == ZERO)
+  if (Z[1] == 0)
     {
       for (i = 1; i <= p2; i++)
 	Z[i] = Z[i + 1];
@@ -123,24 +123,24 @@ __sqr (const mp_no *x, mp_no *y, int p)
   double u, yk;
 
   /* Is z=0?  */
-  if (__glibc_unlikely (X[0] == ZERO))
+  if (__glibc_unlikely (X[0] == 0))
     {
-      Y[0] = ZERO;
+      Y[0] = 0;
       return;
     }
 
   /* We need not iterate through all X's since it's pointless to
      multiply zeroes.  */
   for (ip = p; ip > 0; ip--)
-    if (X[ip] != ZERO)
+    if (X[ip] != 0)
       break;
 
   k = (__glibc_unlikely (p < 3)) ? p + p : p + 3;
 
   while (k > 2 * ip + 1)
-    Y[k--] = ZERO;
+    Y[k--] = 0;
 
-  yk = ZERO;
+  yk = 0;
 
   while (k > p)
     {
@@ -204,7 +204,7 @@ __sqr (const mp_no *x, mp_no *y, int p)
 
   int e = EX * 2;
   /* Is there a carry beyond the most significant digit?  */
-  if (__glibc_unlikely (Y[1] == ZERO))
+  if (__glibc_unlikely (Y[1] == 0))
     {
       for (i = 1; i <= p; i++)
 	Y[i] = Y[i + 1];
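
The "Z[1] == 0" and "Y[1] == 0" tests in this file are the usual
post-multiplication normalisation: the top digit slot is reserved for a
possible carry, and when it stays empty the digits are shifted down one
place and the exponent shrinks by one.  A minimal sketch of that step
(a hypothetical helper working on a plain digit array, not the glibc
routine):

/* z[1] is the slack slot for a carry, the digits follow, and z must
   have at least n + 2 entries; *e is the provisional exponent.  */
static void
normalize_product (double *z, int n, int *e)
{
  if (z[1] == 0)               /* no carry into the extra top digit */
    {
      for (int i = 1; i <= n; i++)
        z[i] = z[i + 1];       /* shift the digits down one place */
      (*e)--;                  /* and adjust the exponent to match */
    }
}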

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d26dd3eb006e813d5f81bd7a5f02d70d480de8c4

commit d26dd3eb006e813d5f81bd7a5f02d70d480de8c4
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Fri Mar 29 16:34:28 2013 +0530

    Format s_atan.c

diff --git a/ChangeLog b/ChangeLog
index a6d5d02..cf2cb41 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@
 2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
 
+	* sysdeps/ieee754/dbl-64/s_atan.c: Fix formatting.
+
 	* sysdeps/ieee754/dbl-64/e_log.c: Fix formatting.
 
 2013-03-28  Roland McGrath  <roland@hack.frob.com>
diff --git a/sysdeps/ieee754/dbl-64/s_atan.c b/sysdeps/ieee754/dbl-64/s_atan.c
index aa3564d..dc1716f 100644
--- a/sysdeps/ieee754/dbl-64/s_atan.c
+++ b/sysdeps/ieee754/dbl-64/s_atan.c
@@ -43,177 +43,272 @@
 #include "atnat.h"
 #include <math.h>
 
-void __mpatan(mp_no *,mp_no *,int);          /* see definition in mpatan.c */
-static double atanMp(double,const int[]);
+void __mpatan (mp_no *, mp_no *, int);	/* see definition in mpatan.c */
+static double atanMp (double, const int[]);
 
   /* Fix the sign of y and return */
-static double  __signArctan(double x,double y){
-  return __copysign(y, x);
+static double
+__signArctan (double x, double y)
+{
+  return __copysign (y, x);
 }
 
 
 /* An ultimate atan() routine. Given an IEEE double machine number x,    */
 /* routine computes the correctly rounded (to nearest) value of atan(x). */
-double atan(double x) {
-
-
-  double cor,s1,ss1,s2,ss2,t1,t2,t3,t7,t8,t9,t10,u,u2,u3,
-	 v,vv,w,ww,y,yy,z,zz;
+double
+atan (double x)
+{
+  double cor, s1, ss1, s2, ss2, t1, t2, t3, t7, t8, t9, t10, u, u2, u3,
+    v, vv, w, ww, y, yy, z, zz;
 #ifndef DLA_FMS
-  double t4,t5,t6;
+  double t4, t5, t6;
 #endif
-  int i,ux,dx;
-  static const int pr[M]={6,8,10,32};
+  int i, ux, dx;
+  static const int pr[M] = { 6, 8, 10, 32 };
   number num;
 
-  num.d = x;  ux = num.i[HIGH_HALF];  dx = num.i[LOW_HALF];
+  num.d = x;
+  ux = num.i[HIGH_HALF];
+  dx = num.i[LOW_HALF];
 
   /* x=NaN */
-  if (((ux&0x7ff00000)==0x7ff00000) && (((ux&0x000fffff)|dx)!=0x00000000))
-    return x+x;
+  if (((ux & 0x7ff00000) == 0x7ff00000)
+      && (((ux & 0x000fffff) | dx) != 0x00000000))
+    return x + x;
 
   /* Regular values of x, including denormals +-0 and +-INF */
-  u = (x<ZERO) ? -x : x;
-  if (u<C) {
-    if (u<B) {
-      if (u<A) {                                           /* u < A */
-	 return x; }
-      else {                                               /* A <= u < B */
-	v=x*x;  yy=x*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
-	if ((y=x+(yy-U1*x)) == x+(yy+U1*x))  return y;
-
-	EMULV(x,x,v,vv,t1,t2,t3,t4,t5)                       /* v+vv=x^2 */
-	s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
-	ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	MUL2(x,ZERO,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(x,ZERO,s2,ss2,s1,ss1,t1,t2)
-	if ((y=s1+(ss1-U5*s1)) == s1+(ss1+U5*s1))  return y;
-
-	return atanMp(x,pr);
-      } }
-    else {  /* B <= u < C */
-      i=(TWO52+TWO8*u)-TWO52;  i-=16;
-      z=u-cij[i][0].d;
-      yy=z*(cij[i][2].d+z*(cij[i][3].d+z*(cij[i][4].d+
-			z*(cij[i][5].d+z* cij[i][6].d))));
-      t1=cij[i][1].d;
-      if (i<112) {
-	if (i<48)  u2=U21;    /* u < 1/4        */
-	else       u2=U22; }  /* 1/4 <= u < 1/2 */
-      else {
-	if (i<176) u2=U23;    /* 1/2 <= u < 3/4 */
-	else       u2=U24; }  /* 3/4 <= u <= 1  */
-      if ((y=t1+(yy-u2*t1)) == t1+(yy+u2*t1))  return __signArctan(x,y);
-
-      z=u-hij[i][0].d;
-      s1=z*(hij[i][11].d+z*(hij[i][12].d+z*(hij[i][13].d+
-	 z*(hij[i][14].d+z* hij[i][15].d))));
-      ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
-      MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
-      if ((y=s2+(ss2-U6*s2)) == s2+(ss2+U6*s2))  return __signArctan(x,y);
-
-      return atanMp(x,pr);
-    }
-  }
-  else {
-    if (u<D) { /* C <= u < D */
-      w=ONE/u;
-      EMULV(w,u,t1,t2,t3,t4,t5,t6,t7)
-      ww=w*((ONE-t1)-t2);
-      i=(TWO52+TWO8*w)-TWO52;  i-=16;
-      z=(w-cij[i][0].d)+ww;
-      yy=HPI1-z*(cij[i][2].d+z*(cij[i][3].d+z*(cij[i][4].d+
-			     z*(cij[i][5].d+z* cij[i][6].d))));
-      t1=HPI-cij[i][1].d;
-      if (i<112)  u3=U31;  /* w <  1/2 */
-      else        u3=U32;  /* w >= 1/2 */
-      if ((y=t1+(yy-u3)) == t1+(yy+u3))  return __signArctan(x,y);
-
-      DIV2(ONE,ZERO,u,ZERO,w,ww,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-      t1=w-hij[i][0].d;
-      EADD(t1,ww,z,zz)
-      s1=z*(hij[i][11].d+z*(hij[i][12].d+z*(hij[i][13].d+
-	 z*(hij[i][14].d+z* hij[i][15].d))));
-      ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
-      MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
-      MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-      ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
-      SUB2(HPI,HPI1,s2,ss2,s1,ss1,t1,t2)
-      if ((y=s1+(ss1-U7)) == s1+(ss1+U7))  return __signArctan(x,y);
-
-    return atanMp(x,pr);
-    }
-    else {
-      if (u<E) { /* D <= u < E */
-	w=ONE/u;   v=w*w;
-	EMULV(w,u,t1,t2,t3,t4,t5,t6,t7)
-	yy=w*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
-	ww=w*((ONE-t1)-t2);
-	ESUB(HPI,w,t3,cor)
-	yy=((HPI1+cor)-ww)-yy;
-	if ((y=t3+(yy-U4)) == t3+(yy+U4))  return __signArctan(x,y);
-
-	DIV2(ONE,ZERO,u,ZERO,w,ww,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-	MUL2(w,ww,w,ww,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
-	s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
-	ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
-	MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
-	MUL2(w,ww,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-	ADD2(w,ww,s2,ss2,s1,ss1,t1,t2)
-	SUB2(HPI,HPI1,s1,ss1,s2,ss2,t1,t2)
-	if ((y=s2+(ss2-U8)) == s2+(ss2+U8))  return __signArctan(x,y);
-
-      return atanMp(x,pr);
-      }
-      else {
-	/* u >= E */
-	if (x>0) return  HPI;
-	else     return MHPI; }
+  u = (x < ZERO) ? -x : x;
+  if (u < C)
+    {
+      if (u < B)
+	{
+	  if (u < A)
+	    return x;
+	  else
+	    {			/* A <= u < B */
+	      v = x * x;
+	      yy = d11.d + v * d13.d;
+	      yy = d9.d + v * yy;
+	      yy = d7.d + v * yy;
+	      yy = d5.d + v * yy;
+	      yy = d3.d + v * yy;
+	      yy *= x * v;
+
+	      if ((y = x + (yy - U1 * x)) == x + (yy + U1 * x))
+		return y;
+
+	      EMULV (x, x, v, vv, t1, t2, t3, t4, t5);	/* v+vv=x^2 */
+
+	      s1 = f17.d + v * f19.d;
+	      s1 = f15.d + v * s1;
+	      s1 = f13.d + v * s1;
+	      s1 = f11.d + v * s1;
+	      s1 *= v;
+
+	      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      MUL2 (x, ZERO, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
+		    t8);
+	      ADD2 (x, ZERO, s2, ss2, s1, ss1, t1, t2);
+	      if ((y = s1 + (ss1 - U5 * s1)) == s1 + (ss1 + U5 * s1))
+		return y;
+
+	      return atanMp (x, pr);
+	    }
+	}
+      else
+	{			/* B <= u < C */
+	  i = (TWO52 + TWO8 * u) - TWO52;
+	  i -= 16;
+	  z = u - cij[i][0].d;
+	  yy = cij[i][5].d + z * cij[i][6].d;
+	  yy = cij[i][4].d + z * yy;
+	  yy = cij[i][3].d + z * yy;
+	  yy = cij[i][2].d + z * yy;
+	  yy *= z;
+
+	  t1 = cij[i][1].d;
+	  if (i < 112)
+	    {
+	      if (i < 48)
+		u2 = U21;	/* u < 1/4        */
+	      else
+		u2 = U22;
+	    }			/* 1/4 <= u < 1/2 */
+	  else
+	    {
+	      if (i < 176)
+		u2 = U23;	/* 1/2 <= u < 3/4 */
+	      else
+		u2 = U24;
+	    }			/* 3/4 <= u <= 1  */
+	  if ((y = t1 + (yy - u2 * t1)) == t1 + (yy + u2 * t1))
+	    return __signArctan (x, y);
+
+	  z = u - hij[i][0].d;
+
+	  s1 = hij[i][14].d + z * hij[i][15].d;
+	  s1 = hij[i][13].d + z * s1;
+	  s1 = hij[i][12].d + z * s1;
+	  s1 = hij[i][11].d + z * s1;
+	  s1 *= z;
+
+	  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, ZERO, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
+	  if ((y = s2 + (ss2 - U6 * s2)) == s2 + (ss2 + U6 * s2))
+	    return __signArctan (x, y);
+
+	  return atanMp (x, pr);
+	}
     }
-  }
+  else
+    {
+      if (u < D)
+	{			/* C <= u < D */
+	  w = ONE / u;
+	  EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
+	  ww = w * ((ONE - t1) - t2);
+	  i = (TWO52 + TWO8 * w) - TWO52;
+	  i -= 16;
+	  z = (w - cij[i][0].d) + ww;
+
+	  yy = cij[i][5].d + z * cij[i][6].d;
+	  yy = cij[i][4].d + z * yy;
+	  yy = cij[i][3].d + z * yy;
+	  yy = cij[i][2].d + z * yy;
+	  yy = HPI1 - z * yy;
 
+	  t1 = HPI - cij[i][1].d;
+	  if (i < 112)
+	    u3 = U31;		/* w <  1/2 */
+	  else
+	    u3 = U32;		/* w >= 1/2 */
+	  if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
+	    return __signArctan (x, y);
+
+	  DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+		t10);
+	  t1 = w - hij[i][0].d;
+	  EADD (t1, ww, z, zz);
+
+	  s1 = hij[i][14].d + z * hij[i][15].d;
+	  s1 = hij[i][13].d + z * s1;
+	  s1 = hij[i][12].d + z * s1;
+	  s1 = hij[i][11].d + z * s1;
+	  s1 *= z;
+
+	  ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
+	  MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	  ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
+	  SUB2 (HPI, HPI1, s2, ss2, s1, ss1, t1, t2);
+	  if ((y = s1 + (ss1 - U7)) == s1 + (ss1 + U7))
+	    return __signArctan (x, y);
+
+	  return atanMp (x, pr);
+	}
+      else
+	{
+	  if (u < E)
+	    {			/* D <= u < E */
+	      w = ONE / u;
+	      v = w * w;
+	      EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
+
+	      yy = d11.d + v * d13.d;
+	      yy = d9.d + v * yy;
+	      yy = d7.d + v * yy;
+	      yy = d5.d + v * yy;
+	      yy = d3.d + v * yy;
+	      yy *= w * v;
+
+	      ww = w * ((ONE - t1) - t2);
+	      ESUB (HPI, w, t3, cor);
+	      yy = ((HPI1 + cor) - ww) - yy;
+	      if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
+		return __signArctan (x, y);
+
+	      DIV2 (ONE, ZERO, u, ZERO, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
+		    t9, t10);
+	      MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
+
+	      s1 = f17.d + v * f19.d;
+	      s1 = f15.d + v * s1;
+	      s1 = f13.d + v * s1;
+	      s1 = f11.d + v * s1;
+	      s1 *= v;
+
+	      ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
+	      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+	      MUL2 (w, ww, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+	      ADD2 (w, ww, s2, ss2, s1, ss1, t1, t2);
+	      SUB2 (HPI, HPI1, s1, ss1, s2, ss2, t1, t2);
+
+	      if ((y = s2 + (ss2 - U8)) == s2 + (ss2 + U8))
+		return __signArctan (x, y);
+
+	      return atanMp (x, pr);
+	    }
+	  else
+	    {
+	      /* u >= E */
+	      if (x > 0)
+		return HPI;
+	      else
+		return MHPI;
+	    }
+	}
+    }
 }
 
  /* Final stages. Compute atan(x) by multiple precision arithmetic */
-static double atanMp(double x,const int pr[]){
-  mp_no mpx,mpy,mpy2,mperr,mpt1,mpy1;
-  double y1,y2;
-  int i,p;
-
-for (i=0; i<M; i++) {
-    p = pr[i];
-    __dbl_mp(x,&mpx,p);          __mpatan(&mpx,&mpy,p);
-    __dbl_mp(u9[i].d,&mpt1,p);   __mul(&mpy,&mpt1,&mperr,p);
-    __add(&mpy,&mperr,&mpy1,p);  __sub(&mpy,&mperr,&mpy2,p);
-    __mp_dbl(&mpy1,&y1,p);       __mp_dbl(&mpy2,&y2,p);
-    if (y1==y2)   return y1;
-  }
-  return y1; /*if unpossible to do exact computing */
+static double
+atanMp (double x, const int pr[])
+{
+  mp_no mpx, mpy, mpy2, mperr, mpt1, mpy1;
+  double y1, y2;
+  int i, p;
+
+  for (i = 0; i < M; i++)
+    {
+      p = pr[i];
+      __dbl_mp (x, &mpx, p);
+      __mpatan (&mpx, &mpy, p);
+      __dbl_mp (u9[i].d, &mpt1, p);
+      __mul (&mpy, &mpt1, &mperr, p);
+      __add (&mpy, &mperr, &mpy1, p);
+      __sub (&mpy, &mperr, &mpy2, p);
+      __mp_dbl (&mpy1, &y1, p);
+      __mp_dbl (&mpy2, &y2, p);
+      if (y1 == y2)
+	return y1;
+    }
+  return y1;			/* if exact computation was not possible */
 }
 
 #ifdef NO_LONG_DOUBLE
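
The atanMp routine above (and the stage_n loop in the e_log.c diff
below) follows the usual escalating-precision pattern: recompute the
value at each precision in pr[], widen it by the matching error bound
(relative via u9[i] here, absolute via e[i] in the log code), and accept
it once both ends of that interval round to the same double, i.e. the
correctly rounded result is determined.  A generic sketch of the
pattern, with placeholder eval/err parameters rather than the glibc
multi-precision calls:

#include <stddef.h>

static double
ziv_retry (double (*eval) (double x, int p), double x,
           const int *prec, const double *err, size_t n)
{
  double y = 0;
  for (size_t i = 0; i < n; i++)
    {
      y = eval (x, prec[i]);         /* value at precision prec[i] */
      double lo = y - err[i];
      double hi = y + err[i];
      if (lo == hi)                  /* bracket rounds to one double */
        return lo;
    }
  return y;                          /* last try if none agreed */
}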

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=0f6a8d4b0b5214cf07dce15881164c76298ffd0a

commit 0f6a8d4b0b5214cf07dce15881164c76298ffd0a
Author: Siddhesh Poyarekar <siddhesh@redhat.com>
Date:   Fri Mar 29 16:31:52 2013 +0530

    Format e_log.c

diff --git a/ChangeLog b/ChangeLog
index d4da540..a6d5d02 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2013-03-29  Siddhesh Poyarekar  <siddhesh@redhat.com>
+
+	* sysdeps/ieee754/dbl-64/e_log.c: Fix formatting.
+
 2013-03-28  Roland McGrath  <roland@hack.frob.com>
 
 	* include/stdlib.h [!SHARED] (__call_tls_dtors):
diff --git a/sysdeps/ieee754/dbl-64/e_log.c b/sysdeps/ieee754/dbl-64/e_log.c
index 58c9a8e..d3b5e6e 100644
--- a/sysdeps/ieee754/dbl-64/e_log.c
+++ b/sysdeps/ieee754/dbl-64/e_log.c
@@ -44,7 +44,7 @@
 # define SECTION
 #endif
 
-void __mplog(mp_no *, mp_no *, int);
+void __mplog (mp_no *, mp_no *, int);
 
 /*********************************************************************/
 /* An ultimate log routine. Given an IEEE double machine number x     */
@@ -52,163 +52,201 @@ void __mplog(mp_no *, mp_no *, int);
 /*********************************************************************/
 double
 SECTION
-__ieee754_log(double x) {
+__ieee754_log (double x)
+{
 #define M 4
-  static const int pr[M]={8,10,18,32};
-  int i,j,n,ux,dx,p;
-  double dbl_n,u,p0,q,r0,w,nln2a,luai,lubi,lvaj,lvbj,
-	 sij,ssij,ttij,A,B,B0,y,y1,y2,polI,polII,sa,sb,
-	 t1,t2,t7,t8,t,ra,rb,ww,
-	 a0,aa0,s1,s2,ss2,s3,ss3,a1,aa1,a,aa,b,bb,c;
+  static const int pr[M] = {8, 10, 18, 32};
+  int i, j, n, ux, dx, p;
+  double dbl_n, u, p0, q, r0, w, nln2a, luai, lubi, lvaj, lvbj,
+    sij, ssij, ttij, A, B, B0, y, y1, y2, polI, polII, sa, sb,
+    t1, t2, t7, t8, t, ra, rb, ww,
+    a0, aa0, s1, s2, ss2, s3, ss3, a1, aa1, a, aa, b, bb, c;
 #ifndef DLA_FMS
-  double t3,t4,t5,t6;
+  double t3, t4, t5, t6;
 #endif
   number num;
-  mp_no mpx,mpy,mpy1,mpy2,mperr;
+  mp_no mpx, mpy, mpy1, mpy2, mperr;
 
 #include "ulog.tbl"
 #include "ulog.h"
 
   /* Treating special values of x ( x<=0, x=INF, x=NaN etc.). */
 
-  num.d = x;  ux = num.i[HIGH_HALF];  dx = num.i[LOW_HALF];
-  n=0;
-  if (__builtin_expect(ux < 0x00100000, 0)) {
-    if (__builtin_expect(((ux & 0x7fffffff) | dx) == 0, 0))
-      return MHALF/ZERO; /* return -INF */
-    if (__builtin_expect(ux < 0, 0))
-      return (x-x)/ZERO;                         /* return NaN  */
-    n -= 54;    x *= two54.d;                              /* scale x     */
-    num.d = x;
-  }
-  if (__builtin_expect(ux >= 0x7ff00000, 0))
-    return x+x;                        /* INF or NaN  */
+  num.d = x;
+  ux = num.i[HIGH_HALF];
+  dx = num.i[LOW_HALF];
+  n = 0;
+  if (__builtin_expect (ux < 0x00100000, 0))
+    {
+      if (__builtin_expect (((ux & 0x7fffffff) | dx) == 0, 0))
+	return MHALF / ZERO;	/* return -INF */
+      if (__builtin_expect (ux < 0, 0))
+	return (x - x) / ZERO;	/* return NaN  */
+      n -= 54;
+      x *= two54.d;		/* scale x     */
+      num.d = x;
+    }
+  if (__builtin_expect (ux >= 0x7ff00000, 0))
+    return x + x;		/* INF or NaN  */
 
   /* Regular values of x */
 
-  w = x-ONE;
-  if (__builtin_expect(ABS(w) > U03, 1)) { goto case_03; }
-
+  w = x - ONE;
+  if (__builtin_expect (ABS (w) > U03, 1))
+    goto case_03;
 
   /*--- Stage I, the case abs(x-1) < 0.03 */
 
-  t8 = MHALF*w;
-  EMULV(t8,w,a,aa,t1,t2,t3,t4,t5)
-  EADD(w,a,b,bb)
-
+  t8 = MHALF * w;
+  EMULV (t8, w, a, aa, t1, t2, t3, t4, t5);
+  EADD (w, a, b, bb);
   /* Evaluate polynomial II */
-  polII = (b0.d+w*(b1.d+w*(b2.d+w*(b3.d+w*(b4.d+
-	  w*(b5.d+w*(b6.d+w*(b7.d+w*b8.d))))))))*w*w*w;
-  c = (aa+bb)+polII;
+  polII = b7.d + w * b8.d;
+  polII = b6.d + w * polII;
+  polII = b5.d + w * polII;
+  polII = b4.d + w * polII;
+  polII = b3.d + w * polII;
+  polII = b2.d + w * polII;
+  polII = b1.d + w * polII;
+  polII = b0.d + w * polII;
+  polII *= w * w * w;
+  c = (aa + bb) + polII;
 
   /* End stage I, case abs(x-1) < 0.03 */
-  if ((y=b+(c+b*E2)) == b+(c-b*E2))  return y;
+  if ((y = b + (c + b * E2)) == b + (c - b * E2))
+    return y;
 
   /*--- Stage II, the case abs(x-1) < 0.03 */
 
-  a = d11.d+w*(d12.d+w*(d13.d+w*(d14.d+w*(d15.d+w*(d16.d+
-	    w*(d17.d+w*(d18.d+w*(d19.d+w*d20.d))))))));
-  EMULV(w,a,s2,ss2,t1,t2,t3,t4,t5)
-  ADD2(d10.d,dd10.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d9.d,dd9.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d8.d,dd8.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d7.d,dd7.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d6.d,dd6.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d5.d,dd5.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d4.d,dd4.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d3.d,dd3.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(d2.d,dd2.d,s2,ss2,s3,ss3,t1,t2)
-  MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  MUL2(w,ZERO,s2,ss2,s3,ss3,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(w,ZERO,    s3,ss3, b, bb,t1,t2)
+  a = d19.d + w * d20.d;
+  a = d18.d + w * a;
+  a = d17.d + w * a;
+  a = d16.d + w * a;
+  a = d15.d + w * a;
+  a = d14.d + w * a;
+  a = d13.d + w * a;
+  a = d12.d + w * a;
+  a = d11.d + w * a;
+
+  EMULV (w, a, s2, ss2, t1, t2, t3, t4, t5);
+  ADD2 (d10.d, dd10.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d9.d, dd9.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d8.d, dd8.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d7.d, dd7.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d6.d, dd6.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d5.d, dd5.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d4.d, dd4.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d3.d, dd3.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (d2.d, dd2.d, s2, ss2, s3, ss3, t1, t2);
+  MUL2 (w, ZERO, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (w, ZERO, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (w, ZERO, s3, ss3, b, bb, t1, t2);
 
   /* End stage II, case abs(x-1) < 0.03 */
-  if ((y=b+(bb+b*E4)) == b+(bb-b*E4))  return y;
+  if ((y = b + (bb + b * E4)) == b + (bb - b * E4))
+    return y;
   goto stage_n;
 
   /*--- Stage I, the case abs(x-1) > 0.03 */
-  case_03:
+case_03:
 
   /* Find n,u such that x = u*2**n,   1/sqrt(2) < u < sqrt(2)  */
   n += (num.i[HIGH_HALF] >> 20) - 1023;
   num.i[HIGH_HALF] = (num.i[HIGH_HALF] & 0x000fffff) | 0x3ff00000;
-  if (num.d > SQRT_2) { num.d *= HALF;  n++; }
-  u = num.d;  dbl_n = (double) n;
+  if (num.d > SQRT_2)
+    {
+      num.d *= HALF;
+      n++;
+    }
+  u = num.d;
+  dbl_n = (double) n;
 
   /* Find i such that ui=1+(i-75)/2**8 is closest to u (i= 0,1,2,...,181) */
   num.d += h1.d;
   i = (num.i[HIGH_HALF] & 0x000fffff) >> 12;
 
   /* Find j such that vj=1+(j-180)/2**16 is closest to v=u/ui (j= 0,...,361) */
-  num.d = u*Iu[i].d + h2.d;
+  num.d = u * Iu[i].d + h2.d;
   j = (num.i[HIGH_HALF] & 0x000fffff) >> 4;
 
   /* Compute w=(u-ui*vj)/(ui*vj) */
-  p0=(ONE+(i-75)*DEL_U)*(ONE+(j-180)*DEL_V);
-  q=u-p0;   r0=Iu[i].d*Iv[j].d;   w=q*r0;
+  p0 = (ONE + (i - 75) * DEL_U) * (ONE + (j - 180) * DEL_V);
+  q = u - p0;
+  r0 = Iu[i].d * Iv[j].d;
+  w = q * r0;
 
   /* Evaluate polynomial I */
-  polI = w+(a2.d+a3.d*w)*w*w;
+  polI = w + (a2.d + a3.d * w) * w * w;
 
   /* Add up everything */
-  nln2a = dbl_n*LN2A;
-  luai  = Lu[i][0].d;   lubi  = Lu[i][1].d;
-  lvaj  = Lv[j][0].d;   lvbj  = Lv[j][1].d;
-  EADD(luai,lvaj,sij,ssij)
-  EADD(nln2a,sij,A  ,ttij)
-  B0 = (((lubi+lvbj)+ssij)+ttij)+dbl_n*LN2B;
-  B  = polI+B0;
+  nln2a = dbl_n * LN2A;
+  luai = Lu[i][0].d;
+  lubi = Lu[i][1].d;
+  lvaj = Lv[j][0].d;
+  lvbj = Lv[j][1].d;
+  EADD (luai, lvaj, sij, ssij);
+  EADD (nln2a, sij, A, ttij);
+  B0 = (((lubi + lvbj) + ssij) + ttij) + dbl_n * LN2B;
+  B = polI + B0;
 
   /* End stage I, case abs(x-1) >= 0.03 */
-  if ((y=A+(B+E1)) == A+(B-E1))  return y;
+  if ((y = A + (B + E1)) == A + (B - E1))
+    return y;
 
 
   /*--- Stage II, the case abs(x-1) > 0.03 */
 
   /* Improve the accuracy of r0 */
-  EMULV(p0,r0,sa,sb,t1,t2,t3,t4,t5)
-  t=r0*((ONE-sa)-sb);
-  EADD(r0,t,ra,rb)
+  EMULV (p0, r0, sa, sb, t1, t2, t3, t4, t5);
+  t = r0 * ((ONE - sa) - sb);
+  EADD (r0, t, ra, rb);
 
   /* Compute w */
-  MUL2(q,ZERO,ra,rb,w,ww,t1,t2,t3,t4,t5,t6,t7,t8)
+  MUL2 (q, ZERO, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
 
-  EADD(A,B0,a0,aa0)
+  EADD (A, B0, a0, aa0);
 
   /* Evaluate polynomial III */
-  s1 = (c3.d+(c4.d+c5.d*w)*w)*w;
-  EADD(c2.d,s1,s2,ss2)
-  MUL2(s2,ss2,w,ww,s3,ss3,t1,t2,t3,t4,t5,t6,t7,t8)
-  MUL2(s3,ss3,w,ww,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
-  ADD2(s2,ss2,w,ww,s3,ss3,t1,t2)
-  ADD2(s3,ss3,a0,aa0,a1,aa1,t1,t2)
+  s1 = (c3.d + (c4.d + c5.d * w) * w) * w;
+  EADD (c2.d, s1, s2, ss2);
+  MUL2 (s2, ss2, w, ww, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+  MUL2 (s3, ss3, w, ww, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+  ADD2 (s2, ss2, w, ww, s3, ss3, t1, t2);
+  ADD2 (s3, ss3, a0, aa0, a1, aa1, t1, t2);
 
   /* End stage II, case abs(x-1) >= 0.03 */
-  if ((y=a1+(aa1+E3)) == a1+(aa1-E3)) return y;
+  if ((y = a1 + (aa1 + E3)) == a1 + (aa1 - E3))
+    return y;
 
 
   /* Final stages. Use multi-precision arithmetic. */
-  stage_n:
-
-  for (i=0; i<M; i++) {
-    p = pr[i];
-    __dbl_mp(x,&mpx,p);  __dbl_mp(y,&mpy,p);
-    __mplog(&mpx,&mpy,p);
-    __dbl_mp(e[i].d,&mperr,p);
-    __add(&mpy,&mperr,&mpy1,p);  __sub(&mpy,&mperr,&mpy2,p);
-    __mp_dbl(&mpy1,&y1,p);       __mp_dbl(&mpy2,&y2,p);
-    if (y1==y2)   return y1;
-  }
+stage_n:
+
+  for (i = 0; i < M; i++)
+    {
+      p = pr[i];
+      __dbl_mp (x, &mpx, p);
+      __dbl_mp (y, &mpy, p);
+      __mplog (&mpx, &mpy, p);
+      __dbl_mp (e[i].d, &mperr, p);
+      __add (&mpy, &mperr, &mpy1, p);
+      __sub (&mpy, &mperr, &mpy2, p);
+      __mp_dbl (&mpy1, &y1, p);
+      __mp_dbl (&mpy2, &y2, p);
+      if (y1 == y2)
+	return y1;
+    }
   return y1;
 }
+
 #ifndef __ieee754_log
 strong_alias (__ieee754_log, __log_finite)
 #endif
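
For context on the case_03 path reformatted above: the argument is first
reduced to x = u * 2**n with u close to 1, so that log(x) = n*ln2 +
log(u) and log(u) can be taken from the ui/vj tables plus a short
polynomial in w.  The code does this by masking num.i[HIGH_HALF]; a
rough, hypothetical illustration of the same reduction using the
standard frexp (whose boundary handling differs slightly at the
endpoints):

#include <math.h>

/* Split x > 0 into u * 2**n with u roughly in [1/sqrt(2), sqrt(2)).  */
static double
split_log_argument (double x, int *n)
{
  const double inv_sqrt2 = 0.70710678118654752440;
  double u = frexp (x, n);      /* x = u * 2**n, 0.5 <= u < 1 */
  if (u < inv_sqrt2)
    {
      u *= 2.0;                 /* fold u up into [1/sqrt(2), sqrt(2)) */
      (*n)--;
    }
  return u;                     /* then log(x) = *n * ln2 + log(u) */
}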

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                        |   27 +
 sysdeps/ieee754/dbl-64/e_atan2.c |   38 +-
 sysdeps/ieee754/dbl-64/e_log.c   |  228 +++++----
 sysdeps/ieee754/dbl-64/mpa.h     |   10 +-
 sysdeps/ieee754/dbl-64/mpatan.c  |    2 +-
 sysdeps/ieee754/dbl-64/mpatan2.c |    6 +-
 sysdeps/ieee754/dbl-64/mpexp.c   |    2 +-
 sysdeps/ieee754/dbl-64/mptan.c   |    2 +-
 sysdeps/ieee754/dbl-64/s_atan.c  |  397 +++++++++-----
 sysdeps/ieee754/dbl-64/s_tan.c   | 1133 ++++++++++++++++++++++++--------------
 sysdeps/powerpc/power4/fpu/mpa.c |   20 +-
 11 files changed, 1172 insertions(+), 693 deletions(-)


hooks/post-receive
-- 
GNU C Library master sources

