This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.



GNU C Library master sources branch azanella/bz12683 created. glibc-2.26.9000-976-gf9c9cb2


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, azanella/bz12683 has been created
        at  f9c9cb280845a4b94ac5bdb5ddc58c74c5dbf83b (commit)

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=f9c9cb280845a4b94ac5bdb5ddc58c74c5dbf83b

commit f9c9cb280845a4b94ac5bdb5ddc58c74c5dbf83b
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Wed Jan 25 17:08:51 2017 -0200

    nptl: Consolidate pthread_{timed,try}join{_np}
    
    This patch consolidates pthread_join and its GNU extensions to
    simplify the implementation and avoid code duplication.  Both
    pthread_join and pthread_tryjoin_np are now based on
    pthread_timedjoin_np.
    
    It also fixes some inconsistencies in the ESRCH, EINVAL, and EDEADLK
    handling (where each implementation differed from the others) and in
    the cleanup handler (which now always uses a CAS).  It also replaces
    the atomic operations with the C11 ones.
    
    Checked on i686-linux-gnu, x86_64-linux-gnu, x86_64-linux-gnux32,
    aarch64-linux-gnu, arm-linux-gnueabihf, and powerpc64le-linux-gnu.
    
    	* nptl/pthreadP.h (__pthread_timedjoin_np): Define.
    	* nptl/pthread_join.c (pthread_join): Use __pthread_timedjoin_np.
    	* nptl/pthread_tryjoin.c (pthread_tryjoin_np): Likewise.
    	* nptl/pthread_timedjoin.c (cleanup): Use CAS on argument setting.
    	(pthread_timedjoin_np): Define internal symbol and common code from
    	pthread_join.
    	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (__lll_timedwait_tid):
    	Remove superfluous checks.
    	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (__lll_timedwait_tid):
    	Likewise.
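
For illustration only, a minimal user-level sketch (not part of the patch
and not glibc-internal code) of the three entry points the patch folds
into one implementation; after the change pthread_join is simply the
abstime == NULL case of __pthread_timedjoin_np, and pthread_tryjoin_np
adds only the non-blocking pd->tid check.  All functions used here are
the public POSIX/GNU interfaces:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void *
work (void *arg)
{
  sleep (2);			/* Finish well after the timeout below.  */
  return (void *) 42L;
}

int
main (void)
{
  pthread_t t;
  void *res;
  pthread_create (&t, NULL, work, NULL);

  /* Non-blocking variant: EBUSY while the thread is still running.  */
  if (pthread_tryjoin_np (t, &res) == EBUSY)
    puts ("tryjoin: still running");

  /* Timed variant: ETIMEDOUT once the absolute timeout expires.  */
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 1;
  if (pthread_timedjoin_np (t, &res, &abstime) == ETIMEDOUT)
    puts ("timedjoin: timed out");

  /* Plain join: the abstime == NULL case.  */
  pthread_join (t, &res);
  printf ("joined, result = %ld\n", (long) res);
  return 0;
}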

diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index bd4190e..220b660 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -453,6 +453,8 @@ extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
 extern int __pthread_join (pthread_t threadid, void **thread_return);
 extern int __pthread_setcanceltype (int type, int *oldtype);
 extern void __pthread_testcancel (void);
+extern int __pthread_timedjoin_np (pthread_t threadid, void **thread_return,
+				   const struct timespec *abstime);
 
 #if IS_IN (libpthread)
 hidden_proto (__pthread_mutex_init)
@@ -471,6 +473,7 @@ hidden_proto (__pthread_setcancelstate)
 hidden_proto (__pthread_testcancel)
 hidden_proto (__pthread_mutexattr_init)
 hidden_proto (__pthread_mutexattr_settype)
+hidden_proto (__pthread_timedjoin_np)
 #endif
 
 extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond);
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index d2c8286..f6e6ae4 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -16,99 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
-#include <errno.h>
-#include <stdlib.h>
-
-#include <atomic.h>
 #include "pthreadP.h"
 
-#include <stap-probe.h>
-
-
-static void
-cleanup (void *arg)
-{
-  /* If we already changed the waiter ID, reset it.  The call cannot
-     fail for any reason but the thread not having done that yet so
-     there is no reason for a loop.  */
-  (void) atomic_compare_and_exchange_bool_acq ((struct pthread **) arg, NULL,
-					       THREAD_SELF);
-}
-
-
 int
 __pthread_join (pthread_t threadid, void **thread_return)
 {
-  struct pthread *pd = (struct pthread *) threadid;
-
-  /* Make sure the descriptor is valid.  */
-  if (INVALID_NOT_TERMINATED_TD_P (pd))
-    /* Not a valid thread handle.  */
-    return ESRCH;
-
-  /* Is the thread joinable?.  */
-  if (IS_DETACHED (pd))
-    /* We cannot wait for the thread.  */
-    return EINVAL;
-
-  struct pthread *self = THREAD_SELF;
-  int result = 0, ct;
-
-  LIBC_PROBE (pthread_join, 1, threadid);
-
-  /* During the wait we change to asynchronous cancellation.  If we
-     are canceled the thread we are waiting for must be marked as
-     un-wait-ed for again.  */
-  pthread_cleanup_push (cleanup, &pd->joinid);
-
-  /* Switch to asynchronous cancellation.  */
-  __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
-
-  unsigned int ch = atomic_load_relaxed (&pd->cancelhandling);
-  if ((pd == self || (self->joinid == pd && ch == 0))
-      && !(self->cancelstate == PTHREAD_CANCEL_ENABLE
-           && ch & THREAD_CANCELED))
-    /* This is a deadlock situation.  The threads are waiting for each
-       other to finish.  Note that this is a "may" error.  To be 100%
-       sure we catch this error we would have to lock the data
-       structures but it is not necessary.  In the unlikely case that
-       two threads are really caught in this situation they will
-       deadlock.  It is the programmer's problem to figure this
-       out.  */
-    result = EDEADLK;
-  /* Wait for the thread to finish.  If it is already locked something
-     is wrong.  There can only be one waiter.  */
-  else if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
-								   self,
-								   NULL), 0))
-    /* There is already somebody waiting for the thread.  */
-    result = EINVAL;
-  else
-    /* Wait for the child.  */
-    lll_wait_tid (pd->tid);
-
-  __pthread_setcanceltype (ct, NULL);
-
-  /* Remove the handler.  */
-  pthread_cleanup_pop (0);
-
-
-  if (__glibc_likely (result == 0))
-    {
-      /* We mark the thread as terminated and as joined.  */
-      pd->tid = -1;
-
-      /* Store the return value if the caller is interested.  */
-      if (thread_return != NULL)
-	*thread_return = pd->result;
-
-
-      /* Free the TCB.  */
-      __free_tcb (pd);
-    }
-
-  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
-
-  return result;
+  return __pthread_timedjoin_np (threadid, thread_return, NULL);
 }
 weak_alias (__pthread_join, pthread_join)
diff --git a/nptl/pthread_timedjoin.c b/nptl/pthread_timedjoin.c
index 75a9cfe..a685972 100644
--- a/nptl/pthread_timedjoin.c
+++ b/nptl/pthread_timedjoin.c
@@ -21,21 +21,26 @@
 #include <atomic.h>
 #include "pthreadP.h"
 
+#include <stap-probe.h>
+
 
 static void
 cleanup (void *arg)
 {
-  *(void **) arg = NULL;
+  /* If we already changed the waiter ID, reset it.  The call cannot
+     fail for any reason but the thread not having done that yet so
+     there is no reason for a loop.  */
+  struct pthread *self = THREAD_SELF;
+  atomic_compare_exchange_weak_acquire (&arg, &self, NULL);
 }
 
 
 int
-pthread_timedjoin_np (pthread_t threadid, void **thread_return,
-		      const struct timespec *abstime)
+__pthread_timedjoin_np (pthread_t threadid, void **thread_return,
+			const struct timespec *abstime)
 {
-  struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
-  int result, ct;
+  int result = 0, ct;
 
   /* Make sure the descriptor is valid.  */
   if (INVALID_NOT_TERMINATED_TD_P (pd))
@@ -47,8 +52,13 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
     /* We cannot wait for the thread.  */
     return EINVAL;
 
-  self = THREAD_SELF;
-  if (pd == self || self->joinid == pd)
+  LIBC_PROBE (pthread_join, 1, threadid);
+
+  struct pthread *self = THREAD_SELF;
+  unsigned int ch = atomic_load_relaxed (&pd->cancelhandling);
+  if ((pd == self || (self->joinid == pd && ch == 0))
+      && !(self->cancelstate == PTHREAD_CANCEL_ENABLE
+           && ch & THREAD_CANCELED))
     /* This is a deadlock situation.  The threads are waiting for each
        other to finish.  Note that this is a "may" error.  To be 100%
        sure we catch this error we would have to lock the data
@@ -60,9 +70,8 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
 
   /* Wait for the thread to finish.  If it is already locked something
      is wrong.  There can only be one waiter.  */
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
-							      self, NULL), 0))
-    /* There is already somebody waiting for the thread.  */
+  if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid,
+							      &self, NULL)))
     return EINVAL;
 
 
@@ -75,7 +84,10 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
   __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
 
   /* Wait for the child.  */
-  result = lll_timedwait_tid (pd->tid, abstime);
+  if (abstime != NULL)
+    result = lll_timedwait_tid (pd->tid, abstime);
+  else
+    lll_wait_tid (pd->tid);
 
   __pthread_setcanceltype (ct, NULL);
 
@@ -84,18 +96,24 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
 
 
   /* We might have timed out.  */
-  if (result == 0)
+  if (__glibc_likely (result == 0))
     {
+      /* We mark the thread as terminated and as joined.  */
+      pd->tid = -1;
+
       /* Store the return value if the caller is interested.  */
       if (thread_return != NULL)
 	*thread_return = pd->result;
 
-
       /* Free the TCB.  */
       __free_tcb (pd);
     }
   else
     pd->joinid = NULL;
 
+  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
+
   return result;
 }
+weak_alias (__pthread_timedjoin_np, pthread_timedjoin_np)
+hidden_def (__pthread_timedjoin_np)
diff --git a/nptl/pthread_tryjoin.c b/nptl/pthread_tryjoin.c
index 6a3b62e..14c5d38 100644
--- a/nptl/pthread_tryjoin.c
+++ b/nptl/pthread_tryjoin.c
@@ -26,47 +26,12 @@
 int
 pthread_tryjoin_np (pthread_t threadid, void **thread_return)
 {
-  struct pthread *self;
-  struct pthread *pd = (struct pthread *) threadid;
-
-  /* Make sure the descriptor is valid.  */
-  if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
-    /* Not a valid thread handle.  */
-    return ESRCH;
-
-  /* Is the thread joinable?.  */
-  if (IS_DETACHED (pd))
-    /* We cannot wait for the thread.  */
-    return EINVAL;
-
-  self = THREAD_SELF;
-  if (pd == self || self->joinid == pd)
-    /* This is a deadlock situation.  The threads are waiting for each
-       other to finish.  Note that this is a "may" error.  To be 100%
-       sure we catch this error we would have to lock the data
-       structures but it is not necessary.  In the unlikely case that
-       two threads are really caught in this situation they will
-       deadlock.  It is the programmer's problem to figure this
-       out.  */
-    return EDEADLK;
-
   /* Return right away if the thread hasn't terminated yet.  */
+  struct pthread *pd = (struct pthread *) threadid;
   if (pd->tid != 0)
     return EBUSY;
 
-  /* Wait for the thread to finish.  If it is already locked something
-     is wrong.  There can only be one waiter.  */
-  if (atomic_compare_and_exchange_bool_acq (&pd->joinid, self, NULL))
-    /* There is already somebody waiting for the thread.  */
-    return EINVAL;
-
-  /* Store the return value if the caller is interested.  */
-  if (thread_return != NULL)
-    *thread_return = pd->result;
-
-
-  /* Free the TCB.  */
-  __free_tcb (pd);
-
-  return 0;
+  /* If pd->tid != 0 then lll_wait_tid will not block on futex
+     operation.  */
+  return __pthread_timedjoin_np (threadid, thread_return, NULL);
 }
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index e54d1ea..0368c9d 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -237,12 +237,7 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
   ({									      \
     int __result = 0;							      \
     if ((tid) != 0)							      \
-      {									      \
-	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
-	  __result = EINVAL;						      \
-	else								      \
-	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
-      }									      \
+      __result = __lll_timedwait_tid (&(tid), (abstime));		      \
     __result; })
 
 
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index bb6d9ee..74e2c80 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -249,12 +249,7 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
   ({									      \
     int __result = 0;							      \
     if ((tid) != 0)							      \
-      {									      \
-	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
-	  __result = EINVAL;						      \
-	else								      \
-	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
-      }									      \
+      __result = __lll_timedwait_tid (&(tid), (abstime));		      \
     __result; })
 
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=21b638834dfde9c9946045bbbdc445be691168ad

commit 21b638834dfde9c9946045bbbdc445be691168ad
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Tue Jan 24 21:05:56 2017 -0200

    nptl: Remove THREAD_ATOMIC_* macros
    
    This patch removes the unused THREAD_ATOMIC_* macros now that the
    nptl code is using C11 atomics.
    
    Checked on x86_64-linux-gnu and i686-linux-gnu.
    
    	* manual/pattern.texi (THREAD_ATOMIC_BIT_SET): Remove.
    	(THREAD_ATOMIC_CMPXCHG_VAL): Likewise.
    	* nptl/pthreadP.h (THREAD_ATOMIC_CMPXCHG_VAL): Likewise.
    	(THREAD_ATOMIC_BIT_SET): Likewise.
    	* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL): Likewise.
    	(THREAD_ATOMIC_AND): Likewise.
    	* sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL): Likewise.
    	(THREAD_ATOMIC_AND): Likewise.
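
As a side note, a standalone sketch of the replacement pattern in ISO C11
terms; glibc itself uses its internal <atomic.h> wrappers
(e.g. atomic_compare_exchange_weak_acquire) rather than <stdatomic.h>, so
this is only an approximation of the idiom the removed macros gave way to:

#include <stdatomic.h>
#include <stdio.h>

int
main (void)
{
  _Atomic int word = 0;

  /* THREAD_ATOMIC_CMPXCHG_VAL returned the previous value and left the
     comparison to the caller; the C11-style compare-exchange returns a
     boolean and updates the expected value on failure, which keeps
     retry loops short.  */
  int expected = 0;
  if (atomic_compare_exchange_weak_explicit (&word, &expected, 1,
					     memory_order_acquire,
					     memory_order_relaxed))
    printf ("exchanged, word = %d\n", atomic_load (&word));
  else
    printf ("lost the race, current value = %d\n", expected);
  return 0;
}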

diff --git a/manual/pattern.texi b/manual/pattern.texi
index 7bdb367..fd9d036 100644
--- a/manual/pattern.texi
+++ b/manual/pattern.texi
@@ -1820,7 +1820,6 @@ the beginning of the vector.
 @c      (disable cancellation around exec_comm; it may do_cancel the
 @c       second time, if async cancel is enabled)
 @c     do_cancel @ascuplugin @ascuheap @acsmem
-@c      THREAD_ATOMIC_BIT_SET dup ok
 @c      pthread_unwind @ascuplugin @ascuheap @acsmem
 @c       Unwind_ForcedUnwind if available @ascuplugin @ascuheap @acsmem
 @c       libc_unwind_longjmp otherwise
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 6cf9c02..bd4190e 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -35,18 +35,6 @@
 #include <nptl-signals.h>
 
 
-/* Atomic operations on TLS memory.  */
-#ifndef THREAD_ATOMIC_CMPXCHG_VAL
-# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old) \
-  atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)
-#endif
-
-#ifndef THREAD_ATOMIC_BIT_SET
-# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
-  atomic_bit_set (&(descr)->member, bit)
-#endif
-
-
 /* Adaptive mutex definitions.  */
 #ifndef MAX_ADAPTIVE_COUNT
 # define MAX_ADAPTIVE_COUNT 100

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=8d8211a1ba7152910f5342a21bea21c17a78aed4

commit 8d8211a1ba7152910f5342a21bea21c17a78aed4
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Tue Jan 24 09:17:37 2017 -0200

    nptl: cancelhandling refactor
    
    This patch basically uses the glibc C11 atomic functions to access
    and modify the struct pthread cancelhandling variable.  All plain
    accesses are handled using the atomic_load_* functions and
    THREAD_ATOMIC_BIT_SET is replaced by atomic_fetch_or_acquire.
    
    Checked on i686-linux-gnu, x86_64-linux-gnu, x86_64-linux-gnux32,
    aarch64-linux-gnu, arm-linux-gnueabihf, and powerpc64le-linux-gnu.
    
    	* nptl/descr.h (CANCELED_BITMASK): Rename to THREAD_CANCELED.
    	(EXITING_BITMASK): Rename to THREAD_EXITING.
    	(TERMINATED_BITMASK): Rename to THREAD_TERMINATED.
    	* nptl/allocatestack.c (setxid_mark_thread): Use THREAD_* instead of
    	*_BITMASK and use the glibc C11 atomic functions to access pthread
    	cancelhandling.
    	* nptl/libc-cancellation.c (__syscall_cancel): Likewise.
    	* nptl/nptl-init.c (sigcancel_handler): Likewise.
    	* nptl/pthread_cancel.c (pthread_cancel): Likewise.
    	* nptl/pthread_create.c (__free_tcb): Likewise.
    	(START_THREAD_DEFN): Likewise.
    	(__pthread_create_2_1): Likewise.
    	* nptl/pthread_detach.c (pthread_detach): Likewise.
    	* nptl/pthread_exit.c (__pthread_exit): Likewise.
    	* nptl/pthread_join.c (pthread_join): Likewise.
    	* nptl_db/td_thr_get_info.c (td_thr_get_info): Likewise.
    	* nptl_db/td_thr_getfpregs.c (td_thr_getfpregs): Likewise.
    	* nptl_db/td_thr_getgregs.c (td_thr_getregs): Likewise.
    	* nptl_db/td_thr_setfpregs.c (td_thr_setfpregs): Likewise.
    	* nptl_db/td_thr_setgregs.c (td_thr_setgregs): Likewise.
    	* sysdeps/unix/sysv/linux/arm/syscall_cancel.S
    	(__syscall_cancel_arch): Change CANCELED_BITMASK to
    	THREAD_CANCELED in comment.
    	* sysdeps/unix/sysv/linux/syscall_cancel.c
    	(__syscall_cancel_arch): Likewise.
    	* sysdeps/x86_64/nptl/tcb-offsets.sym (TCB_CANCELED_BITMASK):
    	Remove.
    	(TCB_EXITING_BITMASK): Likewise.
    	(TCB_TERMINATED_BITMASK): Likewise.
    	* sysdeps/unix/sysv/linux/i386/syscall_cancel.S
    	(__syscall_cancel_arch): Likewise.
    	* sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
    	(__syscall_cancel_arch): Likewise.
    	* sysdeps/unix/sysv/linux/sh/syscall_cancel.S
    	(__syscall_cancel_arch): Likewise.
    	* sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
    	(__syscall_cancel_arch): Likewise.
    	* sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
    	(__syscall_cancel_arch): Likewise.
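
A minimal standalone sketch of the bit-set pattern described above, using
ISO C11 <stdatomic.h> in place of the glibc-internal
atomic_fetch_or_acquire and atomic_load_relaxed wrappers; the flag values
are the ones introduced in nptl/descr.h below:

#include <stdatomic.h>
#include <stdio.h>

/* Values as defined in nptl/descr.h by this patch.  */
#define THREAD_CANCELED		0x04
#define THREAD_EXITING		0x08
#define THREAD_TERMINATED	0x10

int
main (void)
{
  /* Stand-in for the cancelhandling word of struct pthread.  */
  _Atomic unsigned int cancelhandling = 0;

  /* THREAD_ATOMIC_BIT_SET becomes a fetch-or of the mask; the return
     value tells whether the bit was already set, which is what
     __free_tcb checks for THREAD_TERMINATED.  */
  unsigned int old = atomic_fetch_or_explicit (&cancelhandling,
					       THREAD_CANCELED,
					       memory_order_acquire);
  printf ("previously canceled: %s\n",
	  (old & THREAD_CANCELED) ? "yes" : "no");

  /* Plain reads become explicit relaxed loads.  */
  unsigned int ch = atomic_load_explicit (&cancelhandling,
					  memory_order_relaxed);
  printf ("exiting: %s\n", (ch & THREAD_EXITING) ? "yes" : "no");
  return 0;
}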

diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index ca617ee..4c210a3 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -1035,7 +1035,7 @@ setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
     {
       /* If the thread is exiting right now, ignore it.  */
       int ch = atomic_load_relaxed (&t->cancelhandling);
-      if ((ch & EXITING_BITMASK) != 0)
+      if ((ch & THREAD_EXITING) != 0)
 	{
 	  /* Release the futex if there is no other setxid in
 	     progress.  */
diff --git a/nptl/descr.h b/nptl/descr.h
index d77d10b..7673150 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -270,18 +270,27 @@ struct pthread
   struct pthread_unwind_buf *cleanup_jmp_buf;
 #define HAVE_CLEANUP_JMP_BUF
 
-  /* Flags determining processing of cancellation.  */
+  /* Flags determining process of thread cancellation, termination, and
+     finalization.  Initial state is 0 set at allocate_stack
+     (pthread_create.c).
+
+     THREAD_CANCELED bit is used to mark the thread as cancelled and it
+     is set atomically by pthread_cancel.
+
+     THREAD_EXITING bit is used to mark the thread as exiting and it
+     is set atomically either by 1. pthread_exit, 2. thread cancellation
+     signal handler at nptl-init.c, or 3. after thread execution at
+     pthread_create.c.
+
+     THREAD_TERMINATED bit used to mark the thread resources are being
+     freed and set by __free_tcb (called by pthread_detach and
+     pthread_{timed,try}join.
+   */
+#define THREAD_CANCELED		0x04
+#define THREAD_EXITING		0x08
+#define THREAD_TERMINATED	0x10
   int cancelhandling;
 
-#define CANCELED_BIT		2
-#define CANCELED_BITMASK	(0x01 << CANCELED_BIT)
-  /* Bit set if thread is exiting.  */
-#define EXITING_BIT		3
-#define EXITING_BITMASK		(0x01 << EXITING_BIT)
-  /* Bit set if thread terminated and TCB is freed.  */
-#define TERMINATED_BIT		4
-#define TERMINATED_BITMASK	(0x01 << TERMINATED_BIT)
-
   /* Flag to indicate thread cancel disable state (PTHREAD_CANCEL_ENABLE or
      PTHREAD_CANCEL_DISABLE).  */
   int cancelstate;
diff --git a/nptl/libc-cancellation.c b/nptl/libc-cancellation.c
index 335acab..65caf6e 100644
--- a/nptl/libc-cancellation.c
+++ b/nptl/libc-cancellation.c
@@ -48,7 +48,7 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
 			          a6 __SYSCALL_CANCEL7_ARG7);
 
   if ((result == -EINTR)
-      && (pd->cancelhandling & CANCELED_BITMASK)
+      && (atomic_load_relaxed (&pd->cancelhandling) & THREAD_CANCELED)
       && (pd->cancelstate != PTHREAD_CANCEL_DISABLE))
     __syscall_do_cancel ();
 
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 3a3e23d..15c98cf 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -209,7 +209,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
   volatile struct pthread *pd = (volatile struct pthread *) self;
 
   if ((pd->cancelstate == PTHREAD_CANCEL_DISABLE)
-      || ((pd->cancelhandling & CANCELED_BITMASK) == 0))
+      || (atomic_load_relaxed (&pd->cancelhandling) & THREAD_CANCELED) == 0)
     return;
 
   /* Add SIGCANCEL on ignored sigmask to avoid the handler to be called
@@ -228,7 +228,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
       || (pc >= (uintptr_t) __syscall_cancel_arch_start
           && pc < (uintptr_t) __syscall_cancel_arch_end))
     {
-      THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+      atomic_fetch_or_acquire (&self->cancelhandling, THREAD_EXITING);
       THREAD_SETMEM (self, result, PTHREAD_CANCELED);
 
       INTERNAL_SYSCALL_CALL (rt_sigprocmask, err, SIG_SETMASK, set, NULL,
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
index b97763b..5acc136 100644
--- a/nptl/pthread_cancel.c
+++ b/nptl/pthread_cancel.c
@@ -38,7 +38,7 @@ __pthread_cancel (pthread_t th)
   pthread_cancel_init ();
 #endif
 
-  THREAD_ATOMIC_BIT_SET (pd, cancelhandling, CANCELED_BIT);
+  atomic_fetch_or_acquire (&pd->cancelhandling, THREAD_CANCELED);
 
   /* A single-threaded process should be able to kill itself, since there is
      nothing in the POSIX specification that says that it cannot.  So we set
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 0691b6e..90977d3 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -343,8 +343,9 @@ void
 __free_tcb (struct pthread *pd)
 {
   /* The thread is exiting now.  */
-  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
-					     TERMINATED_BIT) == 0, 1))
+  unsigned int ch = atomic_fetch_or_acquire (&pd->cancelhandling,
+					     THREAD_TERMINATED);
+  if (__glibc_likely ((ch & THREAD_TERMINATED) == 0))
     {
       /* Remove the descriptor from the list.  */
       if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
@@ -412,13 +413,14 @@ START_THREAD_DEFN
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
      cancellation signal mask.  */
-  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELED_BITMASK))
+  unsigned int ch = atomic_load_relaxed (&pd->parent_cancelhandling);
+  if (__glibc_unlikely (ch & THREAD_CANCELED))
     {
       INTERNAL_SYSCALL_DECL (err);
       sigset_t mask;
       __sigemptyset (&mask);
       __sigaddset (&mask, SIGCANCEL);
-      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
+      INTERNAL_SYSCALL_CALL (rt_sigprocmask, err, SIG_UNBLOCK, &mask,
 			       NULL, _NSIG / 8);
     }
 #endif
@@ -515,7 +517,7 @@ START_THREAD_DEFN
   /* The thread is exiting now.  Don't set this bit until after we've hit
      the event-reporting breakpoint, so that td_thr_get_info on us while at
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
-  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
+  atomic_fetch_or_acquire (&pd->cancelhandling, THREAD_EXITING);
 
 #ifndef __ASSUME_SET_ROBUST_LIST
   /* If this thread has any robust mutexes locked, handle them now.  */
@@ -554,10 +556,11 @@ START_THREAD_DEFN
 		      pd->guardsize);
 
   /* If the thread is detached free the TCB.  */
+  unsigned int s;
   if (IS_DETACHED (pd))
     /* Free the TCB.  */
     __free_tcb (pd);
-  else
+  else if ((s = atomic_load_relaxed (&pd->setxid_op)) == 1)
     {
       /* Some other thread might call any of the setXid functions and expect
 	 us to reply.  In this case wait until we did that.  */
@@ -713,7 +716,7 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
 
   /* Inform start_thread (above) about cancellation state that might
      translate into inherited signal state.  */
-  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
+  pd->parent_cancelhandling = atomic_load_relaxed (&self->cancelhandling);
 
   /* Determine scheduling parameters for the thread.  */
   if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
diff --git a/nptl/pthread_detach.c b/nptl/pthread_detach.c
index 5c4c8f7..cc3ea09 100644
--- a/nptl/pthread_detach.c
+++ b/nptl/pthread_detach.c
@@ -46,7 +46,7 @@ __pthread_detach (pthread_t th)
   else
     /* Check whether the thread terminated meanwhile.  In this case we
        will just free the TCB.  */
-    if ((pd->cancelhandling & EXITING_BITMASK) != 0)
+    if ((atomic_load_relaxed(&pd->cancelhandling) & THREAD_EXITING) != 0)
       /* Note that the code in __free_tcb makes sure each thread
 	 control block is freed only once.  */
       __free_tcb (pd);
diff --git a/nptl/pthread_exit.c b/nptl/pthread_exit.c
index 82ef4f8..6172a25 100644
--- a/nptl/pthread_exit.c
+++ b/nptl/pthread_exit.c
@@ -27,7 +27,7 @@ __pthread_exit (void *value)
 
   THREAD_SETMEM (self, result, value);
 
-  THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+  atomic_fetch_or_acquire (&self->cancelhandling, THREAD_EXITING);
 
   __pthread_unwind ((__pthread_unwind_buf_t *)
 		    THREAD_GETMEM (self, cleanup_jmp_buf));
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index 66263cd..d2c8286 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -64,13 +64,10 @@ __pthread_join (pthread_t threadid, void **thread_return)
   /* Switch to asynchronous cancellation.  */
   __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
 
-  if ((pd == self
-       || (self->joinid == pd
-	   && (pd->cancelhandling
-	       & (CANCELED_BITMASK | EXITING_BITMASK
-		  | TERMINATED_BITMASK)) == 0))
+  unsigned int ch = atomic_load_relaxed (&pd->cancelhandling);
+  if ((pd == self || (self->joinid == pd && ch == 0))
       && !(self->cancelstate == PTHREAD_CANCEL_ENABLE
-           && self->cancelhandling & CANCELED_BITMASK))
+           && ch & THREAD_CANCELED))
     /* This is a deadlock situation.  The threads are waiting for each
        other to finish.  Note that this is a "may" error.  To be 100%
        sure we catch this error we would have to lock the data
diff --git a/nptl_db/td_thr_get_info.c b/nptl_db/td_thr_get_info.c
index 48979b8..d549bc8 100644
--- a/nptl_db/td_thr_get_info.c
+++ b/nptl_db/td_thr_get_info.c
@@ -89,10 +89,10 @@ td_thr_get_info (const td_thrhandle_t *th, td_thrinfo_t *infop)
 		   ? 0 : (uintptr_t) schedprio);
   infop->ti_type = TD_THR_USER;
 
-  if ((((int) (uintptr_t) cancelhandling) & EXITING_BITMASK) == 0)
+  if ((((int) (uintptr_t) cancelhandling) & THREAD_EXITING) == 0)
     /* XXX For now there is no way to get more information.  */
     infop->ti_state = TD_THR_ACTIVE;
-  else if ((((int) (uintptr_t) cancelhandling) & TERMINATED_BITMASK) == 0)
+  else if ((((int) (uintptr_t) cancelhandling) & THREAD_TERMINATED) == 0)
     infop->ti_state = TD_THR_ZOMBIE;
   else
     infop->ti_state = TD_THR_UNKNOWN;
diff --git a/nptl_db/td_thr_getfpregs.c b/nptl_db/td_thr_getfpregs.c
index 3c5462c..aaba61a 100644
--- a/nptl_db/td_thr_getfpregs.c
+++ b/nptl_db/td_thr_getfpregs.c
@@ -40,7 +40,7 @@ td_thr_getfpregs (const td_thrhandle_t *th, prfpregset_t *regset)
     return err;
 
   /* If the thread already terminated we return all zeroes.  */
-  if (((int) (uintptr_t) cancelhandling) & TERMINATED_BITMASK)
+  if (((int) (uintptr_t) cancelhandling) & THREAD_TERMINATED)
     memset (regset, '\0', sizeof (*regset));
   /* Otherwise get the register content through the callback.  */
   else
diff --git a/nptl_db/td_thr_getgregs.c b/nptl_db/td_thr_getgregs.c
index 61ffa12..1be9072 100644
--- a/nptl_db/td_thr_getgregs.c
+++ b/nptl_db/td_thr_getgregs.c
@@ -40,7 +40,7 @@ td_thr_getgregs (const td_thrhandle_t *th, prgregset_t regset)
     return err;
 
   /* If the thread already terminated we return all zeroes.  */
-  if (((int) (uintptr_t) cancelhandling) & TERMINATED_BITMASK)
+  if (((int) (uintptr_t) cancelhandling) & THREAD_TERMINATED)
     memset (regset, '\0', sizeof (*regset));
   /* Otherwise get the register content through the callback.  */
   else
diff --git a/nptl_db/td_thr_setfpregs.c b/nptl_db/td_thr_setfpregs.c
index 092fc1a..e5cb76d 100644
--- a/nptl_db/td_thr_setfpregs.c
+++ b/nptl_db/td_thr_setfpregs.c
@@ -40,7 +40,7 @@ td_thr_setfpregs (const td_thrhandle_t *th, const prfpregset_t *fpregs)
     return err;
 
   /* Only set the registers if the thread hasn't yet terminated.  */
-  if ((((int) (uintptr_t) cancelhandling) & TERMINATED_BITMASK) == 0)
+  if ((((int) (uintptr_t) cancelhandling) & THREAD_TERMINATED) == 0)
     {
       err = DB_GET_FIELD (tid, th->th_ta_p, th->th_unique, pthread, tid, 0);
       if (err != TD_OK)
diff --git a/nptl_db/td_thr_setgregs.c b/nptl_db/td_thr_setgregs.c
index a927119..2eb03c1 100644
--- a/nptl_db/td_thr_setgregs.c
+++ b/nptl_db/td_thr_setgregs.c
@@ -40,7 +40,7 @@ td_thr_setgregs (const td_thrhandle_t *th, prgregset_t gregs)
     return err;
 
   /* Only set the registers if the thread hasn't yet terminated.  */
-  if ((((int) (uintptr_t) cancelhandling) & TERMINATED_BITMASK) == 0)
+  if ((((int) (uintptr_t) cancelhandling) & THREAD_TERMINATED) == 0)
     {
       err = DB_GET_FIELD (tid, th->th_ta_p, th->th_unique, pthread, tid, 0);
       if (err != TD_OK)
diff --git a/sysdeps/unix/sysv/linux/i386/syscall_cancel.S b/sysdeps/unix/sysv/linux/i386/syscall_cancel.S
index 5596b3e..96e2bcc 100644
--- a/sysdeps/unix/sysv/linux/i386/syscall_cancel.S
+++ b/sysdeps/unix/sysv/linux/i386/syscall_cancel.S
@@ -45,7 +45,7 @@ ENTRY (__syscall_cancel_arch)
 	.type   __syscall_cancel_arch_start, @function
 __syscall_cancel_arch_start:
 
-	/* if (*cancelhandling & CANCELED_BITMASK)
+	/* if (*cancelhandling & THREAD_CANCELED)
 	     __syscall_do_cancel()  */
 	testb	$4, (%eax)
 	jne     1f
diff --git a/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S b/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
index bda2199..5175929 100644
--- a/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
+++ b/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
@@ -33,7 +33,7 @@ ENTRY (__syscall_cancel_arch)
 	.type  __syscall_cancel_arch_start,@function
 __syscall_cancel_arch_start:
 
-	/* if (*cancelhandling & CANCELED_BITMASK)
+	/* if (*cancelhandling & THREAD_CANCELED)
 	     __syscall_do_cancel()  */
 	lwz     r0,0(r3)
 	rldicl. r0,r0,62,63
diff --git a/sysdeps/unix/sysv/linux/sh/syscall_cancel.S b/sysdeps/unix/sysv/linux/sh/syscall_cancel.S
index 02a050d..22c3b4a 100644
--- a/sysdeps/unix/sysv/linux/sh/syscall_cancel.S
+++ b/sysdeps/unix/sysv/linux/sh/syscall_cancel.S
@@ -48,7 +48,7 @@ ENTRY (__syscall_cancel_arch)
 	.globl __syscall_cancel_arch_start
 __syscall_cancel_arch_start:
 
-	/* if (*cancelhandling & CANCELED_BITMASK)
+	/* if (*cancelhandling & THREAD_CANCELED)
 	     __syscall_do_cancel()  */
 	mov.l	@r4,r0
 	tst	#4,r0
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S b/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
index c06f9d1..cc6c7e1 100644
--- a/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
+++ b/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
@@ -37,7 +37,7 @@ ENTRY (__syscall_cancel_arch)
 	.globl __syscall_cancel_arch_start
 __syscall_cancel_arch_start:
 
-	/* if (*cancelhandling & CANCELED_BITMASK)
+	/* if (*cancelhandling & THREAD_CANCELED)
 	     __syscall_do_cancel()  */
 	ld	[%i0], %g2
 	andcc	%g2, 4, %g0
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S b/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
index f3eef78..afe49e5 100644
--- a/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
+++ b/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
@@ -37,7 +37,7 @@ ENTRY (__syscall_cancel_arch)
 	.globl __syscall_cancel_arch_start
 __syscall_cancel_arch_start:
 
-	/* if (*cancelhandling & CANCELED_BITMASK)
+	/* if (*cancelhandling & THREAD_CANCELED)
 	     __syscall_do_cancel()  */
 	lduw	[%i0], %g1
 	andcc	%g1, 4, %g0
diff --git a/sysdeps/unix/sysv/linux/syscall_cancel.c b/sysdeps/unix/sysv/linux/syscall_cancel.c
index 5a3d383..0255a0f 100644
--- a/sysdeps/unix/sysv/linux/syscall_cancel.c
+++ b/sysdeps/unix/sysv/linux/syscall_cancel.c
@@ -50,7 +50,7 @@ __syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
 		       __SYSCALL_CANCEL7_ARG_DEF)
 {
   ADD_LABEL ("__syscall_cancel_arch_start");
-  if (__glibc_unlikely (*ch & CANCELED_BITMASK))
+  if (__glibc_unlikely (atomic_load_relaxed (ch) & THREAD_CANCELED))
     __syscall_do_cancel();
 
   INTERNAL_SYSCALL_DECL(err);
diff --git a/sysdeps/x86_64/nptl/tcb-offsets.sym b/sysdeps/x86_64/nptl/tcb-offsets.sym
index 37069dd..8710fd9 100644
--- a/sysdeps/x86_64/nptl/tcb-offsets.sym
+++ b/sysdeps/x86_64/nptl/tcb-offsets.sym
@@ -17,7 +17,4 @@ PRIVATE_FUTEX		offsetof (tcbhead_t, private_futex)
 #endif
 
 -- Not strictly offsets, but these values are also used in the TCB.
-TCB_CANCELED_BITMASK	 CANCELED_BITMASK
-TCB_EXITING_BITMASK	 EXITING_BITMASK
-TCB_TERMINATED_BITMASK	 TERMINATED_BITMASK
 TCB_PTHREAD_CANCELED	 PTHREAD_CANCELED

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=fa0b3064c46fb717db37590223b98e8d0cccd9a6

commit fa0b3064c46fb717db37590223b98e8d0cccd9a6
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Tue Jul 18 16:33:41 2017 -0300

    nptl: Remove setxid bit from cancelhandling
    
    This patch removes the setxid signaling from the cancelhandling flag
    and uses its own struct pthread member instead.  The idea is 1. to
    simplify the setxid handling so it no longer mixes atomic operations
    with thread cancellation, since both are orthogonal, and 2. to
    isolate the cancelhandling variable so it only tracks the thread
    cancel state.
    
    It also uses the glibc C11 atomic operations while accessing the new
    setxid_op field.
    
    Checked on i686-linux-gnu, x86_64-linux-gnu, x86_64-linux-gnux32,
    aarch64-linux-gnu, arm-linux-gnueabihf, and powerpc64le-linux-gnu.
    
    	* nptl/allocatestack.c (get_cached_stack): Set setxid_op on newly
    	allocated stack.
    	(setxid_mark_thread): Use setxid_op to mark thread as executing
    	a setxid operation instead of using cancelhandling.  Also adapt
    	the algorithm to use the analogous C11 atomic functions.
    	(setxid_unmark_thread): Likewise.
    	(setxid_signal_thread): Likewise.
    	* nptl/nptl-init.c (sighandler_setxid): Likewise.
    	* nptl/pthread_create.c (sighandler_setxid): Likewise.
    	* nptl/descr.h (SETXID_BIT): Remove define.
    	(SETXID_BITMASK): Likewise.
    	(setxid_op): New member.
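
A simplified standalone model of the new marking scheme; the helper names
mark_setxid/unmark_setxid are hypothetical, the real code in
nptl/allocatestack.c also has to deal with exiting threads and the setxid
futex, and glibc uses its internal atomic wrappers rather than
<stdatomic.h>:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the new struct pthread member: 0 = no setxid operation
   pending, 1 = setxid operation in progress.  */
static _Atomic int setxid_op;

static void
mark_setxid (void)
{
  int s = atomic_load_explicit (&setxid_op, memory_order_relaxed);
  /* With a dedicated field the CAS no longer has to preserve unrelated
     cancelhandling bits; it just moves the field from S to 1.  */
  while (!atomic_compare_exchange_weak_explicit (&setxid_op, &s, 1,
						 memory_order_acquire,
						 memory_order_relaxed))
    ;
}

static void
unmark_setxid (void)
{
  int s = atomic_load_explicit (&setxid_op, memory_order_relaxed);
  do
    {
      if (s == 0)
	return;
    }
  while (!atomic_compare_exchange_weak_explicit (&setxid_op, &s, 0,
						 memory_order_acquire,
						 memory_order_relaxed));
}

int
main (void)
{
  mark_setxid ();
  printf ("marked: %d\n", (int) atomic_load (&setxid_op));
  unmark_setxid ();
  printf ("unmarked: %d\n", (int) atomic_load (&setxid_op));
  return 0;
}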

diff --git a/manual/users.texi b/manual/users.texi
index 8690b65..eb321ec 100644
--- a/manual/users.texi
+++ b/manual/users.texi
@@ -297,7 +297,7 @@ include the header files @file{sys/types.h} and @file{unistd.h}.
 @c    nptl to propagate the syscall to all cloned processes used to
 @c    implement threads.
 @c   nptl_setxid @asulock @aculock
-@c     while holding the stack_alloc_lock, mark with SETXID_BITMASK all
+@c     while holding the stack_alloc_lock, mark setxid_op for all
 @c     threads that are not exiting, signal them until no thread remains
 @c     marked, clear the marks and run the syscall, then release the lock.
 @c    lll_lock @asulock @aculock
@@ -305,13 +305,12 @@ include the header files @file{sys/types.h} and @file{unistd.h}.
 @c    list_entry ok
 @c    setxid_mark_thread ok
 @c      if a thread is initializing, wait for it to be cloned.
-@c      mark it with SETXID_BITMASK if it's not exiting
+@c      mark setxid_opt if it's not exiting
 @c    setxid_signal_thread ok
-@c      if a thread is marked with SETXID_BITMASK,
+@c      if a setxid_opt is marked,
 @c        send it the SIGSETXID signal
 @c    setxid_unmark_thread ok
-@c      clear SETXID_BITMASK and release the futex if SETXID_BITMASK is
-@c      set.
+@c      unmark setxid_op release the futex if setxid_op is marked.
 @c    <syscall> ok
 @c    lll_unlock @aculock
 @c
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index b6e6bcf..ca617ee 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -239,6 +239,8 @@ get_cached_stack (size_t *sizep, void **memp)
   /* No pending event.  */
   result->nextevent = NULL;
 
+  result->setxid_op = 0;
+
   /* Clear the DTV.  */
   dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
   for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
@@ -1018,8 +1020,6 @@ __find_thread_by_id (pid_t tid)
 static void
 setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
 {
-  int ch;
-
   /* Wait until this thread is cloned.  */
   if (t->setxid_futex == -1
       && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
@@ -1030,16 +1030,16 @@ setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
   /* Don't let the thread exit before the setxid handler runs.  */
   t->setxid_futex = 0;
 
+  int s = atomic_load_relaxed (&t->setxid_op);
   do
     {
-      ch = t->cancelhandling;
-
       /* If the thread is exiting right now, ignore it.  */
+      int ch = atomic_load_relaxed (&t->cancelhandling);
       if ((ch & EXITING_BITMASK) != 0)
 	{
 	  /* Release the futex if there is no other setxid in
 	     progress.  */
-	  if ((ch & SETXID_BITMASK) == 0)
+	  if (s == 0)
 	    {
 	      t->setxid_futex = 1;
 	      futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
@@ -1047,24 +1047,20 @@ setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
 	  return;
 	}
     }
-  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
-					       ch | SETXID_BITMASK, ch));
+  while (!atomic_compare_exchange_weak_acquire (&t->setxid_op, &s, 1));
 }
 
 
 static void
 setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
 {
-  int ch;
-
+  int s = atomic_load_relaxed (&t->setxid_op);
   do
     {
-      ch = t->cancelhandling;
-      if ((ch & SETXID_BITMASK) == 0)
+      if (s == 0)
 	return;
     }
-  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
-					       ch & ~SETXID_BITMASK, ch));
+  while (!atomic_compare_exchange_weak_acquire (&t->setxid_op, &s, 0));
 
   /* Release the futex just in case.  */
   t->setxid_futex = 1;
@@ -1075,7 +1071,8 @@ setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
 static int
 setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
 {
-  if ((t->cancelhandling & SETXID_BITMASK) == 0)
+  int stxid = atomic_load_relaxed (&t->setxid_op);
+  if (stxid == 0)
     return 0;
 
   int val;
diff --git a/nptl/descr.h b/nptl/descr.h
index 16466a6..d77d10b 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -281,9 +281,6 @@ struct pthread
   /* Bit set if thread terminated and TCB is freed.  */
 #define TERMINATED_BIT		4
 #define TERMINATED_BITMASK	(0x01 << TERMINATED_BIT)
-  /* Bit set if thread is supposed to change XID.  */
-#define SETXID_BIT		5
-#define SETXID_BITMASK		(0x01 << SETXID_BIT)
 
   /* Flag to indicate thread cancel disable state (PTHREAD_CANCEL_ENABLE or
      PTHREAD_CANCEL_DISABLE).  */
@@ -330,6 +327,9 @@ struct pthread
   /* Lock to synchronize access to the descriptor.  */
   int lock;
 
+  /* Set if thread is supposed to change XID.  */
+  int setxid_op;
+
   /* Lock for synchronizing setxid calls.  */
   unsigned int setxid_futex;
 
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index c524f09..3a3e23d 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -271,14 +271,12 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
 
   /* Reset the SETXID flag.  */
   struct pthread *self = THREAD_SELF;
-  int flags, newval;
+  int setxid;
   do
     {
-      flags = THREAD_GETMEM (self, cancelhandling);
-      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-					  flags & ~SETXID_BITMASK, flags);
+      setxid = atomic_load_relaxed (&self->setxid_op);
     }
-  while (flags != newval);
+  while (!atomic_compare_exchange_weak_acquire (&self->setxid_op, &setxid, 0));
 
   /* And release the futex.  */
   self->setxid_futex = 1;
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 64526e2..0691b6e 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -557,20 +557,24 @@ START_THREAD_DEFN
   if (IS_DETACHED (pd))
     /* Free the TCB.  */
     __free_tcb (pd);
-  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
+  else
     {
       /* Some other thread might call any of the setXid functions and expect
 	 us to reply.  In this case wait until we did that.  */
-      do
+      int s = atomic_load_relaxed (&pd->setxid_op);
+      if (__glibc_unlikely (s == 1))
+	{
 	/* XXX This differs from the typical futex_wait_simple pattern in that
 	   the futex_wait condition (setxid_futex) is different from the
 	   condition used in the surrounding loop (cancelhandling).  We need
 	   to check and document why this is correct.  */
-	futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
-      while (pd->cancelhandling & SETXID_BITMASK);
+	  do
+	    futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
+	  while (atomic_compare_exchange_weak_acquire (&pd->setxid_op, &s, 1));
 
-      /* Reset the value so that the stack can be reused.  */
-      pd->setxid_futex = 0;
+	  /* Reset the value so that the stack can be reused.  */
+	  pd->setxid_futex = 0;
+	}
     }
 
   /* We cannot call '_exit' here.  '_exit' will terminate the process.

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=48f68d8181a9a09205ae4a83de7f7e3ad0db4ffe

commit 48f68d8181a9a09205ae4a83de7f7e3ad0db4ffe
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Fri Jan 20 17:24:35 2017 -0200

    nptl: Move cancel state and type out cancelhandling
    
    This patch moves both the cancel state (PTHREAD_CANCEL_ENABLE and
    PTHREAD_CANCEL_DISABLE) and the cancel type (PTHREAD_CANCEL_DEFERRED
    and PTHREAD_CANCEL_ASYNCHRONOUS) out of the cancelhandling member.
    The idea is to avoid atomic handling of cancelhandling where these
    states are concerned, since with exclusive members they are not
    accessed concurrently:
    
      * pthread_setcancel{type,state} and internal functions
        __pthread_{un}register_cancel_defer (and its analogous compat
        versions) only change the type/state of the calling thread.
      * __syscall_cancel also only needs to check the state of the
        calling thread.
      * sigcancel_handler is executed only for the thread about to be
        potentially canceled.
      * pthread_join deadlock checks only require access to the state of
        the calling thread.
      * Same behavior for pthread_testcancel.
    
    With this change the cancelhandling member of struct pthread is
    used solely for cancellation handling and setxid signaling.
    
    Checked on i686-linux-gnu, x86_64-linux-gnu, x86_64-linux-gnux32,
    aarch64-linux-gnu, arm-linux-gnueabihf, and powerpc64le-linux-gnu.
    
    	* nptl/allocatestack.c (get_cached_stack): Set both cancelstate
    	and canceltype on new stack allocation.
    	* nptl/cleanup_defer.c (__pthread_register_cancel_defer): Set
    	canceltype directly instead of using cancelhandling member.
    	(__pthread_unregister_cancel_restore): Likewise.
    	* nptl/cleanup_defer_compat.c (_pthread_cleanup_push_defer):
    	Likewise.
    	(_pthread_cleanup_pop_restore): Likewise.
    	* nptl/descr.h (CANCELSTATE_BIT): Remove flag.
    	(CANCELSTATE_BITMASK): Likewise.
    	(CANCELTYPE_BIT): Likewise.
    	(CANCELTYPE_BITMSK): Likewise.
    	(CANCEL_RESTMASK): Likewise.
    	(CANCEL_ENABLED_AND_CANCELED): Likewise.
    	(CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS): Likewise.
    	(cancelstate): New member.
    	(canceltype): Likewise.
    	* nptl/libc-cancellation.c (__syscall_cancel): Use cancelstate
    	instead of cancelhandling member.
    	* nptl/nptl-init.c (sigcancel_handler): Likewise.
    	* nptl/pthreadP.h (__do_cancel): Likewise.
    	(CANCELLATION_P): Likewise.
    	* nptl/pthread_join.c (pthread_join): Remove CANCELLATION_P
    	usage.
    	* nptl/pthread_testcancel.c (__pthread_testcancel): Likewise.
    	* nptl/pthread_setcancelstate.c (__pthread_setcancelstate):
    	Use cancelstate member instead of cancelhandling.
    	* nptl/pthread_setcanceltype.c (__pthread_setcanceltype): Use
    	canceltype member instead of cancelhandling.
    	* sysdeps/x86_64/nptl/tcb-offsets.sym (TCB_CANCELSTATE_BITMASK):
    	Remove.
    	(TCB_CANCELTYPE_BITMASK): Likewise.
    	(TCB_CANCEL_RETMASK): Likewise.
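
As a user-level illustration (not glibc-internal code) of the property
the patch relies on: pthread_setcancelstate and pthread_setcanceltype
only ever act on the calling thread's own state, so no other thread can
race with these stores and plain int members suffice:

#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  int oldstate, oldtype;

  /* Each call changes only the calling thread's own cancel state and
     type, which is why the patch can keep them in plain int members.  */
  pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
  pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, &oldtype);
  printf ("old state = %d, old type = %d\n", oldstate, oldtype);

  /* Restore the previous cancel state before returning.  */
  pthread_setcancelstate (oldstate, NULL);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);
  pthread_join (t, NULL);
  return 0;
}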

diff --git a/manual/pattern.texi b/manual/pattern.texi
index 39ae97a..7bdb367 100644
--- a/manual/pattern.texi
+++ b/manual/pattern.texi
@@ -1819,8 +1819,6 @@ the beginning of the vector.
 @c    pthread_setcancelstate @ascuplugin @ascuheap @acsmem
 @c      (disable cancellation around exec_comm; it may do_cancel the
 @c       second time, if async cancel is enabled)
-@c     THREAD_ATOMIC_CMPXCHG_VAL dup ok
-@c     CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS dup ok
 @c     do_cancel @ascuplugin @ascuheap @acsmem
 @c      THREAD_ATOMIC_BIT_SET dup ok
 @c      pthread_unwind @ascuplugin @ascuheap @acsmem
diff --git a/manual/process.texi b/manual/process.texi
index b82b91f..c4087fb 100644
--- a/manual/process.texi
+++ b/manual/process.texi
@@ -84,12 +84,7 @@ until the subprogram terminates before you can do anything else.
 @c    libc_cleanup_region_end ok
 @c     pthread_cleanup_pop_restore ok
 @c  SINGLE_THREAD_P ok
-@c  LIBC_CANCEL_ASYNC @ascuplugin @ascuheap @acsmem
-@c   libc_enable_asynccancel @ascuplugin @ascuheap @acsmem
-@c    CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS dup ok
 @c    do_cancel dup @ascuplugin @ascuheap @acsmem
-@c  LIBC_CANCEL_RESET ok
-@c   libc_disable_asynccancel ok
 @c    lll_futex_wait dup ok
 This function executes @var{command} as a shell command.  In @theglibc{},
 it always uses the default shell @code{sh} to run the command.
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 1cc7893..b6e6bcf 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -232,6 +232,8 @@ get_cached_stack (size_t *sizep, void **memp)
 
   /* Cancellation handling is back to the default.  */
   result->cancelhandling = 0;
+  result->cancelstate = PTHREAD_CANCEL_ENABLE; 
+  result->canceltype = PTHREAD_CANCEL_DEFERRED;
   result->cleanup = NULL;
 
   /* No pending event.  */
diff --git a/nptl/cleanup_defer.c b/nptl/cleanup_defer.c
index 7e1942d..0eb6332 100644
--- a/nptl/cleanup_defer.c
+++ b/nptl/cleanup_defer.c
@@ -31,27 +31,9 @@ __pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
   ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
   ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);
 
-  int cancelhandling = THREAD_GETMEM (self, cancelhandling);
-
-  /* Disable asynchronous cancellation for now.  */
-  if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
-    while (1)
-      {
-	int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-						cancelhandling
-						& ~CANCELTYPE_BITMASK,
-						cancelhandling);
-	if (__glibc_likely (curval == cancelhandling))
-	  /* Successfully replaced the value.  */
-	  break;
-
-	/* Prepare for the next round.  */
-	cancelhandling = curval;
-      }
-
-  ibuf->priv.data.canceltype = (cancelhandling & CANCELTYPE_BITMASK
-				? PTHREAD_CANCEL_ASYNCHRONOUS
-				: PTHREAD_CANCEL_DEFERRED);
+  volatile struct pthread *pd = (volatile struct pthread *) self;
+  pd->canceltype = PTHREAD_CANCEL_DEFERRED;
+  ibuf->priv.data.canceltype = pd->canceltype;
 
   /* Store the new cleanup handler info.  */
   THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
@@ -67,25 +49,11 @@ __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
 
   THREAD_SETMEM (self, cleanup_jmp_buf, ibuf->priv.data.prev);
 
-  int cancelhandling;
+  volatile struct pthread *pd = (volatile struct pthread *) self;
   if (ibuf->priv.data.canceltype != PTHREAD_CANCEL_DEFERRED
-      && ((cancelhandling = THREAD_GETMEM (self, cancelhandling))
-	  & CANCELTYPE_BITMASK) == 0)
+      && pd->canceltype == PTHREAD_CANCEL_DEFERRED)
     {
-      while (1)
-	{
-	  int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-						  cancelhandling
-						  | CANCELTYPE_BITMASK,
-						  cancelhandling);
-	  if (__glibc_likely (curval == cancelhandling))
-	    /* Successfully replaced the value.  */
-	    break;
-
-	  /* Prepare for the next round.  */
-	  cancelhandling = curval;
-	}
-
-      CANCELLATION_P (self);
+      pd->canceltype = PTHREAD_CANCEL_ASYNCHRONOUS;
+      __pthread_testcancel ();
     }
 }
diff --git a/nptl/cleanup_defer_compat.c b/nptl/cleanup_defer_compat.c
index 2705b46..7644977 100644
--- a/nptl/cleanup_defer_compat.c
+++ b/nptl/cleanup_defer_compat.c
@@ -29,27 +29,9 @@ _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
   buffer->__arg = arg;
   buffer->__prev = THREAD_GETMEM (self, cleanup);
 
-  int cancelhandling = THREAD_GETMEM (self, cancelhandling);
-
-  /* Disable asynchronous cancellation for now.  */
-  if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
-    while (1)
-      {
-	int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-						cancelhandling
-						& ~CANCELTYPE_BITMASK,
-						cancelhandling);
-	if (__glibc_likely (curval == cancelhandling))
-	  /* Successfully replaced the value.  */
-	  break;
-
-	/* Prepare for the next round.  */
-	cancelhandling = curval;
-      }
-
-  buffer->__canceltype = (cancelhandling & CANCELTYPE_BITMASK
-			  ? PTHREAD_CANCEL_ASYNCHRONOUS
-			  : PTHREAD_CANCEL_DEFERRED);
+  volatile struct pthread *pd = (volatile struct pthread *) self;
+  pd->canceltype = PTHREAD_CANCEL_DEFERRED;
+  buffer->__canceltype = pd->canceltype;
 
   THREAD_SETMEM (self, cleanup, buffer);
 }
@@ -64,26 +46,12 @@ _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
 
   THREAD_SETMEM (self, cleanup, buffer->__prev);
 
-  int cancelhandling;
-  if (__builtin_expect (buffer->__canceltype != PTHREAD_CANCEL_DEFERRED, 0)
-      && ((cancelhandling = THREAD_GETMEM (self, cancelhandling))
-	  & CANCELTYPE_BITMASK) == 0)
+  volatile struct pthread *pd = (volatile struct pthread *) self;
+  if (buffer->__canceltype != PTHREAD_CANCEL_DEFERRED
+      && pd->canceltype == PTHREAD_CANCEL_DEFERRED)
     {
-      while (1)
-	{
-	  int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-						  cancelhandling
-						  | CANCELTYPE_BITMASK,
-						  cancelhandling);
-	  if (__glibc_likely (curval == cancelhandling))
-	    /* Successfully replaced the value.  */
-	    break;
-
-	  /* Prepare for the next round.  */
-	  cancelhandling = curval;
-	}
-
-      CANCELLATION_P (self);
+      pd->canceltype = PTHREAD_CANCEL_ASYNCHRONOUS;
+      __pthread_testcancel ();
     }
 
   /* If necessary call the cleanup routine after we removed the
diff --git a/nptl/descr.h b/nptl/descr.h
index c3e1cc0..16466a6 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -272,13 +272,7 @@ struct pthread
 
   /* Flags determining processing of cancellation.  */
   int cancelhandling;
-  /* Bit set if cancellation is disabled.  */
-#define CANCELSTATE_BIT		0
-#define CANCELSTATE_BITMASK	(0x01 << CANCELSTATE_BIT)
-  /* Bit set if asynchronous cancellation mode is selected.  */
-#define CANCELTYPE_BIT		1
-#define CANCELTYPE_BITMASK	(0x01 << CANCELTYPE_BIT)
-  /* Bit set if threads is canceled.  */
+
 #define CANCELED_BIT		2
 #define CANCELED_BITMASK	(0x01 << CANCELED_BIT)
   /* Bit set if thread is exiting.  */
@@ -290,16 +284,13 @@ struct pthread
   /* Bit set if thread is supposed to change XID.  */
 #define SETXID_BIT		5
 #define SETXID_BITMASK		(0x01 << SETXID_BIT)
-  /* Mask for the rest.  Helps the compiler to optimize.  */
-#define CANCEL_RESTMASK		0xffffffc0
-
-#define CANCEL_ENABLED_AND_CANCELED(value) \
-  (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK	      \
-	       | CANCEL_RESTMASK | TERMINATED_BITMASK)) == CANCELED_BITMASK)
-#define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \
-  (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK    \
-	       | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK))     \
-   == (CANCELTYPE_BITMASK | CANCELED_BITMASK))
+
+  /* Flag to indicate thread cancel disable state (PTHREAD_CANCEL_ENABLE or
+     PTHREAD_CANCEL_DISABLE).  */
+  int cancelstate;
+  /* Flag to indicate thread cancel type (PTHREAD_CANCEL_DEFERRED or
+     PTHREAD_CANCEL_ASYNCHRONOUS).  */
+  int canceltype;
 
   /* Flags.  Including those copied from the thread attribute.  */
   int flags;
diff --git a/nptl/libc-cancellation.c b/nptl/libc-cancellation.c
index a7bbd7f..335acab 100644
--- a/nptl/libc-cancellation.c
+++ b/nptl/libc-cancellation.c
@@ -32,7 +32,7 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
   long int result;
 
   /* If cancellation is not enabled, call the syscall directly.  */
-  if (pd->cancelhandling & CANCELSTATE_BITMASK)
+  if (pd->cancelstate == PTHREAD_CANCEL_DISABLE)
     {
       INTERNAL_SYSCALL_DECL (err);
       result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6
@@ -49,7 +49,7 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
 
   if ((result == -EINTR)
       && (pd->cancelhandling & CANCELED_BITMASK)
-      && !(pd->cancelhandling & CANCELSTATE_BITMASK))
+      && (pd->cancelstate != PTHREAD_CANCEL_DISABLE))
     __syscall_do_cancel ();
 
   return result;
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 33ee7c4..c524f09 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -208,7 +208,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
   struct pthread *self = THREAD_SELF;
   volatile struct pthread *pd = (volatile struct pthread *) self;
 
-  if (((pd->cancelhandling & (CANCELSTATE_BITMASK)) != 0)
+  if ((pd->cancelstate == PTHREAD_CANCEL_DISABLE)
       || ((pd->cancelhandling & CANCELED_BITMASK) == 0))
     return;
 
@@ -224,7 +224,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
      '__syscall_cancel_arch_end', thus disabling the cancellation and allowing
      the process to handle such conditions.  */
   uintptr_t pc = ucontext_get_pc (ctx);
-  if (pd->cancelhandling & CANCELTYPE_BITMASK
+  if (pd->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS
       || (pc >= (uintptr_t) __syscall_cancel_arch_start
           && pc < (uintptr_t) __syscall_cancel_arch_end))
     {
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 88eebca..6cf9c02 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -244,18 +244,6 @@ extern int __pthread_debug attribute_hidden;
 #endif
 
 
-/* Cancellation test.  */
-#define CANCELLATION_P(self) \
-  do {									      \
-    int cancelhandling = THREAD_GETMEM (self, cancelhandling);		      \
-    if (CANCEL_ENABLED_AND_CANCELED (cancelhandling))			      \
-      {									      \
-	THREAD_SETMEM (self, result, PTHREAD_CANCELED);			      \
-	__do_cancel ();							      \
-      }									      \
-  } while (0)
-
-
 extern void __pthread_unwind (__pthread_unwind_buf_t *__buf)
      __cleanup_fct_attribute __attribute ((__noreturn__))
 #if !defined SHARED && !IS_IN (libpthread)
@@ -291,22 +279,8 @@ __do_cancel (void)
 {
   struct pthread *self = THREAD_SELF;
 
-  /* Make sure we get no more cancellations by clearing the cancel
-     state.  */
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-  while (1)
-    {
-      int newval = (oldval | CANCELSTATE_BITMASK);
-      newval &= ~(CANCELTYPE_BITMASK);
-      if (oldval == newval)
-	break;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					  oldval);
-      if (__glibc_likely (curval == oldval))
-	break;
-      oldval = curval;
-    }
+  /* Make sure we get no more cancellations.  */
+  self->cancelstate = PTHREAD_CANCEL_DISABLE;
 
   THREAD_SETMEM (self, result, PTHREAD_CANCELED);
 
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
index 04d9b89..b97763b 100644
--- a/nptl/pthread_cancel.c
+++ b/nptl/pthread_cancel.c
@@ -50,7 +50,7 @@ __pthread_cancel (pthread_t th)
 
   /* Avoid signaling when thread attempts cancel itself (pthread_kill
      is expensive).  */
-  if (pd == THREAD_SELF && !(pd->cancelhandling & CANCELTYPE_BITMASK))
+  if (pd == THREAD_SELF && pd->canceltype == PTHREAD_CANCEL_DEFERRED)
     return 0;
 
   return __pthread_kill (th, SIGCANCEL);
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index 8255a7d..66263cd 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -69,7 +69,8 @@ __pthread_join (pthread_t threadid, void **thread_return)
 	   && (pd->cancelhandling
 	       & (CANCELED_BITMASK | EXITING_BITMASK
 		  | TERMINATED_BITMASK)) == 0))
-      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
+      && !(self->cancelstate == PTHREAD_CANCEL_ENABLE
+           && self->cancelhandling & CANCELED_BITMASK))
     /* This is a deadlock situation.  The threads are waiting for each
        other to finish.  Note that this is a "may" error.  To be 100%
        sure we catch this error we would have to lock the data
diff --git a/nptl/pthread_setcancelstate.c b/nptl/pthread_setcancelstate.c
index 8a1cb9a..40cfe54 100644
--- a/nptl/pthread_setcancelstate.c
+++ b/nptl/pthread_setcancelstate.c
@@ -24,46 +24,13 @@
 int
 __pthread_setcancelstate (int state, int *oldstate)
 {
-  volatile struct pthread *self;
-
   if (state < PTHREAD_CANCEL_ENABLE || state > PTHREAD_CANCEL_DISABLE)
     return EINVAL;
 
-  self = THREAD_SELF;
-
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-  while (1)
-    {
-      int newval = (state == PTHREAD_CANCEL_DISABLE
-		    ? oldval | CANCELSTATE_BITMASK
-		    : oldval & ~CANCELSTATE_BITMASK);
-
-      /* Store the old value.  */
-      if (oldstate != NULL)
-	*oldstate = ((oldval & CANCELSTATE_BITMASK)
-		     ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE);
-
-      /* Avoid doing unnecessary work.  The atomic operation can
-	 potentially be expensive if the memory has to be locked and
-	 remote cache lines have to be invalidated.  */
-      if (oldval == newval)
-	break;
-
-      /* Update the cancel handling word.  This has to be done
-	 atomically since other bits could be modified as well.  */
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					      oldval);
-      if (__glibc_likely (curval == oldval))
-	{
-	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
-	    __do_cancel ();
-
-	  break;
-	}
-
-      /* Prepare for the next round.  */
-      oldval = curval;
-    }
+  volatile struct pthread *self = THREAD_SELF;
+  if (oldstate)
+    *oldstate = self->cancelstate;
+  self->cancelstate = state;
 
   return 0;
 }
diff --git a/nptl/pthread_setcanceltype.c b/nptl/pthread_setcanceltype.c
index dd1f374..4273359 100644
--- a/nptl/pthread_setcanceltype.c
+++ b/nptl/pthread_setcanceltype.c
@@ -33,43 +33,18 @@ __pthread_setcanceltype (int type, int *oldtype)
 #endif
 
   volatile struct pthread *self = THREAD_SELF;
-
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-  while (1)
-    {
-      int newval = (type == PTHREAD_CANCEL_ASYNCHRONOUS
-		    ? oldval | CANCELTYPE_BITMASK
-		    : oldval & ~CANCELTYPE_BITMASK);
-
-      /* Store the old value.  */
-      if (oldtype != NULL)
-	*oldtype = ((oldval & CANCELTYPE_BITMASK)
-		    ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED);
-
-      /* Avoid doing unnecessary work.  The atomic operation can
-	 potentially be expensive if the memory has to be locked and
-	 remote cache lines have to be invalidated.  */
-      if (oldval == newval)
-	break;
-
-      /* Update the cancel handling word.  This has to be done
-	 atomically since other bits could be modified as well.  */
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					      oldval);
-      if (__glibc_likely (curval == oldval))
-	{
-	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
-	    {
-	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-	      __do_cancel ();
-	    }
-
-	  break;
-	}
-
-      /* Prepare for the next round.  */
-      oldval = curval;
-    }
+  if (oldtype)
+    *oldtype = self->canceltype;
+  self->canceltype = type;
+
+  /* Although pthread_setcanceltype is not a cancellation point,
+     there is a small race window where a cancellation arrives just
+     before async cancellation is enabled and, without an explicit
+     test, the cancellation might not be acted upon.  Also, an
+     application cannot work around this by calling pthread_testcancel
+     itself, because only pthread_setcanceltype is async-cancel-safe.  */
+  if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
+    __pthread_testcancel ();
 
   return 0;
 }
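
A sketch of the window the new comment above describes (an illustrative
test, not part of the patch): the cancellation request is posted while the
target is still deferred and away from any cancellation point, and it must
fire as soon as the target switches to asynchronous type.

#include <pthread.h>

static volatile int hold = 1;

static void *
victim (void *arg)
{
  /* Busy-wait with no cancellation point until the request is posted.  */
  while (hold)
    ;
  /* A cancellation request is now pending but has not been acted on.
     Enabling asynchronous cancellation must make it fire; the explicit
     __pthread_testcancel call added above is what guarantees that.  */
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
  for (;;)
    ;   /* No further cancellation point is ever reached.  */
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  pthread_create (&thr, NULL, victim, NULL);
  pthread_cancel (thr);       /* Posted while the victim busy-waits.  */
  hold = 0;                   /* Now let it switch to asynchronous type.  */
  pthread_join (thr, NULL);   /* Must not hang.  */
  return 0;
}
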
diff --git a/nptl/pthread_testcancel.c b/nptl/pthread_testcancel.c
index 38b343f..8a65b94 100644
--- a/nptl/pthread_testcancel.c
+++ b/nptl/pthread_testcancel.c
@@ -23,7 +23,11 @@
 void
 __pthread_testcancel (void)
 {
-  CANCELLATION_P (THREAD_SELF);
+  pthread_t self = (pthread_t) THREAD_SELF;
+  volatile struct pthread *pd = (volatile struct pthread *) self;
+
+  if (pd->cancelhandling && pd->cancelstate == PTHREAD_CANCEL_ENABLE)
+    __do_cancel ();
 }
 strong_alias (__pthread_testcancel, pthread_testcancel)
 hidden_def (__pthread_testcancel)
diff --git a/sysdeps/x86_64/nptl/tcb-offsets.sym b/sysdeps/x86_64/nptl/tcb-offsets.sym
index b225e5b..37069dd 100644
--- a/sysdeps/x86_64/nptl/tcb-offsets.sym
+++ b/sysdeps/x86_64/nptl/tcb-offsets.sym
@@ -17,10 +17,7 @@ PRIVATE_FUTEX		offsetof (tcbhead_t, private_futex)
 #endif
 
 -- Not strictly offsets, but these values are also used in the TCB.
-TCB_CANCELSTATE_BITMASK	 CANCELSTATE_BITMASK
-TCB_CANCELTYPE_BITMASK	 CANCELTYPE_BITMASK
 TCB_CANCELED_BITMASK	 CANCELED_BITMASK
 TCB_EXITING_BITMASK	 EXITING_BITMASK
-TCB_CANCEL_RESTMASK	 CANCEL_RESTMASK
 TCB_TERMINATED_BITMASK	 TERMINATED_BITMASK
 TCB_PTHREAD_CANCELED	 PTHREAD_CANCELED

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=578c2706e6adb34ca35abecad59f33d27aca32fc

commit 578c2706e6adb34ca35abecad59f33d27aca32fc
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Fri Feb 3 21:07:07 2017 -0200

    nptl: hppa: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the hppa modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    HPPA requires an arch-specific syscall_cancel because
    INTERNAL_SYSCALL_NCS adds some instructions to fetch the returned
    syscall value.  The implementation was based on the default C
    version built with GCC 6.1.
    
    Checked on hppa-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/hppa/sigcontextinfo.h: New file.
    	* sysdeps/unix/sysv/linux/hppa/syscall_cancel.S: Likewise.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 9376317..71f0882 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/hppa/sigcontextinfo.h: New file.
+	* sysdeps/unix/sysv/linux/hppa/syscall_cancel.S: Likewise.
+
 	* nptl/libc-cancellation.c (__syscall_cancel): Define and use 7
 	argument syscall if architecture requires it.
 	* nptl/pthreadP.h (__syscall_cancel_arch): Likewise.
diff --git a/sysdeps/unix/sysv/linux/hppa/sigcontextinfo.h b/sysdeps/unix/sysv/linux/hppa/sigcontextinfo.h
new file mode 100644
index 0000000..4f59c1e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/hppa/sigcontextinfo.h
@@ -0,0 +1,38 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
+/* In general we cannot provide any information.  */
+#define SIGCONTEXT struct sigcontext *
+#define SIGCONTEXT_EXTRA_ARGS
+#define GET_PC(ctx)	((void *) 0)
+#define GET_FRAME(ctx)	((void *) 0)
+#define GET_STACK(ctx)	((void *) 0)
+#define CALL_SIGHANDLER(handler, signo, ctx) \
+  (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.sc_iaoq[0] & ~0x3;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/hppa/syscall_cancel.S b/sysdeps/unix/sysv/linux/hppa/syscall_cancel.S
new file mode 100644
index 0000000..552ba2f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/hppa/syscall_cancel.S
@@ -0,0 +1,82 @@
+/* Cancellable syscall wrapper.  Linux/hppa version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+				   long int nr,
+				   long int arg1,
+				   long int arg2,
+				   long int arg3,
+				   long int arg4,
+				   long int arg5,
+				   long int arg6)  */
+
+	.text
+ENTRY(__syscall_cancel_arch)
+	stw	%r2,-20(%r30)
+	ldo	128(%r30),%r30
+	cfi_def_cfa_offset (-128)
+	cfi_offset (2, -20)
+	ldw	-180(%r30),%r28
+	copy	%r24,%r31
+	stw	%r28,-104(%r30)
+	ldw	-184(%r30),%r28
+	stw	%r28,-108(%r30)
+	ldw	-188(%r30),%r28
+	stw	%r28,-112(%r30)
+	ldw	-192(%r30),%r28
+	stw	%r4,-100(%r30)
+	stw	%r28,-116(%r30)
+	copy	%r25,%r28
+	copy	%r23,%r25
+	stw	%r19,-32(%r30)
+	cfi_offset (4, 28)
+
+	.global __syscall_cancel_arch_start
+.type __syscall_cancel_arch_start,@function
+__syscall_cancel_arch_start:
+
+	ldw	0(%r26),%r20
+	stw	%r20,-120(%r30)
+	ldw	-120(%r30),%r20
+	bb,<	%r20,29,1f
+	ldw	-116(%r30),%r21
+	ldw	-112(%r30),%r22
+	copy	%r31,%r26
+	ldw	-108(%r30),%r23
+	ldw	-104(%r30),%r24
+	copy	%r19, %r4
+	ble	0x100(%sr2, %r0)
+
+	.global __syscall_cancel_arch_end
+.type __syscall_cancel_arch_end,@function	
+__syscall_cancel_arch_end:
+
+	copy	%r28, %r20
+	copy	%r4, %r19
+	ldw	-148(%r30),%r2
+	ldw	-100(%r30),%r4
+	bv	%r0(%r2)
+	ldo	-128(%r30),%r30
+1:
+	bl __syscall_do_cancel,%r2
+	nop
+	nop
+END(__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=4817f7a34850944bbfa63079321a2d3a07a533dd

commit 4817f7a34850944bbfa63079321a2d3a07a533dd
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:03:07 2017 -0200

    nptl: mips: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the mips modifications required for the BZ#12683.
    It adds the required ucontext_get_pc function, a mips32 cancellable
    syscall wrapper, and 7-argument cancellable syscall support.
    
    To avoid code pessimization and a requirement on all architectures
    to support {INLINE,INTERNAL}_SYSCALL with 7 arguments, the support is
    added through a flag, HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS, which changes
    the signature and prototype of the required macros and functions
    (SYSCALL_CANCEL, __syscall_cancel and __syscall_cancel_arch).  By
    default, 6-argument cancellable syscalls are used.
    
    MIPS o32 requires an arch-specific implementation because
    INTERNAL_SYSCALL_NCS adds an 'addiu' just after the syscall
    instruction, which invalidates the checks in sigcancel_handler.
    
    Checked against a build and make check run-built-tests=no for
    mips-linux-gnu, mips64-linux-gnu, mips64-n32-linux-gnu.  I also
    ran some basic o32 and n64 cancellation tests on a simulated
    mips64 qemu system.
    
    	* nptl/libc-cancellation.c (__syscall_cancel): Define and use 7
    	argument syscall if architecture requires it.
    	* nptl/pthreadP.h (__syscall_cancel_arch): Likewise.
    	* sysdeps/unix/sysdep.h (__syscall_cancel, __SYSCALL_CANCEL*): Define
    	with 7 argument if architecture requires it.
    	(__SYSCALL_CANCEL7_ARG_DEF): New macro.
    	(__SYSCALL_CANCEL7_ARG): Likewise.
    	(__SYSCALL_CANCEL7_ARG7): Likewise.
    	* sysdeps/unix/sysv/linux/syscall_cancel.c (__syscall_cancel_arch):
    	Likewise.
    	* sysdeps/mips/nptl/tls.h (READ_THREAD_POINTER): Check __mips_isa_rev
    	existence for macro definition.
    	* sysdeps/unix/sysv/linux/mips/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    	* sysdeps/unix/sysv/linux/mips/mips32/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
    	(HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS): Define.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
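
The SYSCALL_CANCEL dispatch that the new __SYSCALL_CANCEL7 wrapper plugs
into counts its arguments with a variadic helper macro and pastes the count
onto the wrapper name.  A standalone sketch of that mechanism (demo macro
names, not the glibc ones):

#include <stdio.h>

/* Count the arguments that follow the syscall name (up to 7), mirroring
   __SYSCALL_CANCEL_NARGS in sysdeps/unix/sysdep.h.  */
#define DEMO_NARGS_X(a, b, c, d, e, f, g, h, n, ...) n
#define DEMO_NARGS(...) DEMO_NARGS_X (__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define DEMO_CONCAT_X(a, b) a##b
#define DEMO_CONCAT(a, b) DEMO_CONCAT_X (a, b)
#define DEMO_CANCEL(...) \
  DEMO_CONCAT (DEMO_CANCEL, DEMO_NARGS (__VA_ARGS__)) (__VA_ARGS__)

/* Stand-ins for the real __SYSCALL_CANCELn wrappers.  */
#define DEMO_CANCEL1(name, a1) \
  printf ("%s -> 1-argument wrapper\n", #name)
#define DEMO_CANCEL7(name, a1, a2, a3, a4, a5, a6, a7) \
  printf ("%s -> 7-argument wrapper\n", #name)

int
main (void)
{
  DEMO_CANCEL (close, 3);
  DEMO_CANCEL (sync_file_range, 3, 0, 0, 0, 0, 0, 0);
  return 0;
}

In glibc the 7-argument form only becomes reachable when the architecture
defines HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS, as the mips32 sysdep.h change
below does.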

diff --git a/ChangeLog b/ChangeLog
index 47537de..9376317 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,23 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* nptl/libc-cancellation.c (__syscall_cancel): Define and use 7
+	argument syscall if architecture requires it.
+	* nptl/pthreadP.h (__syscall_cancel_arch): Likewise.
+	* sysdeps/unix/sysdep.h (__syscall_cancel, __SYSCALL_CANCEL*): Define
+	with 7 argument if architecture requires it.
+	(__SYSCALL_CANCEL7_ARG_DEF): New macro.
+	(__SYSCALL_CANCEL7_ARG): Likewise.
+	(__SYSCALL_CANCEL7_ARG7): Likewise.
+	* sysdeps/unix/sysv/linux/syscall_cancel.c (__syscall_cancel_arch):
+	Likewise.
+	* sysdeps/mips/nptl/tls.h (READ_THREAD_POINTER): Check __mips_isa_rev
+	existence for macro definition.
+	* sysdeps/unix/sysv/linux/mips/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+	* sysdeps/unix/sysv/linux/mips/mips32/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
+	(HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS): Define.
+
 	* sysdeps/sh/sysdep.h (L): New macro.
 	* sysdeps/unix/sysv/linux/sh/syscall_cancel.S: New file.
 	* sysdeps/unix/sysv/linux/sh/sigcontextinfo.h (ucontext_get_pc):
diff --git a/nptl/libc-cancellation.c b/nptl/libc-cancellation.c
index b013435..a7bbd7f 100644
--- a/nptl/libc-cancellation.c
+++ b/nptl/libc-cancellation.c
@@ -25,7 +25,7 @@ long int
 __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
 		  __syscall_arg_t a2, __syscall_arg_t a3,
 		  __syscall_arg_t a4, __syscall_arg_t a5,
-		  __syscall_arg_t a6)
+		  __syscall_arg_t a6 __SYSCALL_CANCEL7_ARG_DEF)
 {
   pthread_t self = (pthread_t) THREAD_SELF;
   volatile struct pthread *pd = (volatile struct pthread *) self;
@@ -35,7 +35,8 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
   if (pd->cancelhandling & CANCELSTATE_BITMASK)
     {
       INTERNAL_SYSCALL_DECL (err);
-      result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6);
+      result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6
+					  __SYSCALL_CANCEL7_ARG7);
       if (INTERNAL_SYSCALL_ERROR_P (result, err))
 	return -INTERNAL_SYSCALL_ERRNO (result, err);
       return result;
@@ -44,7 +45,7 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
   /* Call the arch-specific entry points that contains the globals markers
      to be checked by SIGCANCEL handler.  */
   result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
-			          a6);
+			          a6 __SYSCALL_CANCEL7_ARG7);
 
   if ((result == -EINTR)
       && (pd->cancelhandling & CANCELED_BITMASK)
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 35d99f6..88eebca 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -317,7 +317,8 @@ __do_cancel (void)
 
 extern long int __syscall_cancel_arch (volatile int *, __syscall_arg_t nr,
      __syscall_arg_t arg1, __syscall_arg_t arg2, __syscall_arg_t arg3,
-     __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6);
+     __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6
+     __SYSCALL_CANCEL7_ARG_DEF);
 libc_hidden_proto (__syscall_cancel_arch);
 
 extern void __syscall_do_cancel (void)
diff --git a/sysdeps/mips/nptl/tls.h b/sysdeps/mips/nptl/tls.h
index 913f7d3..0d32424 100644
--- a/sysdeps/mips/nptl/tls.h
+++ b/sysdeps/mips/nptl/tls.h
@@ -35,7 +35,7 @@
 # define READ_THREAD_POINTER() (__builtin_thread_pointer ())
 #else
 /* Note: rd must be $v1 to be ABI-conformant.  */
-# if __mips_isa_rev >= 2
+# if defined __mips_isa_rev && __mips_isa_rev >= 2
 #  define READ_THREAD_POINTER() \
      ({ void *__result;							      \
         asm volatile ("rdhwr\t%0, $29" : "=v" (__result));	      	      \
diff --git a/sysdeps/unix/sysdep.h b/sysdeps/unix/sysdep.h
index 10669ae..c90898d 100644
--- a/sysdeps/unix/sysdep.h
+++ b/sysdeps/unix/sysdep.h
@@ -121,29 +121,51 @@ typedef long int __syscall_arg_t;
 # define __SSC(__x) ((__syscall_arg_t) (__x))
 #endif
 
+/* Adjust both the __syscall_cancel and the SYSCALL_CANCEL macros to support
+   7 arguments instead of the default 6 (for some architectures like mips32).
+   We need it because using 7 arguments for all architectures would then
+   require implementing both {INTERNAL,INLINE}_SYSCALL and
+   __syscall_cancel_arch to accept 7 arguments.  */
+#ifdef HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS
+# define __SYSCALL_CANCEL7_ARG_DEF 	, __syscall_arg_t arg7
+# define __SYSCALL_CANCEL7_ARG		, 0
+# define __SYSCALL_CANCEL7_ARG7		, arg7
+#else
+# define __SYSCALL_CANCEL7_ARG_DEF
+# define __SYSCALL_CANCEL7_ARG
+# define __SYSCALL_CANCEL7_ARG7
+#endif
+
 long int __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
 			   __syscall_arg_t arg2, __syscall_arg_t arg3,
 			   __syscall_arg_t arg4, __syscall_arg_t arg5,
-			   __syscall_arg_t arg6);
+			   __syscall_arg_t arg6 __SYSCALL_CANCEL7_ARG_DEF);
 libc_hidden_proto (__syscall_cancel);
 
 #define __SYSCALL_CANCEL0(name) \
-  (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0)
+  (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0 \
+		     __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL1(name, a1) \
-  (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0)
+  (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0 \
+		     __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL2(name, a1, a2) \
-  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0)
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0 \
+		     __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL3(name, a1, a2, a3) \
-  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0)
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0 \
+		     __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL4(name, a1, a2, a3, a4) \
   (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
-		     __SSC(a4), 0, 0)
+		     __SSC(a4), 0, 0 __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL5(name, a1, a2, a3, a4, a5) \
   (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
-		     __SSC(a4), __SSC(a5), 0)
+		     __SSC(a4), __SSC(a5), 0 __SYSCALL_CANCEL7_ARG)
 #define __SYSCALL_CANCEL6(name, a1, a2, a3, a4, a5, a6) \
   (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
-		     __SSC(a4), __SSC(a5), __SSC(a6))
+		     __SSC(a4), __SSC(a5), __SSC(a6) __SYSCALL_CANCEL7_ARG)
+#define __SYSCALL_CANCEL7(name, a1, a2, a3, a4, a5, a6, a7) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+		     __SSC(a4), __SSC(a5), __SSC(a6), __SSC(a7))
 
 #define __SYSCALL_CANCEL_NARGS_X(a,b,c,d,e,f,g,h,n,...) n
 #define __SYSCALL_CANCEL_NARGS(...) \
diff --git a/sysdeps/unix/sysv/linux/mips/mips32/syscall_cancel.S b/sysdeps/unix/sysv/linux/mips/mips32/syscall_cancel.S
new file mode 100644
index 0000000..75c4390
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/mips/mips32/syscall_cancel.S
@@ -0,0 +1,128 @@
+/* Cancellable syscall wrapper.  Linux/mips32 version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <sys/asm.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+				   __syscall_arg_t nr,
+				   __syscall_arg_t arg1,
+				   __syscall_arg_t arg2,
+				   __syscall_arg_t arg3,
+				   __syscall_arg_t arg4,
+				   __syscall_arg_t arg5,
+				   __syscall_arg_t arg6,
+				   __syscall_arg_t arg7)  */
+
+#define FRAME_SIZE 56
+
+NESTED (__syscall_cancel_arch, FRAME_SIZE, fp)
+	.mask	0xc0070000,-SZREG
+	.fmask	0x00000000,0
+
+	PTR_ADDIU sp, -FRAME_SIZE
+	cfi_def_cfa_offset (FRAME_SIZE)
+
+	sw	fp,48(sp)
+	sw	ra,52(sp)
+	sw	s2,44(sp)
+	sw	s1,40(sp)
+	sw	s0,36(sp)
+#ifdef __PIC__
+	.cprestore	16
+#endif
+	cfi_offset (31, -4)
+	cfi_offset (30, -8)
+	cfi_offset (18, -12)
+	cfi_offset (17, -16)
+	cfi_offset (16, -20)
+	move	fp,sp
+	cfi_def_cfa_register (30)
+
+	.globl __syscall_cancel_arch_start
+	.type __syscall_cancel_arch_start, @function
+__syscall_cancel_arch_start:
+
+	lw	v0,0(a0)
+	andi	v0,v0,0x4
+	bne	v0,zero,2f
+
+	addiu	sp,sp,-16
+	addiu	v0,sp,16
+	sw	v0,24(fp)
+
+	move	s0,a1
+	move	a0,a2
+	move	a1,a3
+	lw	a2,72(fp)
+	lw	a3,76(fp)
+	lw	v0,84(fp)
+	lw	s1,80(fp)
+	lw	s2,88(fp)
+
+	.set	noreorder
+	subu	sp, 32
+	sw	s1, 16(sp)
+	sw	v0, 20(sp)
+	sw	s2, 24(sp)
+	move	v0, $16
+	syscall
+
+	.globl __syscall_cancel_arch_end
+	.type __syscall_cancel_arch_end, @function
+__syscall_cancel_arch_end:
+	addiu	sp, 32
+	.set	reorder
+
+	beq	a3,zero,1f
+	subu	v0,zero,v0
+1:
+	move	sp,fp
+	cfi_remember_state
+	cfi_def_cfa_register (29)
+	lw	ra,52(fp)
+	lw	fp,48(sp)
+	lw	s2,44(sp)
+	lw	s1,40(sp)
+	lw	s0,36(sp)
+	.set	noreorder
+	.set	nomacro
+	jr	ra
+	addiu	sp,sp,FRAME_SIZE
+
+	.set	macro
+	.set	reorder
+
+	cfi_def_cfa_offset (0)
+	cfi_restore (16)
+	cfi_restore (17)
+	cfi_restore (18)
+	cfi_restore (30)
+	cfi_restore (31)
+
+2:
+	cfi_restore_state
+#ifdef __PIC__
+	PTR_LA	t9, __syscall_do_cancel
+	jalr	t9
+#else
+	jal	__syscall_do_cancel
+#endif
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
diff --git a/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h b/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
index dadfa18..f4f23ce 100644
--- a/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
+++ b/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
@@ -18,6 +18,10 @@
 #ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
 #define _LINUX_MIPS_MIPS32_SYSDEP_H 1
 
+/* mips32 has cancelable syscalls with 7 arguments (currently only
+   sync_file_range).  */
+#define HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS	1
+
 /* There is some commonality.  */
 #include <sysdeps/unix/sysv/linux/sysdep.h>
 #include <sysdeps/unix/mips/mips32/sysdep.h>
diff --git a/sysdeps/unix/sysv/linux/mips/sigcontextinfo.h b/sysdeps/unix/sysv/linux/mips/sigcontextinfo.h
index c9bc083..f9c1758 100644
--- a/sysdeps/unix/sysv/linux/mips/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/mips/sigcontextinfo.h
@@ -16,7 +16,10 @@
    License along with the GNU C Library.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
 
+#include <stdint.h>
 #include <sgidefs.h>
 
 #if _MIPS_SIM == _ABIO32
@@ -39,4 +42,12 @@
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
 
-#endif
+#endif /* _MIPS_SIM == _ABIO32  */
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/syscall_cancel.c b/sysdeps/unix/sysv/linux/syscall_cancel.c
index ac08bb7..5a3d383 100644
--- a/sysdeps/unix/sysv/linux/syscall_cancel.c
+++ b/sysdeps/unix/sysv/linux/syscall_cancel.c
@@ -46,14 +46,16 @@ long int
 __syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
 		       __syscall_arg_t a1, __syscall_arg_t a2,
 		       __syscall_arg_t a3, __syscall_arg_t a4,
-		       __syscall_arg_t a5, __syscall_arg_t a6)
+		       __syscall_arg_t a5, __syscall_arg_t a6
+		       __SYSCALL_CANCEL7_ARG_DEF)
 {
   ADD_LABEL ("__syscall_cancel_arch_start");
   if (__glibc_unlikely (*ch & CANCELED_BITMASK))
     __syscall_do_cancel();
 
   INTERNAL_SYSCALL_DECL(err);
-  long int result = INTERNAL_SYSCALL_NCS (nr, err, 6, a1, a2, a3, a4, a5, a6);
+  long int result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5,
+					       a6 __SYSCALL_CANCEL7_ARG7);
   ADD_LABEL ("__syscall_cancel_arch_end");
   if (INTERNAL_SYSCALL_ERROR_P (result, err))
     return -INTERNAL_SYSCALL_ERRNO (result, err);

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=43ce4692d02a0a524e5c65eddad339b1ba679fc5

commit 43ce4692d02a0a524e5c65eddad339b1ba679fc5
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:03:03 2017 -0200

    nptl: sh: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the sh modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function and a
    cancellable syscall wrapper.
    
    SH requires an arch-specific syscall_cancel because
    INTERNAL_SYSCALL_NCS adds the required 'or' instructions to work around
    a hardware bug [1].  The implementation was based on the default C
    version built with GCC 6.2.1.
    
    Checked against a build and make check run-built-tests=no for
    sh4-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/sh/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/sh/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    [1] http://documentation.renesas.com/eng/products/mpumcu/tu/tnsh7456ae.pdf
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index ab3c417..47537de 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,10 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/sh/sysdep.h (L): New macro.
+	* sysdeps/unix/sysv/linux/sh/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/sh/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/sh/sysdep.h b/sysdeps/sh/sysdep.h
index 939c931..4208da9 100644
--- a/sysdeps/sh/sysdep.h
+++ b/sysdeps/sh/sysdep.h
@@ -24,6 +24,7 @@
 
 #define ALIGNARG(log2) log2
 #define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+#define L(label) .L##label
 
 #ifdef SHARED
 #define PLTJMP(_x)	_x##@PLT
diff --git a/sysdeps/unix/sysv/linux/sh/sigcontextinfo.h b/sysdeps/unix/sysv/linux/sh/sigcontextinfo.h
index 546f23b..3520286 100644
--- a/sysdeps/unix/sysv/linux/sh/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/sh/sigcontextinfo.h
@@ -16,6 +16,9 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
 #define SIGCONTEXT int _a2, int _a3, int _a4, struct sigcontext
 
 #define SIGCONTEXT_EXTRA_ARGS _a2, _a3, _a4,
@@ -24,3 +27,11 @@
 #define GET_STACK(ctx)	((void *) ctx.sc_regs[15])
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/sh/syscall_cancel.S b/sysdeps/unix/sysv/linux/sh/syscall_cancel.S
new file mode 100644
index 0000000..02a050d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/syscall_cancel.S
@@ -0,0 +1,125 @@
+/* Cancellable syscall wrapper.  Linux/sh version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+				   long int nr,
+				   long int arg1,
+				   long int arg2,
+				   long int arg3,
+				   long int arg4,
+				   long int arg5,
+				   long int arg6)  */
+
+ENTRY (__syscall_cancel_arch)
+
+#ifdef SHARED
+	mov.l	r12,@-r15
+	cfi_def_cfa_offset (4)
+	cfi_offset (12, -4)
+	mova	L(GT),r0
+	mov.l	L(GT),r12
+	sts.l	pr,@-r15
+	cfi_def_cfa_offset (8)
+	cfi_offset (17, -8)
+	add	r0,r12
+#else
+	sts.l	pr,@-r15
+	cfi_def_cfa_offset (4)
+	cfi_offset (17, -4)
+#endif
+
+	.globl __syscall_cancel_arch_start
+__syscall_cancel_arch_start:
+
+	/* if (*cancelhandling & CANCELED_BITMASK)
+	     __syscall_do_cancel()  */
+	mov.l	@r4,r0
+	tst	#4,r0
+	bf/s	1f
+
+	/* Issue a 6 argument syscall.  */
+	mov	r5,r3
+	mov	r6,r4
+	mov	r7,r5
+#ifdef SHARED
+	mov.l	@(8,r15),r6
+	mov.l	@(12,r15),r7
+	mov.l	@(16,r15),r0
+	mov.l	@(20,r15),r1
+#else
+	mov.l	@(4,r15),r6
+	mov.l	@(8,r15),r7
+	mov.l	@(12,r15),r0
+	mov.l	@(16,r15),r1
+#endif
+	trapa	#0x16
+
+/* The additional or is a workaround for a hardware issue:
+   http://documentation.renesas.com/eng/products/mpumcu/tu/tnsh7456ae.pdf  */
+	.globl __syscall_cancel_arch_end
+__syscall_cancel_arch_end:
+
+	or	r0,r0
+	or	r0,r0
+	or	r0,r0
+	or	r0,r0
+	or	r0,r0
+
+	lds.l	@r15+,pr
+	cfi_remember_state
+	cfi_restore (17)
+#ifdef SHARED
+	cfi_def_cfa_offset (4)
+	rts
+	mov.l	@r15+,r12
+	cfi_def_cfa_offset (0)
+	cfi_restore (12)
+	.align 1
+1:
+	cfi_restore_state
+	mov.l	L(SC),r1
+	bsrf	r1
+L(M):
+	nop
+
+	.align 2
+L(GT):
+	.long	_GLOBAL_OFFSET_TABLE_
+L(SC):
+	.long	__syscall_do_cancel-(L(M)+2)
+#else
+	cfi_def_cfa_offset (0)
+	rts
+	nop
+
+	.align 1
+1:
+	cfi_restore_state
+	mov.l	2f,r1
+	jsr	@r1
+	nop
+
+	.align 2
+2:
+	.long	__syscall_do_cancel
+#endif
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=699a11d33da467906c14dcc61c1db4f4d38ce5b6

commit 699a11d33da467906c14dcc61c1db4f4d38ce5b6
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:02:53 2017 -0200

    nptl: nios2: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the nios2 modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    The default syscall_cancel.c should be as expected for NIOS2. With
    GCC 6.2.1 syscall_cancel.c generates the following code:
    
    ---
    00000000 <__GI___syscall_cancel_arch>:
       0:   defffe04        addi    sp,sp,-8
       4:   dd800015        stw     r22,0(sp)
       8:   002ce03a        nextpc  r22
       c:   02000034        movhi   r8,0
      10:   42000004        addi    r8,r8,0
      14:   dfc00115        stw     ra,4(sp)
      18:   b22d883a        add     r22,r22,r8
    
    0000001c <__syscall_cancel_arch_start>:
      1c:   20c00017        ldw     r3,0(r4)
      20:   18c0010c        andi    r3,r3,4
      24:   18000f1e        bne     r3,zero,64 <__syscall_cancel_arch_end+0x18>
      28:   3015883a        mov     r10,r6
      2c:   2805883a        mov     r2,r5
      30:   da400517        ldw     r9,20(sp)
      34:   380b883a        mov     r5,r7
      38:   da000417        ldw     r8,16(sp)
      3c:   d9c00317        ldw     r7,12(sp)
      40:   d9800217        ldw     r6,8(sp)
      44:   5009883a        mov     r4,r10
      48:   003b683a        trap    0
    
    0000004c <__syscall_cancel_arch_end>:
      4c:   38000126        beq     r7,zero,54 <__syscall_cancel_arch_end+0x8>
      50:   0085c83a        sub     r2,zero,r2
      54:   dfc00117        ldw     ra,4(sp)
      58:   dd800017        ldw     r22,0(sp)
      5c:   dec00204        addi    sp,sp,8
      60:   f800283a        ret
      64:   b0800017        ldw     r2,0(r22)
      68:   103ee83a        callr   r2
    ---
    
    Checked against a build and make check run-built-tests=no for
    nios2-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index c244af5..ab3c417 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
 	(ucontext_get_pc): New function.
 	* sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
diff --git a/sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h b/sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h
index b244478..1fba6cd 100644
--- a/sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/nios2/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #include <sys/ucontext.h>
 #include "kernel-features.h"
 
@@ -33,3 +38,14 @@
   (act)->sa_flags |= SA_SIGINFO; \
   (sigaction) (sig, act, oact); \
 })
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  /* rt_restore_ucontext (arch/nios2/kernel/signal.c) sets this position
+     to the 'ea' register, which is stated as the exception return address
+     (pc) in arch/nios2/include/asm/ptrace.h.  */
+  return uc->uc_mcontext.regs[27];
+}
+
+#endif /* _SIGCONTEXTINFO_H  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=092188484f176b8f2a17c06f09b0018af973af34

commit 092188484f176b8f2a17c06f09b0018af973af34
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:02:36 2017 -0200

    nptl: sparc: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the sparc modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function, cancellable
    syscall wrappers, and a fix for pause.
    
    Sparc requires an arch-specific syscall_cancel implementation because
    INLINE_SYSCALL_NCS uses __SYSCALL_STRING (defined differently for
    sparc32 and sparc64), which issues additional instructions after the
    syscall instruction to check the resulting error code.  When used in
    the default syscall_cancel.c implementation, the label
    __syscall_cancel_arch_end is not placed just after the syscall as
    expected.  Both the 32-bit and 64-bit versions were based on the
    default C version built with GCC 6.1.
    
    Also, unlike other architectures, SPARC passes a sigcontext_t
    struct pointer as the third argument to a signal handler set with
    SA_SIGINFO (some info at [1]) on 64-bit, and a pt_regs pointer on
    32-bit.  From the Linux code:
    
    * arch/sparc/kernel/signal_64.c
    
    428         /* 3. signal handler back-trampoline and parameters */
    429         regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
    430         regs->u_regs[UREG_I0] = ksig->sig;
    431         regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
    432
    433         /* The sigcontext is passed in this way because of how it
    434          * is defined in GLIBC's /usr/include/bits/sigcontext.h
    435          * for sparc64.  It includes the 128 bytes of siginfo_t.
    436          */
    437         regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
    
    * arch/sparc/kernel/signal_32.c:
    
    392         regs->u_regs[UREG_FP] = (unsigned long) sf;
    393         regs->u_regs[UREG_I0] = ksig->sig;
    394         regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
    395         regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
    396
    397         regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
    398         regs->npc = (regs->pc + 4);
    
    So to access the signal mask in the signal frame, an arch-specific
    ucontext_get_mask function is defined which obtains the signal mask
    from the context.
    
    Checked on a SPARC T5 for sparc64-linux-gnu and sparcv9-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
    	(ucontext_get_pc): New function.
    	* sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
    	(ucontext_get_pc): Likewise.
    	* sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S: Likewise.
    	* sysdeps/unix/sysv/linux/sparc/sparc64/pause.c: New file.
    
    [1] https://www.spinics.net/lists/sparclinux/msg05037.html
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 3e077f4..c244af5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,13 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
+	(ucontext_get_pc): New function.
+	* sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
+	(ucontext_get_pc): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S: Likewise.
+	* sysdeps/unix/sysv/linux/sparc/sparc64/pause.c: New file.
+
 	* sysdeps/unix/sysv/linux/tile/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index e36fde6..98ccaa1 100644
--- a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -117,7 +117,7 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
     {							\
       __typeof (tid) __tid;				\
       while ((__tid = (tid)) != 0)			\
-	lll_futex_wait (&(tid), __tid, LLL_SHARED);	\
+	lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
     }							\
   while (0)
 
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h b/sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
index 99cf6ab..25db049 100644
--- a/sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/sparc/sparc32/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS
 #define GET_PC(__ctx)	((void *) ((__ctx)->si_regs.pc))
@@ -29,3 +34,50 @@
 #define GET_FRAME(__ctx)	ADVANCE_STACK_FRAME (GET_STACK(__ctx))
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+/* Different from other architectures, SPARC32 passes a pt_regs (or pt_regs32
+   in 32-bit compat mode) struct pointer as the third argument to an
+   sa_sigaction handler with SA_SIGINFO.
+
+   Also current sparc32 rt signal frame layout is:
+
+   field                                  | size
+   ---------------------------------------| ----
+   struct rt_signal_frame {               |
+     struct sparc_stackf     ss;          |  96
+     siginfo_t               info;        | 128
+     struct pt_regs          regs;        |  80
+     sigset_t                mask;        | 128
+     __siginfo_fpu_t __user  *fpu_save;   |   4
+     unsigned int            insns[2];    |   8
+     stack_t                 stack;       |  12
+     unsigned int            extra_size;  |   4
+     __siginfo_rwin_t __user *rwin_save;  |   4
+   };
+
+   So to obtain a pointer to the signal mask based on the address of
+   pt_regs we need to add 208.  */
+
+struct pt_regs32
+{
+   unsigned int psr;
+   unsigned int pc;
+   unsigned int npc;
+   unsigned int y;
+   unsigned int u_regs[16];
+};
+
+static inline uintptr_t
+ucontext_get_pc (struct pt_regs32 *regs)
+{
+  return regs->pc;
+}
+
+static inline sigset_t *
+ucontext_get_mask (const void *ctx)
+{
+  return (sigset_t *)((uintptr_t)ctx + 208);
+}
+#define UCONTEXT_SIGMASK(ctx) ucontext_get_mask (ctx)
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S b/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
new file mode 100644
index 0000000..c06f9d1
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sparc/sparc32/syscall_cancel.S
@@ -0,0 +1,74 @@
+/* Cancellable syscall wrapper.  Linux/sparc32 version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+				   long int nr,
+				   long int arg1,
+				   long int arg2,
+				   long int arg3,
+				   long int arg4,
+				   long int arg5,
+				   long int arg6)  */
+
+ENTRY (__syscall_cancel_arch)
+	save	%sp, -104, %sp
+
+	cfi_window_save
+	cfi_register (15, 31)
+	cfi_def_cfa_register (30)
+
+	.globl __syscall_cancel_arch_start
+__syscall_cancel_arch_start:
+
+	/* if (*cancelhandling & CANCELED_BITMASK)
+	     __syscall_do_cancel()  */
+	ld	[%i0], %g2
+	andcc	%g2, 4, %g0
+	bne,pn	%icc, 2f
+
+	/* Issue a 6 argument syscall.  */
+	mov	%i1, %g1
+	mov	%i2, %o0
+	mov	%i3, %o1
+	mov	%i4, %o2
+	mov	%i5, %o3
+	ld	[%fp+92], %o4
+	ld	[%fp+96], %o5
+	ta	0x10
+
+	.globl __syscall_cancel_arch_end
+__syscall_cancel_arch_end:
+	bcc	1f
+	mov	0,%g1
+	sub	%g0, %o0, %o0
+	mov	1, %g1
+
+1:
+	mov	%o0, %i0
+	return	%i7+8
+	 nop
+
+2:
+	call	__syscall_do_cancel, 0
+	 nop
+	nop
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h b/sysdeps/unix/sysv/linux/sparc/sparc64/pause.c
similarity index 52%
copy from sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
copy to sysdeps/unix/sysv/linux/sparc/sparc64/pause.c
index ba53b4a..4a0cf4d 100644
--- a/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/sparc/sparc64/pause.c
@@ -1,6 +1,6 @@
-/* Copyright (C) 1999-2017 Free Software Foundation, Inc.
+/* Linux pause syscall implementation.  Linux/sparc64.
+   Copyright (C) 2017 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
-   Contributed by Jakub Jelinek <jj@ultra.linux.cz>, 1999.
 
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
@@ -16,16 +16,10 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
-#ifndef STACK_BIAS
-#define STACK_BIAS 2047
-#endif
-#define SIGCONTEXT struct sigcontext *
-#define SIGCONTEXT_EXTRA_ARGS
-#define GET_PC(__ctx)	((void *) ((__ctx)->sigc_regs.tpc))
-#define ADVANCE_STACK_FRAME(__next) \
-	((void *) (((unsigned long *) (((unsigned long int) (__next))     \
-					   + STACK_BIAS))+14))
-#define GET_STACK(__ctx)	((void *) ((__ctx)->sigc_regs.u_regs[14]))
-#define GET_FRAME(__ctx)	ADVANCE_STACK_FRAME (GET_STACK (__ctx))
-#define CALL_SIGHANDLER(handler, signo, ctx) \
-  (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+#include <sys/syscall.h>
+
+/* On sparc an interrupted pause syscall returns with a PC indicating a
+   side effect, which deviates from other architectures.  Fall back to the
+   ppoll-based implementation.  */
+#undef __NR_pause
+#include <sysdeps/unix/sysv/linux/pause.c>
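
With __NR_pause undefined, the generic Linux pause.c pulled in above
implements pause through a cancellable ppoll with no file descriptors and
no timeout, roughly as follows (a sketch of that generic fallback, not part
of this diff):

int
__libc_pause (void)
{
  /* Blocks until a signal arrives; goes through the cancellable syscall
     machinery added by this series.  */
  return SYSCALL_CANCEL (ppoll, NULL, 0, NULL, NULL);
}
weak_alias (__libc_pause, pause)
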
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h b/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
index ba53b4a..836ba01 100644
--- a/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #ifndef STACK_BIAS
 #define STACK_BIAS 2047
 #endif
@@ -29,3 +34,38 @@
 #define GET_FRAME(__ctx)	ADVANCE_STACK_FRAME (GET_STACK (__ctx))
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+/* Different from other architectures, SPARC64 passes a sigcontext_t struct
+   pointer as the third argument to an sa_sigaction handler with SA_SIGINFO.
+
+   Also current sparc64 rt signal frame layout is:
+
+   field                                  | size
+   ---------------------------------------| ----
+   struct rt_signal_frame {               |
+      struct sparc_stackf     ss;         | 192
+      siginfo_t               info;       | 128
+      struct pt_regs          regs;       | 160
+      __siginfo_fpu_t __user  *fpu_save;  |   8
+      stack_t                 stack;      |  24
+      sigset_t                mask;       | 128
+      __siginfo_rwin_t        *rwin_save; |   8
+   };
+
+   So to obtain a pointer to the signal mask based on the address of info
+   we need to add 320.  */
+
+static inline uintptr_t
+ucontext_get_pc (const struct sigcontext *sigctx)
+{
+  return sigctx->sigc_regs.tpc;
+}
+
+static inline sigset_t *
+ucontext_get_mask (const void *ctx)
+{
+  return (sigset_t *)((uintptr_t)ctx + 320);
+}
+#define UCONTEXT_SIGMASK(ctx) ucontext_get_mask (ctx)
+
+#endif /* _SIGCONTEXTINFO_H  */
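
For reference, the 320 used by the new ucontext_get_mask follows directly
from the layout listed above: starting from the info pointer the handler
receives, the mask lies past info (128), regs (160), fpu_save (8) and
stack (24), and 128 + 160 + 8 + 24 = 320.
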
diff --git a/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S b/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
new file mode 100644
index 0000000..f3eef78
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sparc/sparc64/syscall_cancel.S
@@ -0,0 +1,74 @@
+/* Cancellable syscall wrapper.  Linux/sparc64 version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+					long int nr,
+					long int arg1,
+					long int arg2,
+					long int arg3,
+					long int arg4,
+					long int arg5,
+					long int arg6)  */
+
+ENTRY (__syscall_cancel_arch)
+	save	%sp, -176, %sp
+
+	cfi_window_save
+	cfi_register (15, 31)
+	cfi_def_cfa_register (30)
+
+	.globl __syscall_cancel_arch_start
+__syscall_cancel_arch_start:
+
+	/* if (*cancelhandling & CANCELED_BITMASK)
+	     __syscall_do_cancel()  */
+	lduw	[%i0], %g1
+	andcc	%g1, 4, %g0
+	bne,pn	%xcc, 2f
+
+	/* Issue a 6 argument syscall.  */
+	mov	%i1, %g1
+	mov	%i2, %o0
+	mov	%i3, %o1
+	mov	%i4, %o2
+	mov	%i5, %o3
+	ldx	[%fp + STACK_BIAS + 176], %o4
+	ldx	[%fp + STACK_BIAS + 184], %o5
+	ta	0x6d
+
+	.global __syscall_cancel_arch_end
+__syscall_cancel_arch_end:
+
+	bcc,pt	%xcc, 1f
+	mov	0, %g1
+	sub	%g0, %o0, %o0
+	mov	1, %g1
+1:
+	mov	%o0, %i0
+	return	%i7+8
+	nop
+
+2:
+	call	__syscall_do_cancel, 0
+	nop
+	nop
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=aaa422919dcc8eb39bf1daa40a7269c2e685fba3

commit aaa422919dcc8eb39bf1daa40a7269c2e685fba3
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:02:28 2017 -0200

    nptl: tile: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the tile modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    The default syscall_cancel.c should be as expected for TILE. With
    GCC 6.1 syscall_cancel.c generates the following code:
    
    * TILEGX
    
    0000000000000000 <__GI___syscall_cancel_arch>:
       0:   c7bf46ce576bfd9d        { move r29, sp ; addi r28, sp, -24 ; st sp, lr }
       8:   283bf825401e0db6        { addi sp, sp, -32 ; move r10, r1 }
      10:   eeedf85b85c18d9b        { addi r27, sp, 24 ; move lr, r2 ; st r28, r29 }
      18:   eef5f88155bbf0c1        { move r1, r3 ; move r2, r4 ; st r27, r30 }
      20:   283bf8c25107f143        { move r3, r5 ; move r4, r6 }
      28:   283bf8e2d1483000        { move r5, r7 }
    
    0000000000000030 <__syscall_cancel_arch_start>:
      30:   9c5e4000340c3000        { ld4s r11, r0 }
      38:   18182165d1483000        { andi r11, r11, 4 }
      40:   17c00163d1483000        { bnez r11, 78 <__syscall_cancel_arch_end+0x20> }
      48:   283bfee051483000        { move r0, lr }
      50:   286b180051485000        { swint1 }
    
    0000000000000058 <__syscall_cancel_arch_end>:
      58:   180906ced1401ff7        { subx lr, zero, r1 ; addi r29, sp, 32 }
      60:   87b8c6ce4dd77040        { cmovnez r0, r1, lr ; addi r28, sp, 24 ; ld lr, r29 }
      68:   9ef6400035cc3000        { ld r30, r28 }
      70:   286a6ee040120db6        { addi sp, sp, 32 ; jrp lr }
      78:   2000000051483000        { jal 78 <__syscall_cancel_arch_end+0x20> }
    
    * TILEGX32
    
    00000000 <__GI___syscall_cancel_arch>:
       0:   cbbfc6ce576bfd9d        { move r29, sp ; addxi r28, sp, -8 ; st sp, lr }
       8:   283bf825402f0db6        { addxi sp, sp, -16 ; move r10, r1 }
      10:   eeedf860d5cbf0b7        { move lr, r2 ; move r1, r3 ; st r28, r29 }
      18:   283bf8a1d107f102        { move r2, r4 ; move r3, r5 }
      20:   283bf8e2d107f184        { move r4, r6 ; move r5, r7 }
    
    00000028 <__syscall_cancel_arch_start>:
      28:   9c5e4000340c3000        { ld4s r11, r0 }
      30:   18182165d1483000        { andi r11, r11, 4 }
      38:   17c0016351483000        { bnez r11, 68 <__syscall_cancel_arch_end+0x18> }
      40:   283bfee051483000        { move r0, lr }
      48:   286b180051485000        { swint1 }
    
    00000050 <__syscall_cancel_arch_end>:
      50:   28660fe540210d9d        { addxi r29, sp, 16 ; subx r10, zero, r1 }
      58:   9fbe40004dd4a040        { cmovnez r0, r1, r10 ; ld lr, r29 }
      60:   286a6ee040210db6        { addxi sp, sp, 16 ; jrp lr }
      68:   2000000051483000        { jal 68 <__syscall_cancel_arch_end+0x18> }
    
    * TILEPRO
    
    00000000 <__GI___syscall_cancel_arch>:
       0:   bfdfa6ce1b7bfd9d        { move r29, sp ; addi r28, sp, -12 ; sw sp, lr }
       8:   0833f825403f0db6        { addi sp, sp, -16 ; move r10, r1 }
      10:   9f75f85bc9d0cd9b        { addi r27, sp, 12 ; move lr, r2 ; sw r28, r29 }
      18:   9f6df8811debf0c1        { move r1, r3 ; move r2, r4 ; sw r27, r30 }
      20:   0833f8c200cff143        { move r3, r5 ; move r4, r6 }
      28:   0833f8e2f0165000        { move r5, r7 }
    
    00000030 <__syscall_cancel_arch_start>:
      30:   dc05080068ba5000        { lw r11, r0 }
      38:   30202165f0165000        { andi r11, r11, 4 }
      40:   2800396170165000        { bnz r11, 78 <__syscall_cancel_arch_end+0x20> }
      48:   0833fee070165000        { move r0, lr }
      50:   400b980070166000        { swint1 }
    
    00000058 <__syscall_cancel_arch_end>:
      58:   301886ce81741ff7        { sub lr, zero, r1 ; addi r29, sp, 16 }
      60:   bc7066ce17777040        { mvnz r0, r1, lr ; addi r28, sp, 12 ; lw lr, r29 }
      68:   dc75080069ea5000        { lw r30, r28 }
      70:   081606e040310db6        { addi sp, sp, 16 ; jrp lr }
      78:   6000000070165000        { jal 78 <__syscall_cancel_arch_end+0x20> }
    
    Checked against a build and make check run-built-tests=no for
    tile-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/tile/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 89a8edb..3e077f4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/tile/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S: New file.
 	* sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
 	(ucontext_get_pc): New function.
diff --git a/sysdeps/unix/sysv/linux/tile/sigcontextinfo.h b/sysdeps/unix/sysv/linux/tile/sigcontextinfo.h
index bad81e4..043ac75 100644
--- a/sysdeps/unix/sysv/linux/tile/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/tile/sigcontextinfo.h
@@ -16,6 +16,10 @@
    License along with the GNU C Library.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
 #include <arch/abi.h>
 
 #define SIGCONTEXT siginfo_t *_si, ucontext_t *
@@ -25,3 +29,11 @@
 #define GET_STACK(ctx)	((void *) (long) ctx->uc_mcontext.sp)
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=798159f67403b3ab44ea51bf15d50ecb0c8551ca

commit 798159f67403b3ab44ea51bf15d50ecb0c8551ca
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:02:17 2017 -0200

    nptl: microblaze: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the microblaze modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    Microblaze requires an arch-specific assembly implementation because
    the architecture's INTERNAL_SYSCALL_NCS implementation adds a nop after
    the brki instruction, since that instruction expects a branch delay
    slot.  I based this implementation on generated assembly using GCC 6.1.
    
    Checked against a build and make check run-built-tests=no for
    microblaze-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
    	(ucontext_get_pc): New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 82bea8e..89a8edb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
+	(ucontext_get_pc): New function.
+
 	* sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h b/sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
index 145b3cb..1708937 100644
--- a/sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/microblaze/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT int _code, ucontext_t *
 #define SIGCONTEXT_EXTRA_ARGS _code,
 #define GET_PC(ctx)    ((void *) (ctx)->uc_mcontext.regs.pc)
@@ -23,3 +28,11 @@
 #define GET_STACK(ctx) ((void *) (ctx)->uc_mcontext.regs.sp)
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.regs.pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S b/sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S
new file mode 100644
index 0000000..d892362
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/microblaze/syscall_cancel.S
@@ -0,0 +1,62 @@
+/* Cancellable syscall wrapper.  Linux/microblaze version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+				   long int nr,
+				   long int arg1,
+				   long int arg2,
+				   long int arg3,
+				   long int arg4,
+				   long int arg5,
+				   long int arg6)  */
+
+ENTRY (__syscall_cancel_arch)
+
+	.globl __syscall_cancel_arch_start
+__syscall_cancel_arch_start:
+
+	lwi	r3,r5,0
+	andi	r3,r3,4 #and1
+	bneid	r3,1f
+	addk	r12,r6,r0
+
+	addk	r5,r7,r0
+	addk	r6,r8,r0
+	addk	r7,r9,r0
+	addk	r8,r10,r0
+	lwi	r9,r1,56
+	lwi	r10,r1,60
+	brki	r14,8
+
+	.globl __syscall_cancel_arch_end
+__syscall_cancel_arch_end:
+
+	nop
+	lwi	r15,r1,0
+	rtsd	r15,8
+	addik	r1,r1,28
+
+1:
+	brlid	r15, __syscall_do_cancel
+	nop
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
+

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=3d21fddb78e8fe2027cdfbe7d9ce64b67cacfa89

commit 3d21fddb78e8fe2027cdfbe7d9ce64b67cacfa89
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:02:08 2017 -0200

    nptl: m68k: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the m68k modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    The default syscall_cancel.c is as expected for m68k with GCC 6.2.1.
    It generates the following code for syscall_cancel.os:
    
    ---
    00000000 <__GI___syscall_cancel_arch>:
       0:   4e56 0000       linkw %fp,#0
       4:   48e7 3c00       moveml %d2-%d5,%sp@-
    
    00000008 <__syscall_cancel_arch_start>:
       8:   206e 0008       moveal %fp@(8),%a0
       c:   2010            movel %a0@,%d0
       e:   0800 0002       btst #2,%d0
      12:   6628            bnes 3c <__syscall_cancel_arch_end+0xa>
      14:   206e 0024       moveal %fp@(36),%a0
      18:   2a2e 0020       movel %fp@(32),%d5
      1c:   282e 001c       movel %fp@(28),%d4
      20:   262e 0018       movel %fp@(24),%d3
      24:   242e 0014       movel %fp@(20),%d2
      28:   222e 0010       movel %fp@(16),%d1
      2c:   202e 000c       movel %fp@(12),%d0
      30:   4e40            trap #0
    
    00000032 <__syscall_cancel_arch_end>:
      32:   4cee 003c fff0  moveml %fp@(-16),%d2-%d5
      38:   4e5e            unlk %fp
      3a:   4e75            rts
      3c:   61ff 0000 0000  bsrl 3e <__syscall_cancel_arch_end+0xc>
    ---
    
    Checked against a build and make check run-built-tests=no for
    m68k-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index fd8e645..82bea8e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h b/sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h
index 3c43e55..f39009f 100644
--- a/sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/m68k/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT int _code, struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS _code,
 #define GET_PC(ctx)	((void *) (ctx)->sc_pc)
@@ -23,3 +28,11 @@
 #define GET_STACK(ctx)	((void *) (ctx)->sc_usp)
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.gregs[R_PC];
+}
+
+#endif /* _SIGCONTEXTINFO_H  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=2b065622b1496f214c88800178e14a9ca6d0eded

commit 2b065622b1496f214c88800178e14a9ca6d0eded
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:01:58 2017 -0200

    nptl: alpha: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the alpha modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    The default syscall_cancel.c should be fine for alpha; GCC 6.1
    generates the following code for syscall_cancel.os:
    
    ---
    0000000000000000 <__GI___syscall_cancel_arch>:
       0:   00 00 bb 27     ldah    gp,0(t12)
       4:   00 00 bd 23     lda     gp,0(gp)
       8:   f0 ff de 23     lda     sp,-16(sp)
       c:   00 04 f1 47     mov     a1,v0
      10:   00 00 5e b7     stq     ra,0(sp)
    
    0000000000000014 <__syscall_cancel_arch_start>:
      14:   00 00 30 a0     ldl     t0,0(a0)
      18:   01 00 e1 43     sextl   t0,t0
      1c:   01 90 20 44     and     t0,0x4,t0
      20:   0f 00 20 f4     bne     t0,60 <__syscall_cancel_arch_end+0x20>
      24:   10 04 f2 47     mov     a2,a0
      28:   11 04 f3 47     mov     a3,a1
      2c:   12 04 f4 47     mov     a4,a2
      30:   10 00 9e a6     ldq     a4,16(sp)
      34:   13 04 f5 47     mov     a5,a3
      38:   18 00 be a6     ldq     a5,24(sp)
      3c:   83 00 00 00     callsys
    
    0000000000000040 <__syscall_cancel_arch_end>:
      40:   21 05 e0 43     negq    v0,t0
      44:   00 00 5e a7     ldq     ra,0(sp)
      48:   c0 04 61 46     cmovne  a3,t0,v0
      4c:   10 00 de 23     lda     sp,16(sp)
      50:   01 80 fa 6b     ret
      54:   00 00 fe 2f     unop
      58:   1f 04 ff 47     nop
      5c:   00 00 fe 2f     unop
      60:   00 00 7d a7     ldq     t12,0(gp)
      64:   00 40 5b 6b     jsr     ra,(t12),68 <__syscall_cancel_arch_end+0x28>
      68:   1f 04 ff 47     nop
      6c:   00 00 fe 2f     unop
    ---
    
    Checked on alpha-linux-gnu, no regression found.
    
    	* sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 00173f4..fd8e645 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,8 +1,12 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h (ucontext_get_pc,
 	ucontext_get_mask): New functions.
 	* sysdeps/unix/sysv/linux/ia64/syscall_cancel.S: New file.
+
 	* sysdeps/unix/sysv/linux/s390/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h b/sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h
index e8b74d5..a61c289 100644
--- a/sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/alpha/sigcontextinfo.h
@@ -15,6 +15,11 @@
    License along with the GNU C Library.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT int _code, struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS _code,
 #define GET_PC(ctx)	((void *) (ctx)->sc_pc)
@@ -22,3 +27,11 @@
 #define GET_STACK(ctx)	((void *) (ctx)->sc_regs[30])
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.sc_pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=74fff3eca0b9efb62ed598a20ece79f9e677a9bb

commit 74fff3eca0b9efb62ed598a20ece79f9e677a9bb
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Jan 16 17:01:44 2017 -0200

    nptl: ia64: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the ia64 modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function, a workaround
    for the mismatched sigcontext::sa_flags definition between the kernel
    and GLIBC (tracked by BZ#21634), and an arch-specific syscall_cancel
    implementation.
    
    IA64 requires an arch-specific syscall_cancel implementation because
    {INLINE,INTERNAL}_SYSCALL is implemented by branching to a gate
    DSO (similar to i386), which renders the pointer comparison in the
    SIGCANCEL handler wrong.  This incurs a performance penalty due to
    the use of a break instruction instead of an eds one; however,
    cancellable syscalls are expected to block in the kernel anyway.
    
    Checked with an ia64-linux-gnu build with run-built-tests=no.
    
    	* sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h (ucontext_get_pc,
    	ucontext_get_mask): New functions.
    	* sysdeps/unix/sysv/linux/ia64/syscall_cancel.S: New file.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 3394218..00173f4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h (ucontext_get_pc,
+	ucontext_get_mask): New functions.
+	* sysdeps/unix/sysv/linux/ia64/syscall_cancel.S: New file.
 	* sysdeps/unix/sysv/linux/s390/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h b/sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h
index 37a6190..927cb3c 100644
--- a/sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/ia64/sigcontextinfo.h
@@ -15,6 +15,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT siginfo_t *_si, struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS _si,
 #define GET_PC(ctx)	((ctx)->sc_ip)
@@ -23,3 +28,22 @@
 
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+/* Unlike other architectures, ia64 passes a struct sigcontext in the
+   third argument of an sa_sigaction handler with SA_SIGINFO.  */
+static inline uintptr_t
+ucontext_get_pc (const struct sigcontext *sigctx)
+{
+  return sigctx->sc_ip;
+}
+
+static inline sigset_t *
+ucontext_get_mask (const struct sigcontext *sigctx)
+{
+  /* IA64 sigcontext::sa_mask is a sigset_t since Linux 2.6.12 (initial
+     git repository build).  */
+  return (sigset_t *) &sigctx->sc_mask;
+}
+#define UCONTEXT_SIGMASK(ctx) ucontext_get_mask (ctx)
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/ia64/syscall_cancel.S b/sysdeps/unix/sysv/linux/ia64/syscall_cancel.S
new file mode 100644
index 0000000..9c6c9d3
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/ia64/syscall_cancel.S
@@ -0,0 +1,94 @@
+/* Cancellable syscall wrapper.  Linux/IA64 version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#undef ret
+
+/* long int __syscall_cancel_arch (int *cancelhandling, long int nr,
+				   long int arg1, long int arg2, long int arg3,
+				   long int arg4, long int arg5, long int arg6)
+*/
+
+ENTRY (__syscall_cancel_arch)
+	.prologue 14, 40
+	.mmi
+	.save ar.pfs, r41
+	alloc r41 = ar.pfs, 8, 4, 8, 0
+	.vframe r42
+	mov r42 = r12
+	.save rp, r40
+	mov r40 = b0
+	.body
+	;;
+
+	.global __syscall_cancel_arch_start	
+	.type __syscall_cancel_arch_start,@function	
+__syscall_cancel_arch_start:
+
+	;;
+	.mmi
+	nop 0
+	ld4.acq r14 = [r32]
+	nop 0
+	;;
+	.mib
+	nop 0
+	tbit.z p6, p7 = r14, 2
+	.pred.safe_across_calls p1-p63
+	(p7) br.call.dpnt.many b0 = __syscall_do_cancel#
+	.pred.safe_across_calls p1-p5,p16-p63
+	;;
+	.mmi
+	mov r15 = r33
+	mov r49 = r39
+	mov r48 = r38
+	.mmi
+	mov r47 = r37
+	mov r46 = r36
+	mov r45 = r35
+	;;
+	.mmi
+	nop 0
+	mov r44 = r34
+	nop 0
+	;;
+	break 0x100000
+	;;
+
+	.global __syscall_cancel_arch_end	
+	.type __syscall_cancel_arch_end,@function	
+__syscall_cancel_arch_end:
+
+	;;
+	.mmi
+	cmp.ne p6, p7 = -1, r10
+	nop 0
+	mov ar.pfs = r41
+	;;
+	.mmi
+	nop 0
+	(p7) sub r8 = r0, r8
+	mov b0 = r40
+	.mmb
+	nop 0
+	.restore sp
+	mov r12 = r42
+	br.ret.sptk.many b0
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=02a1736299eb711745898cdc5690e92fe3e83ce4

commit 02a1736299eb711745898cdc5690e92fe3e83ce4
Author: Adhemerval Zanella <adhemerval.zanella@linaro.com>
Date:   Wed Aug 12 10:51:38 2015 -0300

    nptl: s390: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the s390 modifications required for the BZ#12683 fix.
    It basically adds the required ucontext_get_pc function.
    
    The cancellable syscall wrapper built for s390 with GCC 7.2.1 and
    default configuration flags shows the markers at the expected
    places:
    
    ---
    __GI___syscall_cancel_arch:
    .LFB39:
            .cfi_startproc
            stm     %r6,%r15,24(%r15)
            .cfi_offset 6, -72
            .cfi_offset 7, -68
            .cfi_offset 8, -64
            .cfi_offset 9, -60
            .cfi_offset 10, -56
            .cfi_offset 11, -52
            .cfi_offset 12, -48
            .cfi_offset 13, -44
            .cfi_offset 14, -40
            .cfi_offset 15, -36
            ahi     %r15,-96
            .cfi_def_cfa_offset 192
    
            .global __syscall_cancel_arch_start
    .type __syscall_cancel_arch_start,@function
    __syscall_cancel_arch_start:
            l       %r0,0(%r2)
            tml     %r0,4
            jne     .L5
            lr      %r1,%r3
            lr      %r2,%r4
            lr      %r3,%r5
            lr      %r4,%r6
            l       %r5,192(%r15)
            l       %r6,196(%r15)
            l       %r7,200(%r15)
            svc    0
    
            .global __syscall_cancel_arch_end
    .type __syscall_cancel_arch_end,@function
    __syscall_cancel_arch_end:
            l       %r4,152(%r15)
            lm      %r6,%r15,120(%r15)
            .cfi_remember_state
            .cfi_restore 15
            .cfi_restore 14
            .cfi_restore 13
            .cfi_restore 12
            .cfi_restore 11
            .cfi_restore 10
            .cfi_restore 9
            .cfi_restore 8
            .cfi_restore 7
            .cfi_restore 6
            .cfi_def_cfa_offset 96
            br      %r4
    .L5:
            .cfi_restore_state
            brasl   %r14,__syscall_do_cancel
            .cfi_endproc
    ---
    
    The s390x version also shows similar placement:
    
    ---
    __GI___syscall_cancel_arch:
            .cfi_startproc
            stmg    %r6,%r15,48(%r15)
            .cfi_offset 6, -112
            .cfi_offset 7, -104
            .cfi_offset 8, -96
            .cfi_offset 9, -88
            .cfi_offset 10, -80
            .cfi_offset 11, -72
            .cfi_offset 12, -64
            .cfi_offset 13, -56
            .cfi_offset 14, -48
            .cfi_offset 15, -40
            aghi    %r15,-160
            .cfi_def_cfa_offset 320
    
            .global __syscall_cancel_arch_start
    .type __syscall_cancel_arch_start,@function
    __syscall_cancel_arch_start:
            l       %r0,0(%r2)
            tmll    %r0,4
            jne     .L5
            lgr     %r1,%r3
            lgr     %r2,%r4
            lgr     %r3,%r5
            lgr     %r4,%r6
            lg      %r5,320(%r15)
            lg      %r6,328(%r15)
            lg      %r7,336(%r15)
            svc    0
    
            .global __syscall_cancel_arch_end
    .type __syscall_cancel_arch_end,@function
    __syscall_cancel_arch_end:
            lg      %r4,272(%r15)
            lmg     %r6,%r15,208(%r15)
            .cfi_remember_state
            .cfi_restore 15
            .cfi_restore 14
            .cfi_restore 13
            .cfi_restore 12
            .cfi_restore 11
            .cfi_restore 10
            .cfi_restore 9
            .cfi_restore 8
            .cfi_restore 7
            .cfi_restore 6
            .cfi_def_cfa_offset 160
            br      %r4
    .L5:
            .cfi_restore_state
            brasl   %r14,__syscall_do_cancel
            .cfi_endproc
    ---
    
    Checked with s390-linux-gnu and s390x-linux-gnu builds with
    run-built-tests=no.
    
    	* sysdeps/unix/sysv/linux/s390/sigcontextinfo.h (ucontext_get_pc):
    	New function.

diff --git a/ChangeLog b/ChangeLog
index c8bdc46..3394218 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,7 +1,9 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/s390/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/arm/Makefile (CFLAGS-syscall_cancel.c): New
-	rule.
 	* sysdeps/unix/sysv/linux/arm/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/s390/sigcontextinfo.h b/sysdeps/unix/sysv/linux/s390/sigcontextinfo.h
index 90ead3f..676a8bd 100644
--- a/sysdeps/unix/sysv/linux/s390/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/s390/sigcontextinfo.h
@@ -16,7 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
 #include <signal.h>
+#include <stdint.h>
 
 #define SIGCONTEXT struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS
@@ -25,3 +29,16 @@
 #define GET_STACK(ctx)	((void *)((ctx)->sregs->regs.gprs[15]))
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+#ifdef __s390x__
+  return uc->uc_mcontext.psw.addr;
+#else
+  /* We have 31bit addresses, remove bit 0.  */
+  return uc->uc_mcontext.psw.addr & 0x7FFFFFFF;
+#endif
+}
+
+#endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=1d4e4b6d5208f6695742799c61324e6bf04896d1

commit 1d4e4b6d5208f6695742799c61324e6bf04896d1
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Fri May 8 17:12:31 2015 -0300

    nptl: arm: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the ARM modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function and adjusts
    the generic syscall_cancel build.
    
    For ARM we need to build syscall_cancel in ARM mode (-marm) to keep
    INTERNAL_SYSCALL from issuing the syscall through the helper gate
    __libc_do_syscall (which would invalidate the marker checks in the
    SIGCANCEL handler).
    
    Checked on arm-linux-gnueabihf.
    
    	* sysdeps/unix/sysv/linux/arm/Makefile (CFLAGS-syscall_cancel.c): New
    	rule.
    	* sysdeps/unix/sysv/linux/arm/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 9f6e2da..c8bdc46 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,10 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/arm/Makefile (CFLAGS-syscall_cancel.c): New
+	rule.
+	* sysdeps/unix/sysv/linux/arm/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h (ucontext_get_pc):
 	New function.
 
diff --git a/sysdeps/unix/sysv/linux/arm/Makefile b/sysdeps/unix/sysv/linux/arm/Makefile
index 4adc35d..8f01b52 100644
--- a/sysdeps/unix/sysv/linux/arm/Makefile
+++ b/sysdeps/unix/sysv/linux/arm/Makefile
@@ -30,6 +30,9 @@ endif
 ifeq ($(subdir),nptl)
 libpthread-sysdep_routines += libc-do-syscall
 libpthread-shared-only-routines += libc-do-syscall
+
+# INLINE_SYSCALL uses the helper __libc_do_syscall in thumb mode.
+CFLAGS-syscall_cancel.c += -marm
 endif
 
 ifeq ($(subdir),resolv)
diff --git a/sysdeps/unix/sysv/linux/arm/sigcontextinfo.h b/sysdeps/unix/sysv/linux/arm/sigcontextinfo.h
index d3313af..8132a95 100644
--- a/sysdeps/unix/sysv/linux/arm/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/arm/sigcontextinfo.h
@@ -16,6 +16,10 @@
    License along with the GNU C Library.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
 #include <sys/ucontext.h>
 
 #define SIGCONTEXT siginfo_t *_si, ucontext_t *
@@ -46,3 +50,11 @@
   (act)->sa_flags |= SA_SIGINFO; \
   (sigaction) (sig, act, oact); \
 })
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.arm_pc;
+}
+
+#endif /* _SIGCONTEXTINFO_H  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=73d0ac79798bb632e792d048f5b048a0005b6b9a

commit 73d0ac79798bb632e792d048f5b048a0005b6b9a
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Wed May 6 17:51:29 2015 -0300

    nptl: aarch64: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the aarch64 modifications required for the BZ#12683.
    It basically adds the required ucontext_get_pc function.
    
    The cancellable syscall wrapper built for aarch64 with GCC 7.2.1 and
    default configuration flags shows the expected optimized version:
    
    ---
    __GI___syscall_cancel_arch:
    .LFB38:
            .cfi_startproc
            .global __syscall_cancel_arch_start
    .type __syscall_cancel_arch_start,@function
    __syscall_cancel_arch_start:
            ldr     w9, [x0]
            tbnz    x9, 2, .L7
            mov     x8, x1
            mov     x0, x2
            mov     x1, x3
            mov     x2, x4
            mov     x3, x5
            mov     x4, x6
            mov     x5, x7
            svc     0       // syscall nr
            .global __syscall_cancel_arch_end
    .type __syscall_cancel_arch_end,@function
    __syscall_cancel_arch_end:
            ret
    ---
    
    Similar code is obtained with GCC 5.3.1, so I see no need to provide
    an arch-specific syscall_cancel.S for aarch64.
    
    Checked on aarch64-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 58a82cb..9f6e2da 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S: New file.
 	* sysdeps/unix/sysv/linux/powerpc/sysdep-cancel.h
 	(__pthread_get_pc): New function.
diff --git a/sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h b/sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h
index 7793d11..4213be9 100644
--- a/sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/aarch64/sigcontextinfo.h
@@ -16,6 +16,9 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
 #include <stdint.h>
 #include <sys/ucontext.h>
 
@@ -33,3 +36,11 @@
   (act)->sa_flags |= SA_SIGINFO; \
   (sigaction) (sig, act, oact); \
 })
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.pc;
+}
+
+#endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=13574fb36a66ee974c5a4836b8c989c646b41ed5

commit 13574fb36a66ee974c5a4836b8c989c646b41ed5
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Fri Sep 18 18:14:19 2015 -0300

    nptl: powerpc: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the powerpc modifications required for the BZ#12683.
    It basically adds the required __pthread_get_pc function and an
    arch-specific syscall_cancel implementation.
    
    Powerpc requires an arch-specific syscall_cancel because
    INTERNAL_SYSCALL_NCS adds a mfcr just after the sc instruction to get
    the CR0.SO bit information from the kernel (which signals the error
    return status).  So for cancelled syscalls with side effects,
    __pthread_get_pc would point to the mfcr and thus invalidate the
    checks in sigcancel_handler.
    
    Checked on powerpc64le-linux-gnu and powerpc-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/powerpc/sysdep-cancel.h
    	(__pthread_get_pc): New function.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index 9e222b1..58a82cb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/powerpc/sysdep-cancel.h
+	(__pthread_get_pc): New function.
+
 	* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_BIT_SET): Remove macro.
 	* sysdeps/unix/sysv/linux/i386/Makefile
 	[$(subdir) = elf] (sysdep-rtld_routines): Add libc-do-syscall object.
diff --git a/sysdeps/unix/sysv/linux/powerpc/sigcontextinfo.h b/sysdeps/unix/sysv/linux/powerpc/sigcontextinfo.h
index b8b5997..aada838 100644
--- a/sysdeps/unix/sysv/linux/powerpc/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/powerpc/sigcontextinfo.h
@@ -15,7 +15,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
 #include <signal.h>
+#include <stdint.h>
 
 #define SIGCONTEXT struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS
@@ -24,3 +28,15 @@
 #define GET_STACK(ctx)	((void *)((ctx)->regs->gpr[1]))
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+#ifdef __powerpc64__
+  return uc->uc_mcontext.gp_regs[PT_NIP];
+#else
+  return uc->uc_mcontext.uc_regs->gregs[PT_NIP];
+#endif
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S b/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
new file mode 100644
index 0000000..bda2199
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/powerpc/syscall_cancel.S
@@ -0,0 +1,64 @@
+/* Cancellable syscall wrapper.  Linux/powerpc version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int [r3] __syscall_cancel_arch (int *cancelhandling [r3],
+					long int nr   [r4],
+					long int arg1 [r5],
+					long int arg2 [r6],
+					long int arg3 [r7],
+					long int arg4 [r8],
+					long int arg5 [r9],
+					long int arg6 [r10])  */
+
+ENTRY (__syscall_cancel_arch)
+
+	.globl __syscall_cancel_arch_start
+	.type  __syscall_cancel_arch_start,@function
+__syscall_cancel_arch_start:
+
+	/* if (*cancelhandling & CANCELED_BITMASK)
+	     __syscall_do_cancel()  */
+	lwz     r0,0(r3)
+	rldicl. r0,r0,62,63
+	beq     1f
+	b       __syscall_do_cancel
+	nop
+1:
+	/* Issue a 6 argument syscall, the nr [r4] being the syscall
+	   number.  */
+	mr      r0,r4
+	mr      r3,r5
+	mr      r4,r6
+	mr      r5,r7
+	mr      r6,r8
+	mr      r7,r9
+	mr      r8,r10
+	ABORT_TRANSACTION
+	sc
+
+	.globl __syscall_cancel_arch_end
+	.type  __syscall_cancel_arch_end,@function
+__syscall_cancel_arch_end:
+
+	bnslr+
+	neg	r3,r3
+	blr
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=c04e3ab498ffd2c715b08c905ec7418bd03e821c

commit c04e3ab498ffd2c715b08c905ec7418bd03e821c
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon May 4 16:30:13 2015 -0300

    nptl: i386: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the i386 modifications required for the BZ#12683.
    It basically provides the required ucontext_get_pc symbol, adds the
    cancellable syscall wrapper, and fixes a thread atomic update macro.
    
    On i386 an arch-specific cancellation implementation is required
    because, depending on the glibc configuration and the underlying
    kernel, the syscall may be done through a vDSO symbol
    (__kernel_vsyscall).  When the vDSO symbol is used, the resulting PC
    value for an interrupted syscall points to an address outside the
    expected markers in __syscall_cancel_arch.  It has been discussed on
    LKML [1] how the kernel could help userland accomplish this, but as
    far as I know the discussion has stalled.
    
    Also, since glibc supports i486, the old 'int 0x80' should be used
    in the syscall wrapper.  One option would be to make the minimum
    default chip the Pentium II (which implements sysenter), or to add a
    runtime check in syscall_cancel.S to choose between 'int 0x80' and
    sysenter.
    
    Similar to x86_64, it also removes the bogus arch-specific
    THREAD_ATOMIC_BIT_SET, which always references the current thread
    instead of the one referenced by the input 'descr' argument.
    
    Checked on i686-linux-gnu.
    
    	* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
    	THREAD_ATOMIC_BIT_SET): Remove macro.
    	* sysdeps/unix/sysv/linux/i386/Makefile
    	[$(subdir) = elf] (sysdep-rtld_routines): Add libc-do-syscall object.
    	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid): Use
    	cancellable futex syscall macro.
    	* sysdeps/unix/sysv/linux/i386/syscall_cancel.S: New file.
    	* sysdeps/unix/sysv/linux/i386/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    
    [1] https://lkml.org/lkml/2016/3/8/1105
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index c6ac668..9e222b1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,14 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_BIT_SET): Remove macro.
+	* sysdeps/unix/sysv/linux/i386/Makefile
+	[$(subdir) = elf] (sysdep-rtld_routines): Add libc-do-syscall object.
+	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid): Use
+	cancellable futex syscall macro.
+	* sysdeps/unix/sysv/linux/i386/syscall_cancel.S: New file.
+	* sysdeps/unix/sysv/linux/i386/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+
 	* sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h (__syscall_arg_t):
 	Define type for x32.
 	(__SSC): Add platform specific macro.
diff --git a/sysdeps/i386/nptl/tls.h b/sysdeps/i386/nptl/tls.h
index f9a6b11..afc2f88 100644
--- a/sysdeps/i386/nptl/tls.h
+++ b/sysdeps/i386/nptl/tls.h
@@ -383,17 +383,6 @@ tls_fill_user_desc (union user_desc_init *desc,
 	      abort (); })
 
 
-/* Atomic set bit.  */
-#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
-  (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      asm volatile (LOCK_PREFIX "orl %1, %%gs:%P0"		      \
-			    :: "i" (offsetof (struct pthread, member)),	      \
-			       "ir" (1 << (bit)));			      \
-	    else							      \
-	      /* Not necessary for other sizes in the moment.  */	      \
-	      abort (); })
-
-
 /* Set the stack guard field in TCB head.  */
 #define THREAD_SET_STACK_GUARD(value) \
   THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
diff --git a/sysdeps/unix/sysv/linux/i386/Makefile b/sysdeps/unix/sysv/linux/i386/Makefile
index 4080b8c..bae2e4b 100644
--- a/sysdeps/unix/sysv/linux/i386/Makefile
+++ b/sysdeps/unix/sysv/linux/i386/Makefile
@@ -6,7 +6,7 @@ sysdep_routines += ioperm iopl vm86
 endif
 
 ifeq ($(subdir),elf)
-sysdep-dl-routines += libc-do-syscall
+sysdep-rtld_routines += libc-do-syscall
 sysdep-others += lddlibc4
 install-bin += lddlibc4
 endif
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 197bb1f..e54d1ea 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -223,7 +223,7 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
   do {					\
     __typeof (tid) __tid;		\
     while ((__tid = (tid)) != 0)	\
-      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
+      lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);\
   } while (0)
 
 extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
diff --git a/sysdeps/unix/sysv/linux/i386/sigcontextinfo.h b/sysdeps/unix/sysv/linux/i386/sigcontextinfo.h
index 7e2764e..8547d9e 100644
--- a/sysdeps/unix/sysv/linux/i386/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/i386/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 #define SIGCONTEXT struct sigcontext
 #define SIGCONTEXT_EXTRA_ARGS
 #define GET_PC(ctx)	((void *) ctx.eip)
@@ -48,3 +53,11 @@ do {									      \
 		      "i" (sizeof (struct sigcontext) / 4)		      \
 		    : "cc", "edi");					      \
 } while (0)
+
+static inline uintptr_t
+ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.gregs[REG_EIP];
+}
+
+#endif /* _SIGCONTEXTINFO_H  */
diff --git a/sysdeps/unix/sysv/linux/i386/syscall_cancel.S b/sysdeps/unix/sysv/linux/i386/syscall_cancel.S
new file mode 100644
index 0000000..5596b3e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/syscall_cancel.S
@@ -0,0 +1,107 @@
+/* Cancellable syscall wrapper.  Linux/i686 version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long int [eax] __syscall_cancel_arch (int *cancelhandling [SP],
+					 long int nr   [SP+4],
+					 long int arg1 [SP+8],
+					 long int arg2 [SP+12],
+					 long int arg3 [SP+16],
+					 long int arg4 [SP+20],
+					 long int arg5 [SP+24],
+					 long int arg6 [SP+28])  */
+
+ENTRY (__syscall_cancel_arch)
+	pushl %ebp
+	cfi_def_cfa_offset (8)
+	cfi_offset (ebp, -8)
+	pushl %edi
+	cfi_def_cfa_offset (12)
+	cfi_offset (edi, -12)
+	pushl %esi
+	cfi_def_cfa_offset (16)
+	cfi_offset (esi, -16)
+	pushl %ebx
+	cfi_def_cfa_offset (20)
+	cfi_offset (ebx, -20)
+
+	.global __syscall_cancel_arch_start
+	.type   __syscall_cancel_arch_start, @function
+__syscall_cancel_arch_start:
+
+	/* if (*cancelhandling & CANCELED_BITMASK)
+	     __syscall_do_cancel()  */
+	testb	$4, (%eax)
+	jne     1f
+
+	/* Issue a 6 argument syscall, the nr [%eax] being the syscall
+	   number.  */
+	movl    24(%esp), %eax
+	movl    28(%esp), %ebx
+	movl    32(%esp), %ecx
+	movl    36(%esp), %edx
+	movl    40(%esp), %esi
+	movl    44(%esp), %edi
+	movl    48(%esp), %ebp
+
+	/* We cannot use the vDSO helper for the syscall (__kernel_vsyscall)
+	   because the returned PC from the kernel indicates whether the
+	   interrupted syscall has any side effects that need to be reported
+	   back to the program.  And the signal handler (sigcancel_handler in
+	   nptl-init.c) checks the PC against the __syscall_cancel_arch_*
+	   markers.  */
+	int	$128
+
+	.global __syscall_cancel_arch_end
+	.type   __syscall_cancel_arch_end, @function
+__syscall_cancel_arch_end:
+
+	popl %ebx
+	cfi_restore (ebx)
+	cfi_def_cfa_offset (16)
+	popl %esi
+	cfi_restore (esi)
+	cfi_def_cfa_offset (12)
+	popl %edi
+	cfi_restore (edi)
+	cfi_def_cfa_offset (8)
+	popl %ebp
+	cfi_restore (ebp)
+	cfi_def_cfa_offset (4)
+        ret
+
+1:
+	/* Although __syscall_do_cancel does not return, the stack needs to
+	   be set up correctly for unwinding.  */
+	popl %ebx
+	cfi_restore (ebx)
+	cfi_def_cfa_offset (16)
+	popl %esi
+	cfi_restore (esi)
+	cfi_def_cfa_offset (12)
+	popl %edi
+	cfi_restore (edi)
+	cfi_def_cfa_offset (8)
+	popl %ebp
+	cfi_restore (ebp)
+	cfi_def_cfa_offset (4)
+	jmp __syscall_do_cancel
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=3458e3cb7e54bf11e9f8035baaa06eaa29b6a95d

commit 3458e3cb7e54bf11e9f8035baaa06eaa29b6a95d
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Sat May 9 14:20:26 2015 -0300

    nptl: x32: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the x32 modifications required for the BZ#12683.
    It basically adjusts the syscall argument size used to pass arguments
    to the syscall cancel wrappers by zero-extending pointer types while
    preserving the values of default types (such as off_t).
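
    A minimal, standalone illustration of the point above (not part of the
    patch; the pointer value and the program itself are purely hypothetical)
    shows why a 32-bit pointer must be zero-extended into the 64-bit syscall
    argument, while a plain integer such as off_t keeps its value through
    ordinary conversion:

    ---
    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      uint32_t ptr_bits = 0xffffb000u;   /* hypothetical 32-bit pointer */
      int32_t off = -1;                  /* e.g. an off_t argument */

      /* The syscall argument register is 64 bits wide (long long int).  */
      long long int zero_ext = (long long int) (uint64_t) ptr_bits;
      long long int sign_ext = (long long int) (int32_t) ptr_bits;
      long long int off_arg  = (long long int) off;

      printf ("zero-extended pointer: %#llx\n", (unsigned long long) zero_ext);
      printf ("sign-extended pointer: %#llx\n", (unsigned long long) sign_ext);
      printf ("off_t value preserved: %lld\n", off_arg);
      return 0;
    }
    ---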
    
    Checked on x86_64-linux-gnux32.
    
    	* sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h (__syscall_arg_t):
    	Define type for x32.
    	(__SSC): Add platform specific macro.
    	* include/libc-pointer-arith.h (__integer_if_pointer_type_sub,
    	__integer_if_pointer_type, cast_to_integer): Parametrize integer type
    	cast.
    	(cast_to_uinteger): New macro.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index f232185..c6ac668 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,13 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h (__syscall_arg_t):
+	Define type for x32.
+	(__SSC): Add platform specific macro.
+	* include/libc-pointer-arith.h (__integer_if_pointer_type_sub,
+	__integer_if_pointer_type, cast_to_integer): Parametrize integer type
+	cast.
+	(cast_to_uinteger): New macro.
+
 	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Remove file.
 	* sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: Remove file.
 	* sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: Remove file.
diff --git a/include/libc-pointer-arith.h b/include/libc-pointer-arith.h
index 715cbc1..4cacb6c 100644
--- a/include/libc-pointer-arith.h
+++ b/include/libc-pointer-arith.h
@@ -25,17 +25,23 @@
 # define __pointer_type(type) (__builtin_classify_type ((type) 0) == 5)
 
 /* intptr_t if P is true, or T if P is false.  */
-# define __integer_if_pointer_type_sub(T, P) \
+# define __integer_if_pointer_type_sub(T, P, INTTYPE) \
   __typeof__ (*(0 ? (__typeof__ (0 ? (T *) 0 : (void *) (P))) 0 \
-		  : (__typeof__ (0 ? (intptr_t *) 0 : (void *) (!(P)))) 0))
+		  : (__typeof__ (0 ? (INTTYPE *) 0 : (void *) (!(P)))) 0))
 
 /* intptr_t if EXPR has a pointer type, or the type of EXPR otherwise.  */
-# define __integer_if_pointer_type(expr) \
+# define __integer_if_pointer_type(expr, inttype) \
   __integer_if_pointer_type_sub(__typeof__ ((__typeof__ (expr)) 0), \
-				__pointer_type (__typeof__ (expr)))
+				__pointer_type (__typeof__ (expr)), \
+				inttype)
 
 /* Cast an integer or a pointer VAL to integer with proper type.  */
-# define cast_to_integer(val) ((__integer_if_pointer_type (val)) (val))
+# define cast_to_integer(val) \
+  ((__integer_if_pointer_type (val, intptr_t)) (val))
+
+/* Cast an integer or a pointer VAL to unsigned integer with proper type.  */
+# define cast_to_uinteger(val) \
+  ((__integer_if_pointer_type (val, uintptr_t)) (val))
 
 /* Align a value by rounding down to closest size.
    e.g. Using size of 4096, we get this behavior:
diff --git a/sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h b/sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h
index 04c73a2..dedff6c 100644
--- a/sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h
+++ b/sysdeps/unix/sysv/linux/x86_64/x32/sysdep.h
@@ -18,6 +18,19 @@
 #ifndef _LINUX_X32_SYSDEP_H
 #define _LINUX_X32_SYSDEP_H 1
 
+#ifndef __ASSEMBLER__
+#include <libc-internal.h>
+#include <libc-diag.h>
+
+typedef long long int __syscall_arg_t;
+
+/* Syscall arguments for x32 follow the x86_64 size; however, pointers are
+   32 bits in size.  The idea is to zero-extend pointer types while casting
+   to the signed 64-bit default argument type.  */
+#define __SSC(__x) ((__syscall_arg_t) cast_to_uinteger (__x))
+
+#endif
+
 /* There is some commonality.  */
 #include <sysdeps/unix/sysv/linux/x86_64/sysdep.h>
 #include <sysdeps/x86_64/x32/sysdep.h>

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=7610024edb7255f89cba5f14723d47312cc508c3

commit 7610024edb7255f89cba5f14723d47312cc508c3
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Mon Sep 29 09:48:34 2014 -0300

    nptl: x86_64: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch adds the x86_64 modifications required for the BZ#12683.
    It basically provides the required ucontext_get_pc symbol and removes
    the arch-specific libc-cancellation implementations.
    
    It also removes the bogus arch-specific THREAD_ATOMIC_BIT_SET, which
    always references the current thread instead of the one referenced by
    the input 'descr' argument.  It works as long as the input is the self
    thread pointer; however, it generates wrong code if used with a
    descriptor of a different thread (as in nptl/pthread_cancel.c).
    
    The generated code creates an additional load to reference the TLS
    segment; for instance, the code:
    
      THREAD_ATOMIC_BIT_SET (THREAD_SELF, cancelhandling, CANCELED_BIT);
    
    Compiles to:
    
      lock;orl $4, %fs:776
    
    With the patch applied it now compiles to:
    
      mov %fs:16,%rax
      lock;orl $4, 776(%rax)
    
    If some usage indeed proves to be a hotspot, we can add an extra macro
    with a more descriptive name (THREAD_ATOMIC_BIT_SET_SELF, for instance)
    which x86_64 can then optimize.  In fact, none of the x86_64
    THREAD_ATOMIC_* macros respect the input descr, and they will possibly
    fail when used with a 'descr' different from THREAD_SELF.
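
    As a hedged sketch (illustrative names only, not the actual glibc
    change), the descriptor-respecting replacement amounts to a plain
    atomic OR on whatever descriptor the caller passes, instead of a
    %fs-relative access that is always THREAD_SELF:

    ---
    #include <stdatomic.h>

    /* Stand-in for the relevant 'struct pthread' field.  */
    struct pthread_like { _Atomic int cancelhandling; };
    #define CANCELED_BITMASK 0x04

    static void
    thread_set_canceled_bit (struct pthread_like *descr)
    {
      /* Acts on the descriptor given, even when it is not THREAD_SELF.  */
      atomic_fetch_or_explicit (&descr->cancelhandling, CANCELED_BITMASK,
                                memory_order_relaxed);
    }
    ---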
    
    Checked on x86_64-linux-gnu.
    
    	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Remove file.
    	* sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: Remove file.
    	* sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: Remove file.
    	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
    	Use cancellable futex wait call.
    	* sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h (ucontext_get_pc):
    	New function.
    	* sysdeps/x86_64/nptl/tcb-offsets.sym (TCB_CANCELING_BITMASK):
    	Remove.
    	* sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
    	THREAD_ATOMIC_BIT_SET): Remove macros.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index f107fa7..f232185 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,17 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Remove file.
+	* sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: Remove file.
+	* sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: Remove file.
+	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
+	Use cancellable futex wait call.
+	* sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h (ucontext_get_pc):
+	New function.
+	* sysdeps/x86_64/nptl/tcb-offsets.sym (TCB_CANCELING_BITMASK):
+	Remove.
+	* sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
+	THREAD_ATOMIC_BIT_SET): Remove macros.
+
 	* io/creat.c (LIBC_CANCEL_HANDLED): Remove macro.
 	* io/ppoll.c (LIBC_CANCEL_HANDLED): Likewise.
 	* misc/pselect.c (LIBC_CANCEL_HANDLED): Likewise.
diff --git a/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
deleted file mode 100644
index ed804df..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/cancellation.S
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (C) 2009-2017 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <tcb-offsets.h>
-#include <kernel-features.h>
-#include "lowlevellock.h"
-
-#define PTHREAD_UNWIND JUMPTARGET(__pthread_unwind)
-#if IS_IN (libpthread)
-# if defined SHARED && !defined NO_HIDDEN
-#  undef PTHREAD_UNWIND
-#  define PTHREAD_UNWIND __GI___pthread_unwind
-# endif
-#else
-# ifndef SHARED
-	.weak __pthread_unwind
-# endif
-#endif
-
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#else
-# if FUTEX_WAIT == 0
-#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	%fs:PRIVATE_FUTEX, reg
-# else
-#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	%fs:PRIVATE_FUTEX, reg ; \
-	orl	$FUTEX_WAIT, reg
-# endif
-#endif
-
-/* It is crucial that the functions in this file don't modify registers
-   other than %rax and %r11.  The syscall wrapper code depends on this
-   because it doesn't explicitly save the other registers which hold
-   relevant values.  */
-	.text
-
-	.hidden __pthread_enable_asynccancel
-ENTRY(__pthread_enable_asynccancel)
-	movl	%fs:CANCELHANDLING, %eax
-2:	movl	%eax, %r11d
-	orl	$TCB_CANCELTYPE_BITMASK, %r11d
-	cmpl	%eax, %r11d
-	je	1f
-
-	lock
-	cmpxchgl %r11d, %fs:CANCELHANDLING
-	jnz	2b
-
-	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
-	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
-	je	3f
-
-1:	ret
-
-3:	subq	$8, %rsp
-	cfi_adjust_cfa_offset(8)
-	LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
-	lock
-	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
-	mov	%fs:CLEANUP_JMP_BUF, %RDI_LP
-	call	PTHREAD_UNWIND
-	hlt
-END(__pthread_enable_asynccancel)
-
-
-	.hidden __pthread_disable_asynccancel
-ENTRY(__pthread_disable_asynccancel)
-	testl	$TCB_CANCELTYPE_BITMASK, %edi
-	jnz	1f
-
-	movl	%fs:CANCELHANDLING, %eax
-2:	movl	%eax, %r11d
-	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
-	lock
-	cmpxchgl %r11d, %fs:CANCELHANDLING
-	jnz	2b
-
-	movl	%r11d, %eax
-3:	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
-	cmpl	$TCB_CANCELING_BITMASK, %eax
-	je	4f
-1:	ret
-
-	/* Performance doesn't matter in this loop.  We will
-	   delay until the thread is canceled.  And we will unlikely
-	   enter the loop twice.  */
-4:	mov	%fs:0, %RDI_LP
-	movl	$__NR_futex, %eax
-	xorq	%r10, %r10
-	addq	$CANCELHANDLING, %rdi
-	LOAD_PRIVATE_FUTEX_WAIT (%esi)
-	syscall
-	movl	%fs:CANCELHANDLING, %eax
-	jmp	3b
-END(__pthread_disable_asynccancel)
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
deleted file mode 100644
index dc9d822..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2017 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
deleted file mode 100644
index 8422939..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2017 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index cbf6597..bb6d9ee 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -232,10 +232,10 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
    afterwards.  The kernel up to version 3.16.3 does not use the private futex
    operations for futex wake-up when the clone terminates.  */
 #define lll_wait_tid(tid) \
-  do {					\
-    __typeof (tid) __tid;		\
-    while ((__tid = (tid)) != 0)	\
-      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
+  do {									      \
+    __typeof (tid) __tid;						      \
+    while ((__tid = (tid)) != 0)					      \
+      lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);		      \
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
diff --git a/sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h b/sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h
index 131f96b..d9a44e1 100644
--- a/sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h
+++ b/sysdeps/unix/sysv/linux/x86_64/sigcontextinfo.h
@@ -15,6 +15,9 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
 #include <stdint.h>
 
 #define SIGCONTEXT siginfo_t *_si, ucontext_t *
@@ -28,3 +31,11 @@
 
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+static inline
+uintptr_t ucontext_get_pc (const ucontext_t *uc)
+{
+  return uc->uc_mcontext.gregs[REG_RIP];
+}
+
+#endif
diff --git a/sysdeps/x86_64/nptl/tcb-offsets.sym b/sysdeps/x86_64/nptl/tcb-offsets.sym
index 8a25c48..b225e5b 100644
--- a/sysdeps/x86_64/nptl/tcb-offsets.sym
+++ b/sysdeps/x86_64/nptl/tcb-offsets.sym
@@ -19,7 +19,6 @@ PRIVATE_FUTEX		offsetof (tcbhead_t, private_futex)
 -- Not strictly offsets, but these values are also used in the TCB.
 TCB_CANCELSTATE_BITMASK	 CANCELSTATE_BITMASK
 TCB_CANCELTYPE_BITMASK	 CANCELTYPE_BITMASK
-TCB_CANCELING_BITMASK	 CANCELING_BITMASK
 TCB_CANCELED_BITMASK	 CANCELED_BITMASK
 TCB_EXITING_BITMASK	 EXITING_BITMASK
 TCB_CANCEL_RESTMASK	 CANCEL_RESTMASK
diff --git a/sysdeps/x86_64/nptl/tls.h b/sysdeps/x86_64/nptl/tls.h
index 9b8ad82..4594fe9 100644
--- a/sysdeps/x86_64/nptl/tls.h
+++ b/sysdeps/x86_64/nptl/tls.h
@@ -315,17 +315,6 @@ typedef struct
 	      abort (); })
 
 
-/* Atomic set bit.  */
-# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
-  (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      asm volatile (LOCK_PREFIX "orl %1, %%fs:%P0"		      \
-			    :: "i" (offsetof (struct pthread, member)),	      \
-			       "ir" (1 << (bit)));			      \
-	    else							      \
-	      /* Not necessary for other sizes in the moment.  */	      \
-	      abort (); })
-
-
 /* Set the stack guard field in TCB head.  */
 # define THREAD_SET_STACK_GUARD(value) \
     THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=141320a320b63f2f2bab370ca0877513947f765e

commit 141320a320b63f2f2bab370ca0877513947f765e
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date:   Fri Sep 18 18:26:35 2015 -0300

    nptl: Fix Race conditions in pthread cancellation (BZ#12683)
    
    This patch fixes some race conditions in the NPTL cancellation code by
    redefining how cancellable syscalls are defined and handled.  The current
    approach is to enable asynchronous cancellation prior to making the
    syscall and restore the previous cancellation type once the syscall
    returns.
    
    As described in BZ#12683, this approach has two important problems:
    
      1. Cancellation can act after the syscall has returned from the kernel,
         but before userspace saves the return value.  This can result in a
         resource leak if the syscall allocated a resource or had a side
         effect (partial read/write), and there is no way for the program to
         handle it with cancellation handlers (see the sketch after this
         list).
    
      2. If a signal is handled while the thread is blocked at a cancellable
         syscall, the entire signal handler runs with asynchronous cancellation
         enabled.  This can lead to issues if the signal handler calls
         functions which are async-signal-safe but not async-cancel-safe.
    
    For cancellation to work correctly, there are 5 points at which the
    cancellation signal could arrive:
    
      1. Before the final "testcancel" and before the syscall is made.
      2. Between the "testcancel" and the syscall.
      3. While the syscall is blocked and no side effects have yet taken place.
      4. While the syscall is blocked but with some side effects already having
         taken place (e.g. a partial read or write).
      5. After the syscall has returned.
    
    GLIBC wants to act on cancellation in cases 1, 2, and 3, but not in
    cases 4 or 5.  The proposed solution follows:
    
      * Handling case 1 is trivial: do a conditional branch based on whether
        the thread has received a cancellation request.
      * Case 2 can be caught by the signal handler determining that the saved
        program counter (from the ucontext_t) is in some address range
        beginning just before the "testcancel" and ending with the syscall
        instruction.
      * In case 3, except for certain syscalls that ALWAYS fail with EINTR
        even for non-interrupting signals, the kernel will reset the program
        counter to point at the syscall instruction during signal handling,
        so that the syscall is restarted when the signal handler returns.
        So, from the signal handler's standpoint, this looks the same as
        case 2, and thus it's taken care of.
      * In case 4, the kernel cannot restart the syscall; when it's
        interrupted by a signal, the kernel must cause the syscall to return
        with whatever partial result it obtained (e.g. a partial read or
        write).
      * In case 5, the saved program counter points just after the syscall
        instruction, so the signal handler won't act on cancellation.
        This is handled like case 4, since the program counter is already
        past the syscall instruction.
    
    Another case that needs handling is syscalls that fail with EINTR even
    when the signal handler is non-interrupting (installed with SA_RESTART).
    In this case the syscall wrapper code can simply check the cancellation
    flag when the result is EINTR, and act on cancellation if it is set.
    
    The proposed GLIBC adjustments are:
    
      1. Remove the enable_asynccancel/disable_asynccancel usage from the
         syscall definitions and instead make the wrappers call a common
         symbol that checks whether cancellation is enabled (__syscall_cancel
         in nptl/libc-cancellation.c), calls the arch-specific cancellable
         entry point (__syscall_cancel_arch), and cancels the thread when
         required.
    
      2. Provide an arch-specific symbol that contains global markers.  These
         markers are used by the SIGCANCEL handler to check whether the
         interruption happened inside a valid syscall and whether the syscall
         has already completed (see the sketch after this list).
         A default version is provided
         (sysdeps/unix/sysv/linux/syscall_cancel.c); however, the markers may
         not end up at the expected places depending on how
         INTERNAL_SYSCALL_NCS is implemented by the underlying architecture.
         In that case an arch-specific implementation should be provided.
    
      3. Rewrite the SIGCANCEL asynchronous handler to check both the
         cancellation type and whether the interrupted IP falls between the
         global markers, and act accordingly (sigcancel_handler in
         nptl/nptl-init.c).
    
      4. Adjust nptl/pthread_cancel.c to send a signal instead of acting
         directly.  This avoids synchronization issues when updating the
         cancellation status and also concentrates the logic in the signal
         handler and the cancellation syscall code.
    
      5. Adjust the pthread code to replace CANCEL_ASYNC/CANCEL_RESET calls
         with the appropriate cancellable futex syscalls.
    
      6. Adjust the libc code to replace LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET
         with the appropriate cancellable syscalls (see the call-site sketch
         further below).
    
      7. Adjust the arch-specific 'lowlevellock-futex.h' implementations to
         provide cancellable futex calls (used in the libpthread code).
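    
    As a rough, hedged illustration of point 2 (this is not the literal
    sysdeps/unix/sysv/linux/syscall_cancel.c added by the patch, whose
    contents are not reproduced in the hunks below), the generic cancellable
    syscall bridge looks approximately like this; the INTERNAL_SYSCALL_*,
    CANCELED_BITMASK, and __syscall_do_cancel names come from the sysdep.h,
    descr.h, and libc-cancellation.c hunks:
    
      /* Sketch only: a C version such as this is exactly where the caveat
         about marker placement applies, since the compiler may reorder
         code around the labels.  */
      long int
      __syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
                             __syscall_arg_t a1, __syscall_arg_t a2,
                             __syscall_arg_t a3, __syscall_arg_t a4,
                             __syscall_arg_t a5, __syscall_arg_t a6)
      {
        /* Global marker: the SIGCANCEL handler acts on cancellation only
           if the interrupted PC is at or past this label...  */
        asm volatile (".globl __syscall_cancel_arch_start\n\t"
                      "__syscall_cancel_arch_start:");
    
        /* Cancellation already requested before the syscall was entered
           (cases 1 and 2): act on it now, before any side effects.  */
        if (*ch & CANCELED_BITMASK)
          __syscall_do_cancel ();
    
        INTERNAL_SYSCALL_DECL (err);
        long int result
          = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6);
    
        /* ...and strictly before this label, i.e. only while the syscall
           cannot yet have produced side effects (case 3, not 4 or 5).  */
        asm volatile (".globl __syscall_cancel_arch_end\n\t"
                      "__syscall_cancel_arch_end:");
    
        if (INTERNAL_SYSCALL_ERROR_P (result, err))
          return -INTERNAL_SYSCALL_ERRNO (result, err);
        return result;
      }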
    
    This patch adds the proposed changes to NPTL.  Without the further
    patches in the series, all the ports are left broken.
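    
    For reference, here is a hedged sketch of what a cancellable syscall
    wrapper call site reduces to once SYSCALL_CANCEL expands to
    __syscall_cancel (the __libc_read wrapper is only an illustration of the
    pattern; read.c itself is not touched by the hunks in this mail):
    
      /* Illustration only: with the new SYSCALL_CANCEL definition from the
         sysdeps/unix/sysdep.h hunk, a cancellable wrapper no longer needs
         any enable/disable_asynccancel bracketing.  */
      ssize_t
      __libc_read (int fd, void *buf, size_t nbytes)
      {
        return SYSCALL_CANCEL (read, fd, buf, nbytes);
      }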
    
    	* nptl/Makefile [routines]: Add syscall_cancel object.
    	[libpthread-routines]: Remove cancellation object.
    	(CFLAGS-cancellation.c): Remove rule.
    	(CFLAGS-syscall_cancel.c): New rule.
    	(tests): Add tst-cancel28.
    	* nptl/Versions [GLIBC_PRIVATE] (libc): Add __syscall_cancel,
    	__syscall_cancel_arch_start, and __syscall_cancel_arch_end.
    	* nptl/cancellation.c: Remove file.
    	* nptl/descr.h (CANCELING_BIT): Remove define.
    	(CANCELING_BITMASK): Likewise.
    	(CANCEL_RESTMASK): Adjust value with CANCELED_BIT remove.
    	* nptl/libc-cancellation.c (__syscall_cancel): Add non-cancellable
    	implementation for loader and cancellable one for libc.
    	(__syscall_do_cancel): New function: cancel call for syscall wrappers.
    	* nptl/lll_timedlock_wait.c (__lll_timedlock_wait): Use cancellable
    	futex operation.
    	(__lll_timedwait_tid): Likewise.
    	* nptl/nptl-init.c (sigcancel_handler): Rewrite function to avoid race
    	conditions.
    	(__pthread_initialize_minimal_internal): Add SA_RESTART to SIGCANCEL
    	handler.
    	* nptl/pt-system.c [LIBC_CANCEL_HANDLED]: Remove definition.
    	* io/creat.c (LIBC_CANCEL_HANDLED): Likewise.
    	* io/ppoll.c [ppoll] (LIBC_CANCEL_HANDLED): Likewise.
    	* misc/pselect [__pselect] (LIBC_CANCEL_HANDLED): Likewise.
    	* sysdeps/posix/pause.c (LIBC_CANCEL_HANDLED): Likewise.
    	* sysdeps/unix/sysv/linux/generic/creat.c (LIBC_CANCEL_HANDLED):
    	Likewise.
    	* nptl/pthreadP.h (__do_cancel): Rewrite to both disable asynchronous
    	cancellation and setting the thread as cancelled.
    	(CANCEL_ASYNC): Remove definition.
    	(CANCEL_RESET): Likewise.
    	(LIBC_CANCEL_ASYNC): Likewise.
    	(LIBC_CANCEL_RESET): Likewise.
    	(LIBC_CANCEL_HANDLED): Likewise.
    	(__syscall_cancel_arch): Add prototype.
    	(__pthread_enable_asynccancel): Remove prototype.
    	(__pthread_disable_asynccancel): Likewise.
    	(__libc_enable_asynccancel): Likewise.
    	(__libc_disable_asynccancel): Likewise.
    	(__librt_enable_asynccancel): Likewise.
    	(__librt_disable_asynccancel): Likewise.
    	(__syscall_cancel_arch): Add prototype.
    	(__syscall_do_cancel): Likewise.
    	* nptl/pthread_cancel.c (pthread_cancel): Rewrite to just set
    	CANCELLED_BIT and call __pthread_kill.
    	* nptl/pthread_create.c (start_thread): Likewise.
    	* nptl/pthread_timedjoin.c (pthread_timedjoin_np): Likewise.
    	* nptl/sem_timedwait.c (sem_timedwait):  Likewise.
    	* nptl/sem_wait.c (__new_sem_wait): Likewise.
    	* nptl/sem_waitcommon.c (futex_abstimed_wait): Likewise.
    	* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Likewise.
    	* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
    	* sysdeps/posix/sigpause.c (do_sigpause): Likewise.
    	* sysdeps/posix/sigwait.c (__sigwait): Likewise.
    	* sysdeps/posix/waitid.c (__waitid): Likewise.
    	* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Likewise.
    	* sysdeps/posix/open64.c (__libc_open64): Likewise.
    	* sysdeps/unix/sysv/linux/sigwait.c (__sigwait): Likewise.
    	* nptl/pthread_exit.c (pthread_exit): Rewrite to set EXITING_BIT
    	before call __pthread_unwind.
    	* nptl/pthread_join.c (pthread_join): Remove CANCEL_ASYNC/CANCEL_RESET
    	usage.
    	* rt/Makefile [CFLAGS-librt-cancellation.c]: Remove rule.
    	* sysdeps/generic/sysdep-cancel.h (LIBC_CANCEL_ASYNC): Remove define.
    	(LIBC_CANCEL_RESET): Likewise.
    	(LIBC_CANCEL_HANDLED): Likewise.
    	* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep):
    	Likewise.
    	* sysdeps/unix/sysv/linux/fcntl.c (__libc_fcntl): Likewise.
    	* sysdeps/unix/sysv/linux/generic/wordsize-32/fcntl.c (__libc_fcntl):
    	Likewise.
    	* sysdeps/nptl/Makefile [$(subdir) = rt] (librt-sysdep_routines):
    	Remove librt-cancellation object.
    	[$(subdir) = rt] (librt-cancellation.c): Remove rule.
    	* sysdeps/nptl/librt-cancellation.c: Remove file.
    	* sysdeps/unix/sysv/linux/futex-internal.h (lll_futex_wait_cancel):
    	Use lll_futex_timed_wait_cancel.
    	(futex_reltimed_wait_cancelable): Likewise.
    	(futex_abstimed_wait_cancelable)): Use
    	lll_futex_timed_wait_bitset_cancel.
    	* sysdeps/unix/sysv/linux/lowlevellock-futex.h
    	(lll_futex_wait_cancel): New macro.
    	(lll_futex_timed_wait_cancel): Likewise.
    	(lll_futex_timed_wait_bitset_cancel): Likewise.
    	* sysdeps/unix/sysdep.h (SYSCALL_CANCEL): New macro: cancelable
    	syscall calls.
    	(INTERNAL_SYSCALL_NCS_CALL): New macro.
    	(__syscall_cancel): New prototype.
    	* sysdeps/unix/sysv/linux/socketcall.h (SOCKETCALL): Use __SSC macros.
    	(SOCKETCALL_CANCEL): Use SYSCALL_CANCEL macros.
    	* sysdeps/generic/sysdep-cancel.h (LIBC_CANCEL_ASYNC): Remove define.
    	(LIBC_CANCEL_RESET): Likewise.
    	(LIBC_CANCEL_HANDLED): Likewise.
    	* sysdeps/unix/sysv/linux/pthread_kill.c (__pthread_kill): Allow
    	SIGCANCEL to be sent.
    	* nptl/tst-cancel28.c: New file.
    	* sysdeps/unix/sysv/linux/syscall_cancel.c: Likewise.
    	* support/temp_file.c (create_temp_fifo): New function.
    	* support/temp_file.h (create_temp_fifo): Likewise.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

diff --git a/ChangeLog b/ChangeLog
index c664b1f..f107fa7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,90 @@
 2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
+	* io/creat.c (LIBC_CANCEL_HANDLED): Remove macro.
+	* io/ppoll.c (LIBC_CANCEL_HANDLED): Likewise.
+	* misc/pselect.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/posix/pause.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/posix/sigpause.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/unix/sysv/linux/creat.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/unix/sysv/linux/creat64.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/unix/sysv/linux/sigwait.c (LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/unix/sysv/linux/sigwaitinfo.c (LIBC_CANCEL_HANDLED): Likewise.
+	* manual/llio.texi: Adjust comments regarding pthread_enable_asynccancel and
+	pthread_disable_asynccancel.
+	* nptl/Makefile (routines): Add syscall_cancel object.
+	(libpthread-routines): Remove cancellation object.
+	(CFLAGS-cancellation.c): Remove rule.
+	(CFLAGS-syscall_cancel.c): New rule.
+	(tests): Add tst-cancel28.
+	* nptl/Versions [GLIBC_PRIVATE] (libc): Add __syscall_cancel,
+	__syscall_cancel_arch_start, and __syscall_cancel_arch_end.
+	* nptl/cancellation.c: Remove file.
+	* sysdeps/nptl/librt-cancellation.c: Likewise.
+	* nptl/descr.h (CANCELING_BIT, CANCELING_BITMASK): Remove define.
+	(CANCELED_BIT, EXITING_BIT, TERMINATED_BIT, SETXID_BIT,
+	CANCEL_RESTMASK): Adjust value with CANCELED_BIT removal.
+	* nptl/libc-cancellation.c (__syscall_cancel): Add non-cancellable
+	implementation for loader and cancellable one for libc.
+	(__syscall_do_cancel): New function: cancel call for syscall wrappers.
+	* nptl/lll_timedlock_wait.c (__lll_timedlock_wait): Use cancellable
+	futex operation.
+	* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Likewise.
+	* sysdeps/sparc/sparc32/lowlevellock.c (__lll_timedwait_tid): Likewise.
+	* nptl/lll_timedwait_tid.c (__lll_timedwait_tid): Likewise.
+	* nptl/nptl-init.c (sigcancel_handler): Rewrite function to avoid race
+	conditions.
+	(__pthread_initialize_minimal_internal): Add SA_RESTART to SIGCANCEL
+	handler.
+	* nptl/pthreadP.h (__do_cancel): Rewrite to both disable asynchronous
+	cancellation and setting the thread as cancelled.
+	(CANCEL_ASYNC, CANCEL_RESET, LIBC_CANCEL_ASYNC, LIBC_CANCEL_RESET,
+	LIBC_CANCEL_HANDLED): Remove macros.
+	(__syscall_cancel_arch, __syscall_do_cancel): New prototypes.
+	(__pthread_enable_asynccancel, __pthread_disable_asynccancel,
+	__libc_enable_asynccancel, __libc_disable_asynccancel,
+	__librt_enable_asynccancel, __librt_disable_asynccancel): Remove
+	prototypes.
+	* nptl/pthread_cancel.c (pthread_cancel): Rewrite to just set
+	CANCELLED_BIT and call __pthread_kill.
+	* nptl/pthread_create.c (start_thread): Likewise.
+	* nptl/pthread_exit.c (__pthread_exit): Likewise.
+	* nptl/pthread_join.c (__pthread_join): Likewise.
+	* nptl/pthread_timedjoin.c (pthread_timedjoin_np): Likewise.
+	* nptl/sem_wait.c (__old_sem_wait): Likewise.
+	* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Likewise.
+	* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
+	* nptl/tst-cancel28.c: New file.
+	* sysdeps/unix/sysv/linux/syscall_cancel.c: Likewise.
+	* rt/Makefile [CFLAGS-librt-cancellation.c]: Remove rule.
+	* support/temp_file.c (support_create_temp_fifo): New function.
+	* support/temp_file.h (support_create_temp_fifo): New prototype.
+	* sysdeps/generic/sigcontextinfo.h (ucontext_get_pc): Likewise.
+	* sysdeps/generic/sysdep-cancel.h (LIBC_CANCEL_ASYNC): Remove define.
+	(LIBC_CANCEL_RESET): Likewise.
+	(LIBC_CANCEL_HANDLED): Likewise.
+	* sysdeps/nptl/Makefile [$(subdir) = rt] (librt-sysdep_routines):
+	Remove librt-cancellation object.
+	[$(subdir) = rt] (librt-cancellation.c): Remove rule.
+	* sysdeps/posix/open64.c (__libc_open64): Do not call cancelation macros.
+	* sysdeps/posix/sigwait.c (__sigwait): Likewise.
+	* sysdeps/posix/waitid.c (__waitid): Likewise.
+	* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep): Likewise.
+	* sysdeps/unix/sysdep.h (SYSCALL_CANCEL): Rewrite to call __syscall_cancel.
+	(INTERNAL_SYSCALL_NCS_CALL, __INTERNAL_SYSCALL_NCS*, __SYSCALL_CANCEL*): New
+	macros.
+	* sysdeps/unix/sysv/linux/futex-internal.h (futex_wait_cancel,
+	futex_reltimed_wait_cancelable, futex_abstimed_wait_cancelable): Use
+	cancelable low level futex wrappers.
+	* sysdeps/unix/sysv/linux/lowlevellock-futex.h (lll_futex_syscall_cp,
+	lll_futex_wait_cancel, lll_futex_timed_wait_cancel,
+	lll_futex_timed_wait_bitset_cancel): New macros.
+	* sysdeps/unix/sysv/linux/pthread_kill.c (__pthread_kill): Allow
+	SIGCANCEL to be sent.
+	* sysdeps/unix/sysv/linux/socketcall.h (SOCKETCALL): Use __SSC macros.
+	(SOCKETCALL_CANCEL): Use SYSCALL_CANCEL macros.
+	(__SOCKETCALL_CANCEL*): New macros.
+	* sysdeps/unix/sysv/linux/sysdep.h (SYSCALL_CANCEL_RET): New macro.
+
 	* debug/tst-backtrace5.c (handle_signal): Check for syscall
 	instead of read.
 	(fn): Issue the read syscall instead of call the cancellable
diff --git a/io/creat.c b/io/creat.c
index 4e095d6..031cc78 100644
--- a/io/creat.c
+++ b/io/creat.c
@@ -27,6 +27,3 @@ creat (const char *file, mode_t mode)
 {
   return __open (file, O_WRONLY|O_CREAT|O_TRUNC, mode);
 }
-
-/* __open handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
diff --git a/io/ppoll.c b/io/ppoll.c
index e2919a7..f8cb3a4 100644
--- a/io/ppoll.c
+++ b/io/ppoll.c
@@ -70,7 +70,5 @@ ppoll (struct pollfd *fds, nfds_t nfds, const struct timespec *timeout,
 }
 
 #ifndef ppoll
-/* __poll handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
 libc_hidden_def (ppoll);
 #endif
diff --git a/manual/llio.texi b/manual/llio.texi
index 8b2f599..20f494f 100644
--- a/manual/llio.texi
+++ b/manual/llio.texi
@@ -2436,13 +2436,13 @@ aiocb64}, since the LFS transparently replaces the old interface.
 @c     sigemptyset ok
 @c     sigaddset ok
 @c     setjmp ok
-@c     CANCEL_ASYNC -> pthread_enable_asynccancel ok
+@c     __pthread_setcanceltype ok
 @c      do_cancel ok
 @c       pthread_unwind ok
 @c        Unwind_ForcedUnwind or longjmp ok [@ascuheap @acsmem?]
 @c     lll_lock @asulock @aculock
 @c     lll_unlock @asulock @aculock
-@c     CANCEL_RESET -> pthread_disable_asynccancel ok
+@c     __pthread_setcanceltype ok
 @c      lll_futex_wait ok
 @c     ->start_routine ok -----
 @c     call_tls_dtors @asulock @ascuheap @aculock @acsmem
diff --git a/misc/pselect.c b/misc/pselect.c
index 2292219..17492f0 100644
--- a/misc/pselect.c
+++ b/misc/pselect.c
@@ -73,6 +73,4 @@ __pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
 }
 #ifndef __pselect
 weak_alias (__pselect, pselect)
-/* __select handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
 #endif
diff --git a/nptl/Makefile b/nptl/Makefile
index 36bf25f..1b90cbc 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -30,7 +30,7 @@ install-lib-ldscripts := libpthread.so
 
 routines = alloca_cutoff forward libc-lowlevellock libc-cancellation \
 	   libc-cleanup libc_pthread_init libc_multiple_threads \
-	   register-atfork unregister-atfork
+	   register-atfork unregister-atfork syscall_cancel
 shared-only-routines = forward
 
 # We need to provide certain routines for compatibility with existing
@@ -116,7 +116,6 @@ libpthread-routines = nptl-init vars events version pt-interp \
 		      cleanup cleanup_defer cleanup_compat \
 		      cleanup_defer_compat unwind \
 		      pt-longjmp pt-cleanup\
-		      cancellation \
 		      lowlevellock \
 		      lll_timedlock_wait lll_timedwait_tid \
 		      pt-fork pt-vfork \
@@ -165,7 +164,6 @@ CFLAGS-pthread_setcanceltype.c = -fexceptions -fasynchronous-unwind-tables
 
 # These are internal functions which similar functionality as setcancelstate
 # and setcanceltype.
-CFLAGS-cancellation.c = -fasynchronous-unwind-tables
 CFLAGS-libc-cancellation.c = -fasynchronous-unwind-tables
 
 # Calling pthread_exit() must cause the registered cancel handlers to
@@ -224,6 +222,8 @@ CFLAGS-fsync.c = -fexceptions -fasynchronous-unwind-tables
 
 CFLAGS-pt-system.c = -fexceptions
 
+CFLAGS-syscall_cancel.c = -fexceptions -fasynchronous-unwind-tables
+
 LDLIBS-tst-once5 = -lstdc++
 CFLAGS-tst-thread_local1.o = -std=gnu++11
 LDLIBS-tst-thread_local1 = -lstdc++
@@ -272,7 +272,7 @@ tests = tst-attr1 tst-attr2 tst-attr3 tst-default-attr \
 	tst-cancel11 tst-cancel12 tst-cancel13 tst-cancel14 tst-cancel15 \
 	tst-cancel16 tst-cancel17 tst-cancel18 tst-cancel19 tst-cancel20 \
 	tst-cancel21 tst-cancel22 tst-cancel23 tst-cancel24 tst-cancel25 \
-	tst-cancel26 tst-cancel27 \
+	tst-cancel26 tst-cancel27 tst-cancel28 \
 	tst-cancel-self tst-cancel-self-cancelstate \
 	tst-cancel-self-canceltype tst-cancel-self-testcancel \
 	tst-cleanup0 tst-cleanup1 tst-cleanup2 tst-cleanup3 tst-cleanup4 \
diff --git a/nptl/Versions b/nptl/Versions
index 0ae5def..734d47a 100644
--- a/nptl/Versions
+++ b/nptl/Versions
@@ -36,6 +36,9 @@ libc {
     __libc_pthread_init;
     __libc_current_sigrtmin_private; __libc_current_sigrtmax_private;
     __libc_allocate_rtsig_private;
+    __syscall_cancel;
+    __syscall_cancel_arch_start;
+    __syscall_cancel_arch_end;
   }
 }
 
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
deleted file mode 100644
index f3a3805..0000000
--- a/nptl/cancellation.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <setjmp.h>
-#include <stdlib.h>
-#include "pthreadP.h"
-#include <futex-internal.h>
-
-
-/* The next two functions are similar to pthread_setcanceltype() but
-   more specialized for the use in the cancelable functions like write().
-   They do not need to check parameters etc.  */
-int
-attribute_hidden
-__pthread_enable_asynccancel (void)
-{
-  struct pthread *self = THREAD_SELF;
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      int newval = oldval | CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-	break;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					      oldval);
-      if (__glibc_likely (curval == oldval))
-	{
-	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
-	    {
-	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-	      __do_cancel ();
-	    }
-
-	  break;
-	}
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-
-  return oldval;
-}
-
-
-void
-attribute_hidden
-__pthread_disable_asynccancel (int oldtype)
-{
-  /* If asynchronous cancellation was enabled before we do not have
-     anything to do.  */
-  if (oldtype & CANCELTYPE_BITMASK)
-    return;
-
-  struct pthread *self = THREAD_SELF;
-  int newval;
-
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      newval = oldval & ~CANCELTYPE_BITMASK;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					      oldval);
-      if (__glibc_likely (curval == oldval))
-	break;
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-
-  /* We cannot return when we are being canceled.  Upon return the
-     thread might be things which would have to be undone.  The
-     following loop should loop until the cancellation signal is
-     delivered.  */
-  while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
-			   == CANCELING_BITMASK, 0))
-    {
-      futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
-			 FUTEX_PRIVATE);
-      newval = THREAD_GETMEM (self, cancelhandling);
-    }
-}
diff --git a/nptl/descr.h b/nptl/descr.h
index c83b17b..c3e1cc0 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -278,23 +278,20 @@ struct pthread
   /* Bit set if asynchronous cancellation mode is selected.  */
 #define CANCELTYPE_BIT		1
 #define CANCELTYPE_BITMASK	(0x01 << CANCELTYPE_BIT)
-  /* Bit set if canceling has been initiated.  */
-#define CANCELING_BIT		2
-#define CANCELING_BITMASK	(0x01 << CANCELING_BIT)
-  /* Bit set if canceled.  */
-#define CANCELED_BIT		3
+  /* Bit set if threads is canceled.  */
+#define CANCELED_BIT		2
 #define CANCELED_BITMASK	(0x01 << CANCELED_BIT)
   /* Bit set if thread is exiting.  */
-#define EXITING_BIT		4
+#define EXITING_BIT		3
 #define EXITING_BITMASK		(0x01 << EXITING_BIT)
   /* Bit set if thread terminated and TCB is freed.  */
-#define TERMINATED_BIT		5
+#define TERMINATED_BIT		4
 #define TERMINATED_BITMASK	(0x01 << TERMINATED_BIT)
   /* Bit set if thread is supposed to change XID.  */
-#define SETXID_BIT		6
+#define SETXID_BIT		5
 #define SETXID_BITMASK		(0x01 << SETXID_BIT)
   /* Mask for the rest.  Helps the compiler to optimize.  */
-#define CANCEL_RESTMASK		0xffffff80
+#define CANCEL_RESTMASK		0xffffffc0
 
 #define CANCEL_ENABLED_AND_CANCELED(value) \
   (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK	      \
diff --git a/nptl/libc-cancellation.c b/nptl/libc-cancellation.c
index cb675ce..b013435 100644
--- a/nptl/libc-cancellation.c
+++ b/nptl/libc-cancellation.c
@@ -16,9 +16,50 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#include <setjmp.h>
+#include <stdlib.h>
 #include "pthreadP.h"
 
+/* Cancellation function called by all cancellable syscalls.  */
+long int
+__syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
+		  __syscall_arg_t a2, __syscall_arg_t a3,
+		  __syscall_arg_t a4, __syscall_arg_t a5,
+		  __syscall_arg_t a6)
+{
+  pthread_t self = (pthread_t) THREAD_SELF;
+  volatile struct pthread *pd = (volatile struct pthread *) self;
+  long int result;
 
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include <nptl/cancellation.c>
+  /* If cancellation is not enabled, call the syscall directly.  */
+  if (pd->cancelhandling & CANCELSTATE_BITMASK)
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6);
+      if (INTERNAL_SYSCALL_ERROR_P (result, err))
+	return -INTERNAL_SYSCALL_ERRNO (result, err);
+      return result;
+    }
+
+  /* Call the arch-specific entry points that contains the globals markers
+     to be checked by SIGCANCEL handler.  */
+  result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
+			          a6);
+
+  if ((result == -EINTR)
+      && (pd->cancelhandling & CANCELED_BITMASK)
+      && !(pd->cancelhandling & CANCELSTATE_BITMASK))
+    __syscall_do_cancel ();
+
+  return result;
+}
+libc_hidden_def (__syscall_cancel)
+
+/* Since __do_cancel is a always inline function, this creates a symbol the
+   arch-specific symbol can call to cancel the thread.  */
+void
+__cleanup_fct_attribute attribute_hidden __attribute ((noreturn))
+__syscall_do_cancel (void)
+{
+  __do_cancel ();
+}
diff --git a/nptl/lll_timedlock_wait.c b/nptl/lll_timedlock_wait.c
index 604953c..630c028 100644
--- a/nptl/lll_timedlock_wait.c
+++ b/nptl/lll_timedlock_wait.c
@@ -52,7 +52,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
         return ETIMEDOUT;
 
       /* If *futex == 2, wait until woken or timeout.  */
-      lll_futex_timed_wait (futex, 2, &rt, private);
+      lll_futex_timed_wait_cancel (futex, 2, &rt, private);
     }
 
   return 0;
diff --git a/nptl/lll_timedwait_tid.c b/nptl/lll_timedwait_tid.c
index e4c8de0..950d1aa 100644
--- a/nptl/lll_timedwait_tid.c
+++ b/nptl/lll_timedwait_tid.c
@@ -62,7 +62,8 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
          The kernel up to version 3.16.3 does not use the private futex
          operations for futex wake-up when the clone terminates.
       */
-      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
+      if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED)
+	  == -ETIMEDOUT)
         return ETIMEDOUT;
     }
 
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 869e926..33ee7c4 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -38,6 +38,7 @@
 #include <kernel-features.h>
 #include <libc-pointer-arith.h>
 #include <pthread-pids.h>
+#include <sigcontextinfo.h>
 
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
 /* Pointer to the corresponding variable in libc.  */
@@ -176,49 +177,65 @@ __nptl_set_robust (struct pthread *self)
 
 
 #ifdef SIGCANCEL
+
+extern const char __syscall_cancel_arch_start[1];
+extern const char __syscall_cancel_arch_end[1];
+
+/* Workaround for architectures which either does not define the mask
+   as a sigset (alpha) or does not call sa_sigaction with a ucontext_t
+   as third argument (sparc).  */
+# ifndef UCONTEXT_SIGMASK
+#  define UCONTEXT_SIGMASK(cxt) \
+  &(((ucontext_t*) (cxt))->uc_sigmask)
+# endif
+
 /* For asynchronous cancellation we use a signal.  This is the handler.  */
 static void
 sigcancel_handler (int sig, siginfo_t *si, void *ctx)
 {
+  INTERNAL_SYSCALL_DECL (err);
+  pid_t pid = INTERNAL_SYSCALL_CALL (getpid, err);
+
   /* Safety check.  It would be possible to call this function for
      other signals and send a signal from another process.  This is not
      correct and might even be a security problem.  Try to catch as
      many incorrect invocations as possible.  */
   if (sig != SIGCANCEL
-      || si->si_pid != __getpid()
+      || si->si_pid != pid
       || si->si_code != SI_TKILL)
     return;
 
   struct pthread *self = THREAD_SELF;
+  volatile struct pthread *pd = (volatile struct pthread *) self;
 
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-  while (1)
+  if (((pd->cancelhandling & (CANCELSTATE_BITMASK)) != 0)
+      || ((pd->cancelhandling & CANCELED_BITMASK) == 0))
+    return;
+
+  /* Add SIGCANCEL on ignored sigmask to avoid the handler to be called
+     again.  */
+  sigset_t *set = UCONTEXT_SIGMASK (ctx);
+  __sigaddset (set, SIGCANCEL);
+
+  /* Check if asynchronous cancellation mode is set and if interrupted
+     instruction pointer falls within the cancellable syscall bridge.  For
+     interruptable syscalls that might generate external side-effects (partial
+     reads or writes, for instance), the kernel will set the IP to after
+     '__syscall_cancel_arch_end', thus disabling the cancellation and allowing
+     the process to handle such conditions.  */
+  uintptr_t pc = ucontext_get_pc (ctx);
+  if (pd->cancelhandling & CANCELTYPE_BITMASK
+      || (pc >= (uintptr_t) __syscall_cancel_arch_start
+          && pc < (uintptr_t) __syscall_cancel_arch_end))
     {
-      /* We are canceled now.  When canceled by another thread this flag
-	 is already set but if the signal is directly send (internally or
-	 from another process) is has to be done here.  */
-      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
-
-      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
-	/* Already canceled or exiting.  */
-	break;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-					      oldval);
-      if (curval == oldval)
-	{
-	  /* Set the return value.  */
-	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
-	  /* Make sure asynchronous cancellation is still enabled.  */
-	  if ((newval & CANCELTYPE_BITMASK) != 0)
-	    /* Run the registered destructors and terminate the thread.  */
-	    __do_cancel ();
-
-	  break;
-	}
-
-      oldval = curval;
+      THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
+
+      INTERNAL_SYSCALL_CALL (rt_sigprocmask, err, SIG_SETMASK, set, NULL,
+			     _NSIG / 8);
+
+      __do_cancel ();
+      return;
     }
 }
 #endif
@@ -373,7 +390,10 @@ __pthread_initialize_minimal_internal (void)
      cannot install the handler we do not abort.  Maybe we should, but
      it is only asynchronous cancellation which is affected.  */
   sa.sa_sigaction = sigcancel_handler;
-  sa.sa_flags = SA_SIGINFO;
+  /* The signal handle should be non-interruptible to avoid the risk of
+     spurious EINTR caused by SIGCANCEL sent to process or if pthread_cancel
+     is called while cancellation is disabled in the target thread.  */
+  sa.sa_flags = SA_SIGINFO | SA_RESTART;
   (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
 # endif
 
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 713000e..35d99f6 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -291,51 +291,37 @@ __do_cancel (void)
 {
   struct pthread *self = THREAD_SELF;
 
-  /* Make sure we get no more cancellations.  */
-  THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+  /* Make sure we get no more cancellations by clearing the cancel
+     state.  */
+  int oldval = THREAD_GETMEM (self, cancelhandling);
+  while (1)
+    {
+      int newval = (oldval | CANCELSTATE_BITMASK);
+      newval &= ~(CANCELTYPE_BITMASK);
+      if (oldval == newval)
+	break;
+
+      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
+					  oldval);
+      if (__glibc_likely (curval == oldval))
+	break;
+      oldval = curval;
+    }
+
+  THREAD_SETMEM (self, result, PTHREAD_CANCELED);
 
   __pthread_unwind ((__pthread_unwind_buf_t *)
 		    THREAD_GETMEM (self, cleanup_jmp_buf));
 }
 
 
-/* Set cancellation mode to asynchronous.  */
-#define CANCEL_ASYNC() \
-  __pthread_enable_asynccancel ()
-/* Reset to previous cancellation mode.  */
-#define CANCEL_RESET(oldtype) \
-  __pthread_disable_asynccancel (oldtype)
-
-#if IS_IN (libc)
-/* Same as CANCEL_ASYNC, but for use in libc.so.  */
-# define LIBC_CANCEL_ASYNC() \
-  __libc_enable_asynccancel ()
-/* Same as CANCEL_RESET, but for use in libc.so.  */
-# define LIBC_CANCEL_RESET(oldtype) \
-  __libc_disable_asynccancel (oldtype)
-# define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __SYMBOL_PREFIX "__libc_enable_asynccancel"); \
-  __asm (".globl " __SYMBOL_PREFIX "__libc_disable_asynccancel")
-#elif IS_IN (libpthread)
-# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
-# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
-# define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __SYMBOL_PREFIX "__pthread_enable_asynccancel"); \
-  __asm (".globl " __SYMBOL_PREFIX "__pthread_disable_asynccancel")
-#elif IS_IN (librt)
-# define LIBC_CANCEL_ASYNC() \
-  __librt_enable_asynccancel ()
-# define LIBC_CANCEL_RESET(val) \
-  __librt_disable_asynccancel (val)
-# define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __SYMBOL_PREFIX "__librt_enable_asynccancel"); \
-  __asm (".globl " __SYMBOL_PREFIX "__librt_disable_asynccancel")
-#else
-# define LIBC_CANCEL_ASYNC()	0 /* Just a dummy value.  */
-# define LIBC_CANCEL_RESET(val)	((void)(val)) /* Nothing, but evaluate it.  */
-# define LIBC_CANCEL_HANDLED()	/* Nothing.  */
-#endif
+extern long int __syscall_cancel_arch (volatile int *, __syscall_arg_t nr,
+     __syscall_arg_t arg1, __syscall_arg_t arg2, __syscall_arg_t arg3,
+     __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel_arch);
 
+extern void __syscall_do_cancel (void)
+     __cleanup_fct_attribute attribute_hidden __attribute ((__noreturn__));
 
 /* Internal prototypes.  */
 
@@ -503,8 +489,6 @@ extern int __pthread_kill (pthread_t threadid, int signo);
 extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
 extern int __pthread_join (pthread_t threadid, void **thread_return);
 extern int __pthread_setcanceltype (int type, int *oldtype);
-extern int __pthread_enable_asynccancel (void) attribute_hidden;
-extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden;
 extern void __pthread_testcancel (void);
 
 #if IS_IN (libpthread)
@@ -540,15 +524,6 @@ extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond,
 extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize,
 				     cpu_set_t *cpuset);
 
-/* The two functions are in libc.so and not exported.  */
-extern int __libc_enable_asynccancel (void) attribute_hidden;
-extern void __libc_disable_asynccancel (int oldtype) attribute_hidden;
-
-
-/* The two functions are in librt.so and not exported.  */
-extern int __librt_enable_asynccancel (void) attribute_hidden;
-extern void __librt_disable_asynccancel (int oldtype) attribute_hidden;
-
 #if IS_IN (libpthread)
 /* Special versions which use non-exported functions.  */
 extern void __pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
index 742dfe6..04d9b89 100644
--- a/nptl/pthread_cancel.c
+++ b/nptl/pthread_cancel.c
@@ -37,67 +37,23 @@ __pthread_cancel (pthread_t th)
 #ifdef SHARED
   pthread_cancel_init ();
 #endif
-  int result = 0;
-  int oldval;
-  int newval;
-  do
-    {
-    again:
-      oldval = pd->cancelhandling;
-      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
 
-      /* Avoid doing unnecessary work.  The atomic operation can
-	 potentially be expensive if the bug has to be locked and
-	 remote cache lines have to be invalidated.  */
-      if (oldval == newval)
-	break;
+  THREAD_ATOMIC_BIT_SET (pd, cancelhandling, CANCELED_BIT);
 
-      /* If the cancellation is handled asynchronously just send a
-	 signal.  We avoid this if possible since it's more
-	 expensive.  */
-      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
-	{
-	  /* Mark the cancellation as "in progress".  */
-	  if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
-						    oldval | CANCELING_BITMASK,
-						    oldval))
-	    goto again;
-
-#ifdef SIGCANCEL
-	  /* The cancellation handler will take care of marking the
-	     thread as canceled.  */
-	  pid_t pid = __getpid ();
-
-	  INTERNAL_SYSCALL_DECL (err);
-	  int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
-					   SIGCANCEL);
-	  if (INTERNAL_SYSCALL_ERROR_P (val, err))
-	    result = INTERNAL_SYSCALL_ERRNO (val, err);
-#else
-          /* It should be impossible to get here at all, since
-             pthread_setcanceltype should never have allowed
-             PTHREAD_CANCEL_ASYNCHRONOUS to be set.  */
-          abort ();
-#endif
-
-	  break;
-	}
-
-	/* A single-threaded process should be able to kill itself, since
-	   there is nothing in the POSIX specification that says that it
-	   cannot.  So we set multiple_threads to true so that cancellation
-	   points get executed.  */
-	THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
+  /* A single-threaded process should be able to kill itself, since there is
+     nothing in the POSIX specification that says that it cannot.  So we set
+     multiple_threads to true so that cancellation points get executed.  */
+  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
-	__pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
+  __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
 #endif
-    }
-  /* Mark the thread as canceled.  This has to be done
-     atomically since other bits could be modified as well.  */
-  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
-					       oldval));
 
-  return result;
+  /* Avoid signaling when thread attempts cancel itself (pthread_kill
+     is expensive).  */
+  if (pd == THREAD_SELF && !(pd->cancelhandling & CANCELTYPE_BITMASK))
+    return 0;
+
+  return __pthread_kill (th, SIGCANCEL);
 }
 weak_alias (__pthread_cancel, pthread_cancel)
 
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 51ae60d..64526e2 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -412,7 +412,7 @@ START_THREAD_DEFN
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
      cancellation signal mask.  */
-  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
+  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELED_BITMASK))
     {
       INTERNAL_SYSCALL_DECL (err);
       sigset_t mask;
@@ -444,7 +444,8 @@ START_THREAD_DEFN
 	 have ownership (see CONCURRENCY NOTES above).  */
       if (__glibc_unlikely (pd->stopped_start))
 	{
-	  int oldtype = CANCEL_ASYNC ();
+	  int ct;
+	  __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
 
 	  /* Get the lock the parent locked to force synchronization.  */
 	  lll_lock (pd->lock, LLL_PRIVATE);
@@ -454,7 +455,7 @@ START_THREAD_DEFN
 	  /* And give it up right away.  */
 	  lll_unlock (pd->lock, LLL_PRIVATE);
 
-	  CANCEL_RESET (oldtype);
+	  __pthread_setcanceltype (ct, NULL);
 	}
 
       LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);
diff --git a/nptl/pthread_exit.c b/nptl/pthread_exit.c
index 7209769..82ef4f8 100644
--- a/nptl/pthread_exit.c
+++ b/nptl/pthread_exit.c
@@ -23,9 +23,14 @@
 void
 __pthread_exit (void *value)
 {
-  THREAD_SETMEM (THREAD_SELF, result, value);
+  struct pthread *self = THREAD_SELF;
 
-  __do_cancel ();
+  THREAD_SETMEM (self, result, value);
+
+  THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+
+  __pthread_unwind ((__pthread_unwind_buf_t *)
+		    THREAD_GETMEM (self, cleanup_jmp_buf));
 }
 weak_alias (__pthread_exit, pthread_exit)
 
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index afc8c37..8255a7d 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -52,7 +52,7 @@ __pthread_join (pthread_t threadid, void **thread_return)
     return EINVAL;
 
   struct pthread *self = THREAD_SELF;
-  int result = 0;
+  int result = 0, ct;
 
   LIBC_PROBE (pthread_join, 1, threadid);
 
@@ -62,12 +62,12 @@ __pthread_join (pthread_t threadid, void **thread_return)
   pthread_cleanup_push (cleanup, &pd->joinid);
 
   /* Switch to asynchronous cancellation.  */
-  int oldtype = CANCEL_ASYNC ();
+  __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
 
   if ((pd == self
        || (self->joinid == pd
 	   && (pd->cancelhandling
-	       & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+	       & (CANCELED_BITMASK | EXITING_BITMASK
 		  | TERMINATED_BITMASK)) == 0))
       && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
     /* This is a deadlock situation.  The threads are waiting for each
@@ -89,9 +89,7 @@ __pthread_join (pthread_t threadid, void **thread_return)
     /* Wait for the child.  */
     lll_wait_tid (pd->tid);
 
-
-  /* Restore cancellation mode.  */
-  CANCEL_RESET (oldtype);
+  __pthread_setcanceltype (ct, NULL);
 
   /* Remove the handler.  */
   pthread_cleanup_pop (0);
diff --git a/nptl/pthread_timedjoin.c b/nptl/pthread_timedjoin.c
index 567c171..75a9cfe 100644
--- a/nptl/pthread_timedjoin.c
+++ b/nptl/pthread_timedjoin.c
@@ -35,7 +35,7 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
 {
   struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
-  int result;
+  int result, ct;
 
   /* Make sure the descriptor is valid.  */
   if (INVALID_NOT_TERMINATED_TD_P (pd))
@@ -72,15 +72,12 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
   pthread_cleanup_push (cleanup, &pd->joinid);
 
   /* Switch to asynchronous cancellation.  */
-  int oldtype = CANCEL_ASYNC ();
-
+  __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
 
   /* Wait for the child.  */
   result = lll_timedwait_tid (pd->tid, abstime);
 
-
-  /* Restore cancellation mode.  */
-  CANCEL_RESET (oldtype);
+  __pthread_setcanceltype (ct, NULL);
 
   /* Remove the handler.  */
   pthread_cleanup_pop (0);
diff --git a/nptl/sem_wait.c b/nptl/sem_wait.c
index 625bf08..b6f4312 100644
--- a/nptl/sem_wait.c
+++ b/nptl/sem_wait.c
@@ -56,14 +56,8 @@ __old_sem_wait (sem_t *sem)
       if (atomic_decrement_if_positive (futex) > 0)
 	return 0;
 
-      /* Enable asynchronous cancellation.  Required by the standard.  */
-      int oldtype = __pthread_enable_asynccancel ();
-
       /* Always assume the semaphore is shared.  */
-      err = lll_futex_wait (futex, 0, LLL_SHARED);
-
-      /* Disable asynchronous cancellation.  */
-      __pthread_disable_asynccancel (oldtype);
+      err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
     }
   while (err == 0 || err == -EWOULDBLOCK);
 
diff --git a/nptl/tst-cancel28.c b/nptl/tst-cancel28.c
new file mode 100644
index 0000000..e086e9b
--- /dev/null
+++ b/nptl/tst-cancel28.c
@@ -0,0 +1,94 @@
+/* Check side-effect act for cancellable syscalls (BZ #12683).
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This testcase checks if there is resource leakage if the syscall has
+   returned from kernelspace, but before userspace saves the return
+   value.  The 'leaker' thread should be able to close the file descriptor
+   if the resource is already allocated, meaning that if the cancellation
+   signal arrives *after* the open syscal return from kernel, the
+   side-effect should be visible to application.  */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <support/xthread.h>
+#include <support/check.h>
+#include <support/temp_file.h>
+
+static void *
+writeopener (void *arg)
+{
+  int fd;
+  for (;;)
+    {
+      fd = open (arg, O_WRONLY);
+      close (fd);
+    }
+  return NULL;
+}
+
+static void *
+leaker (void *arg)
+{
+  int fd = open (arg, O_RDONLY);
+  pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, 0);
+  close (fd);
+  return NULL;
+}
+
+
+#define ITER_COUNT 1000
+#define MAX_FILENO 1024
+
+static int
+do_test (void)
+{
+  struct stat st;
+  int i;
+
+  char *name = NULL;
+  support_create_temp_fifo ("tst-cancel28", &name);
+
+  srand (1);
+
+  xpthread_create (NULL, writeopener, name);
+  for (i = 0; i < ITER_COUNT; i++)
+    {
+      pthread_t td = xpthread_create (NULL, leaker, name);
+      struct timespec ts =
+	{ .tv_nsec = rand () % 100000, .tv_sec = 0 };
+      nanosleep (&ts, NULL);
+      /* Ignore pthread_cancel result because it might be the
+	 case when pthread_cancel is called when thread is already
+	 exited.  */
+      pthread_cancel (td);
+      xpthread_join (td);
+    }
+
+  for (i = STDERR_FILENO+1; i < MAX_FILENO; i++)
+    if (!fstat (i, &st))
+      FAIL_EXIT1 ("leaked fd %d", i);
+
+  return 0;
+}
+
+#define TIMEOUT 10
+#include <support/test-driver.c>
diff --git a/rt/Makefile b/rt/Makefile
index 9740dc2..232e543 100644
--- a/rt/Makefile
+++ b/rt/Makefile
@@ -64,7 +64,6 @@ CFLAGS-aio_suspend.c = -fexceptions
 CFLAGS-mq_timedreceive.c = -fexceptions -fasynchronous-unwind-tables
 CFLAGS-mq_timedsend.c = -fexceptions -fasynchronous-unwind-tables
 CFLAGS-clock_nanosleep.c = -fexceptions -fasynchronous-unwind-tables
-CFLAGS-librt-cancellation.c = -fasynchronous-unwind-tables
 
 LDFLAGS-rt.so = -Wl,--enable-new-dtags,-z,nodelete
 
diff --git a/support/temp_file.c b/support/temp_file.c
index 547263a..dad13dc 100644
--- a/support/temp_file.c
+++ b/support/temp_file.c
@@ -86,6 +86,29 @@ create_temp_file (const char *base, char **filename)
   return fd;
 }
 
+int
+support_create_temp_fifo (const char *base, char **fifoname)
+{
+  char *fname = xasprintf ("%s/%sXXXXXX", test_dir, base);
+  mktemp (fname);
+
+  int fd = mkfifo (fname, 0600);
+  if (fd == -1)
+    {
+      printf ("cannot open temporary fifo '%s': %m\n", fname);
+      free (fname);
+      return -1;
+    }
+
+  add_temp_file (fname);
+  if (fifoname != NULL)
+    *fifoname = fname;
+  else
+    free (fname);
+
+  return fd;
+}
+
 char *
 support_create_temp_directory (const char *base)
 {
diff --git a/support/temp_file.h b/support/temp_file.h
index 3b8563e..66bfd1c 100644
--- a/support/temp_file.h
+++ b/support/temp_file.h
@@ -32,6 +32,12 @@ void add_temp_file (const char *name);
    *FILENAME.  */
 int create_temp_file (const char *base, char **filename);
 
+/* Create a temporary fifo.  Return the opened file descriptor on
+   success, or -1 on failure.  Write the file name to *FILENAME if
+   FILENAME is not NULL.  In this case, the caller is expected to free
+   *FILENAME.  */
+int support_create_temp_fifo (const char *name, char **fifoname);
+
 /* Create a temporary directory and schedule it for deletion.  BASE is
    used as a prefix for the unique directory name, which the function
    returns.  The caller should free this string.  */
diff --git a/sysdeps/generic/sigcontextinfo.h b/sysdeps/generic/sigcontextinfo.h
index d25a787..2ca20e9 100644
--- a/sysdeps/generic/sigcontextinfo.h
+++ b/sysdeps/generic/sigcontextinfo.h
@@ -16,6 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
 /* In general we cannot provide any information.  */
 #define SIGCONTEXT struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS
@@ -24,3 +29,13 @@
 #define GET_STACK(ctx)	((void *) 0)
 #define CALL_SIGHANDLER(handler, signo, ctx) \
   (handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+/* Obtain the Program Counter from third argument in signal handler set
+   with SA_SIGINFO.  */
+static inline uintptr_t
+ucontext_get_pc (void *ctx)
+{
+  return 0;
+}
+
+#endif
diff --git a/sysdeps/generic/sysdep-cancel.h b/sysdeps/generic/sysdep-cancel.h
index ba6a1e0..5c84b44 100644
--- a/sysdeps/generic/sysdep-cancel.h
+++ b/sysdeps/generic/sysdep-cancel.h
@@ -3,6 +3,3 @@
 /* No multi-thread handling enabled.  */
 #define SINGLE_THREAD_P (1)
 #define RTLD_SINGLE_THREAD_P (1)
-#define LIBC_CANCEL_ASYNC()	0 /* Just a dummy value.  */
-#define LIBC_CANCEL_RESET(val)	((void)(val)) /* Nothing, but evaluate it.  */
-#define LIBC_CANCEL_HANDLED()	/* Nothing.  */
diff --git a/sysdeps/nptl/Makefile b/sysdeps/nptl/Makefile
index 4f4f4ff..708cab5 100644
--- a/sysdeps/nptl/Makefile
+++ b/sysdeps/nptl/Makefile
@@ -21,8 +21,7 @@ libpthread-sysdep_routines += errno-loc
 endif
 
 ifeq ($(subdir),rt)
-librt-sysdep_routines += timer_routines librt-cancellation
-CFLAGS-librt-cancellation.c += -fexceptions -fasynchronous-unwind-tables
+librt-sysdep_routines += timer_routines
 
 tests += tst-mqueue8x
 CFLAGS-tst-mqueue8x.c += -fexceptions
diff --git a/sysdeps/nptl/aio_misc.h b/sysdeps/nptl/aio_misc.h
index 47b1a36..817a75c 100644
--- a/sysdeps/nptl/aio_misc.h
+++ b/sysdeps/nptl/aio_misc.h
@@ -34,22 +34,18 @@
 
 #define AIO_MISC_WAIT(result, futex, timeout, cancel)			      \
   do {									      \
-    volatile unsigned int *futexaddr = &futex;				      \
+    unsigned int *futexaddr = (unsigned int *)&futex;			      \
     unsigned int oldval = futex;					      \
 									      \
     if (oldval != 0)							      \
       {									      \
 	pthread_mutex_unlock (&__aio_requests_mutex);			      \
 									      \
-	int oldtype;							      \
-	if (cancel)							      \
-	  oldtype = LIBC_CANCEL_ASYNC ();				      \
-									      \
 	int status;							      \
 	do								      \
 	  {								      \
-	    status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
-					  timeout, FUTEX_PRIVATE);	      \
+	    status = futex_reltimed_wait_cancelable (futexaddr, oldval,	      \
+						     timeout, FUTEX_PRIVATE); \
 	    if (status != EAGAIN)					      \
 	      break;							      \
 									      \
@@ -57,9 +53,6 @@
 	  }								      \
 	while (oldval != 0);						      \
 									      \
-	if (cancel)							      \
-	  LIBC_CANCEL_RESET (oldtype);					      \
-									      \
 	if (status == EINTR)						      \
 	  result = EINTR;						      \
 	else if (status == ETIMEDOUT)					      \
diff --git a/sysdeps/nptl/gai_misc.h b/sysdeps/nptl/gai_misc.h
index 8b2a2c1..df6b434 100644
--- a/sysdeps/nptl/gai_misc.h
+++ b/sysdeps/nptl/gai_misc.h
@@ -35,22 +35,18 @@
 
 #define GAI_MISC_WAIT(result, futex, timeout, cancel) \
   do {									      \
-    volatile unsigned int *futexaddr = &futex;				      \
+    unsigned int *futexaddr = (unsigned int *)&futex;			      \
     unsigned int oldval = futex;					      \
 									      \
     if (oldval != 0)							      \
       {									      \
 	pthread_mutex_unlock (&__gai_requests_mutex);			      \
 									      \
-	int oldtype;							      \
-	if (cancel)							      \
-	  oldtype = LIBC_CANCEL_ASYNC ();				      \
-									      \
 	int status;							      \
 	do								      \
 	  {								      \
-	    status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
-					  timeout, FUTEX_PRIVATE);	      \
+	    status = futex_reltimed_wait_cancelable (futexaddr, oldval,	      \
+						     timeout, FUTEX_PRIVATE); \
 	    if (status != EAGAIN)					      \
 	      break;							      \
 									      \
@@ -58,9 +54,6 @@
 	  }								      \
 	while (oldval != 0);						      \
 									      \
-	if (cancel)							      \
-	  LIBC_CANCEL_RESET (oldtype);					      \
-									      \
 	if (status == EINTR)						      \
 	  result = EINTR;						      \
 	else if (status == ETIMEDOUT)					      \
diff --git a/sysdeps/nptl/librt-cancellation.c b/sysdeps/nptl/librt-cancellation.c
deleted file mode 100644
index 1932242..0000000
--- a/sysdeps/nptl/librt-cancellation.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <nptl/pthreadP.h>
-
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include <nptl/cancellation.c>
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 54e3c28..0d806bc 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -180,12 +180,13 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
    wake-up when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero by the kernel
    afterwards.  The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.  */
+   operations for futex wake-up when the clone terminates.
+   Both lll_wait_tid and lll_timewait_tid acts as cancellation points.  */
 #define lll_wait_tid(tid) \
   do {					\
     __typeof (tid) __tid;		\
     while ((__tid = (tid)) != 0)	\
-      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
+      lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);\
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
diff --git a/sysdeps/posix/open64.c b/sysdeps/posix/open64.c
index dd1b4d6..8e6f2b9 100644
--- a/sysdeps/posix/open64.c
+++ b/sysdeps/posix/open64.c
@@ -34,16 +34,8 @@ __libc_open64 (const char *file, int oflag, ...)
       va_end (arg);
     }
 
-  if (SINGLE_THREAD_P)
-    return __libc_open (file, oflag | O_LARGEFILE, mode);
-
-  int oldtype = LIBC_CANCEL_ASYNC ();
-
-  int result = __libc_open (file, oflag | O_LARGEFILE, mode);
-
-  LIBC_CANCEL_RESET (oldtype);
-
-  return result;
+  /* __libc_open should be a cancellation point.  */
+  return __libc_open (file, oflag | O_LARGEFILE, mode);
 }
 weak_alias (__libc_open64, __open64)
 libc_hidden_weak (__open64)
diff --git a/sysdeps/posix/pause.c b/sysdeps/posix/pause.c
index 7c17f49..f875939 100644
--- a/sysdeps/posix/pause.c
+++ b/sysdeps/posix/pause.c
@@ -38,5 +38,3 @@ __libc_pause (void)
   return __sigsuspend (&set);
 }
 weak_alias (__libc_pause, pause)
-
-LIBC_CANCEL_HANDLED ();		/* sigsuspend handles our cancellation.  */
diff --git a/sysdeps/posix/sigpause.c b/sysdeps/posix/sigpause.c
index a61e056..d58e3e8 100644
--- a/sysdeps/posix/sigpause.c
+++ b/sysdeps/posix/sigpause.c
@@ -70,6 +70,3 @@ __xpg_sigpause (int sig)
   return __sigpause (sig, 1);
 }
 strong_alias (__xpg_sigpause, __libc___xpg_sigpause)
-
-/* __sigsuspend handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
diff --git a/sysdeps/posix/sigwait.c b/sysdeps/posix/sigwait.c
index d807dbf..f86cce2 100644
--- a/sysdeps/posix/sigwait.c
+++ b/sysdeps/posix/sigwait.c
@@ -88,13 +88,8 @@ __sigwait (const sigset_t *set, int *sig)
   if (SINGLE_THREAD_P)
     return do_sigwait (set, sig);
 
-  int oldtype = LIBC_CANCEL_ASYNC ();
-
-  int result = do_sigwait (set, sig);
-
-  LIBC_CANCEL_RESET (oldtype);
-
-  return result;
+  /* do_sigwait should be a cancellation point.  */
+  return do_sigwait (set, sig);
 }
 libc_hidden_def (__sigwait)
 weak_alias (__sigwait, sigwait)
diff --git a/sysdeps/posix/waitid.c b/sysdeps/posix/waitid.c
index 2c23cdb..9925700 100644
--- a/sysdeps/posix/waitid.c
+++ b/sysdeps/posix/waitid.c
@@ -151,16 +151,7 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
 int
 __waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
 {
-  if (SINGLE_THREAD_P)
-    return do_waitid (idtype, id, infop, options);
-
-  int oldtype = LIBC_CANCEL_ASYNC ();
-
-  int result = do_waitid (idtype, id, infop, options);
-
-  LIBC_CANCEL_RESET (oldtype);
-
-  return result;
+  return do_waitid (idtype, id, infop, options);
 }
 weak_alias (__waitid, waitid)
 strong_alias (__waitid, __libc_waitid)
diff --git a/sysdeps/sparc/sparc32/lowlevellock.c b/sysdeps/sparc/sparc32/lowlevellock.c
index e502bf6..af3b227 100644
--- a/sysdeps/sparc/sparc32/lowlevellock.c
+++ b/sysdeps/sparc/sparc32/lowlevellock.c
@@ -122,7 +122,8 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
 
       /* Wait until thread terminates.  The kernel so far does not use
 	 the private futex operations for this.  */
-      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
+      if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED)
+	  == -ETIMEDOUT)
 	return ETIMEDOUT;
     }
 
diff --git a/sysdeps/unix/sysdep.h b/sysdeps/unix/sysdep.h
index c308547..10669ae 100644
--- a/sysdeps/unix/sysdep.h
+++ b/sysdeps/unix/sysdep.h
@@ -24,6 +24,9 @@
 #define	SYSCALL__(name, args)	PSEUDO (__##name, name, args)
 #define	SYSCALL(name, args)	PSEUDO (name, name, args)
 
+#ifndef __ASSEMBLER__
+# include <errno.h>
+
 #define __SYSCALL_CONCAT_X(a,b)     a##b
 #define __SYSCALL_CONCAT(a,b)       __SYSCALL_CONCAT_X (a, b)
 
@@ -57,6 +60,29 @@
 #define INTERNAL_SYSCALL_CALL(...) \
   __INTERNAL_SYSCALL_DISP (__INTERNAL_SYSCALL, __VA_ARGS__)
 
+#define __INTERNAL_SYSCALL_NCS0(name, err) \
+  INTERNAL_SYSCALL_NCS (name, err, 0)
+#define __INTERNAL_SYSCALL_NCS1(name, err, a1) \
+  INTERNAL_SYSCALL_NCS (name, err, 1, a1)
+#define __INTERNAL_SYSCALL_NCS2(name, err, a1, a2) \
+  INTERNAL_SYSCALL_NCS (name, err, 2, a1, a2)
+#define __INTERNAL_SYSCALL_NCS3(name, err, a1, a2, a3) \
+  INTERNAL_SYSCALL_NCS (name, err, 3, a1, a2, a3)
+#define __INTERNAL_SYSCALL_NCS4(name, err, a1, a2, a3, a4) \
+  INTERNAL_SYSCALL_NCS (name, err, 4, a1, a2, a3, a4)
+#define __INTERNAL_SYSCALL_NCS5(name, err, a1, a2, a3, a4, a5) \
+  INTERNAL_SYSCALL_NCS (name, err, 5, a1, a2, a3, a4, a5)
+#define __INTERNAL_SYSCALL_NCS6(name, err, a1, a2, a3, a4, a5, a6) \
+  INTERNAL_SYSCALL_NCS (name, err, 6, a1, a2, a3, a4, a5, a6)
+#define __INTERNAL_SYSCALL_NCS7(name, err, a1, a2, a3, a4, a5, a6, a7) \
+  INTERNAL_SYSCALL_NCS (name, err, 7, a1, a2, a3, a4, a5, a6, a7)
+
+/* Issue a syscall defined by its syscall number plus any other arguments
+   required.  It is similar to the INTERNAL_SYSCALL_NCS macro, but without
+   the need to pass the expected number of arguments as the third parameter.  */
+#define INTERNAL_SYSCALL_NCS_CALL(...) \
+  __INTERNAL_SYSCALL_DISP (__INTERNAL_SYSCALL_NCS, __VA_ARGS__)
+
 #define __INLINE_SYSCALL0(name) \
   INLINE_SYSCALL (name, 0)
 #define __INLINE_SYSCALL1(name, a1) \
@@ -88,19 +114,65 @@
 #define INLINE_SYSCALL_CALL(...) \
   __INLINE_SYSCALL_DISP (__INLINE_SYSCALL, __VA_ARGS__)
 
-#define SYSCALL_CANCEL(...) \
-  ({									     \
-    long int sc_ret;							     \
-    if (SINGLE_THREAD_P) 						     \
-      sc_ret = INLINE_SYSCALL_CALL (__VA_ARGS__); 			     \
-    else								     \
-      {									     \
-	int sc_cancel_oldtype = LIBC_CANCEL_ASYNC ();			     \
-	sc_ret = INLINE_SYSCALL_CALL (__VA_ARGS__);			     \
-        LIBC_CANCEL_RESET (sc_cancel_oldtype);				     \
-      }									     \
-    sc_ret;								     \
+
+/* Cancellation macros.  */
+#ifndef __SSC
+typedef long int __syscall_arg_t;
+# define __SSC(__x) ((__syscall_arg_t) (__x))
+#endif
+
+long int __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
+			   __syscall_arg_t arg2, __syscall_arg_t arg3,
+			   __syscall_arg_t arg4, __syscall_arg_t arg5,
+			   __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel);
+
+#define __SYSCALL_CANCEL0(name) \
+  (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL1(name, a1) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL2(name, a1, a2) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0)
+#define __SYSCALL_CANCEL3(name, a1, a2, a3) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0)
+#define __SYSCALL_CANCEL4(name, a1, a2, a3, a4) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+		     __SSC(a4), 0, 0)
+#define __SYSCALL_CANCEL5(name, a1, a2, a3, a4, a5) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+		     __SSC(a4), __SSC(a5), 0)
+#define __SYSCALL_CANCEL6(name, a1, a2, a3, a4, a5, a6) \
+  (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+		     __SSC(a4), __SSC(a5), __SSC(a6))
+
+#define __SYSCALL_CANCEL_NARGS_X(a,b,c,d,e,f,g,h,n,...) n
+#define __SYSCALL_CANCEL_NARGS(...) \
+  __SYSCALL_CANCEL_NARGS_X (__VA_ARGS__,7,6,5,4,3,2,1,0,)
+#define __SYSCALL_CANCEL_CONCAT_X(a,b)     a##b
+#define __SYSCALL_CANCEL_CONCAT(a,b)       __SYSCALL_CANCEL_CONCAT_X (a, b)
+#define __SYSCALL_CANCEL_DISP(b,...) \
+  __SYSCALL_CANCEL_CONCAT (b,__SYSCALL_CANCEL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __SYSCALL_CANCEL_CALL(...) \
+  __SYSCALL_CANCEL_DISP (__SYSCALL_CANCEL, __VA_ARGS__)
+
+#define SYSCALL_CANCEL_NCS(name, nr, args...) \
+  __SYSCALL_CANCEL_CALL (name, nr, args)
+
+
+/* The loader does not need to handle thread cancellation, so use the
+   syscall directly.  */
+#if IS_IN (rtld)
+# define SYSCALL_CANCEL(...) INLINE_SYSCALL_CALL (__VA_ARGS__)
+#else
+# define SYSCALL_CANCEL(...) \
+  ({									\
+    long int sc_ret = __SYSCALL_CANCEL_CALL (__VA_ARGS__);		\
+    SYSCALL_CANCEL_RET ((sc_ret));					\
   })
+#endif
+
+#endif /* __ASSEMBLER__  */
 
 /* Machine-dependent sysdep.h files are expected to define the macro
    PSEUDO (function_name, syscall_name) to emit assembly code to define the
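
Illustration only (not part of the patch): with the macros added above, a
SYSCALL_CANCEL invocation is dispatched on its argument count and lands in
the __syscall_cancel bridge, for instance

    SYSCALL_CANCEL (write, fd, buf, len)
      -> __SYSCALL_CANCEL_CALL (write, fd, buf, len)
      -> __SYSCALL_CANCEL3 (write, fd, buf, len)
      -> __syscall_cancel (__NR_write, __SSC (fd), __SSC (buf), __SSC (len),
                           0, 0, 0)

and the result is then passed through SYSCALL_CANCEL_RET, which maps a
value in the -4095..-1 range to errno and a -1 return.
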
diff --git a/sysdeps/unix/sysv/linux/clock_nanosleep.c b/sysdeps/unix/sysv/linux/clock_nanosleep.c
index 93bc4cf..b9a835a 100644
--- a/sysdeps/unix/sysv/linux/clock_nanosleep.c
+++ b/sysdeps/unix/sysv/linux/clock_nanosleep.c
@@ -28,27 +28,13 @@ int
 __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
 		   struct timespec *rem)
 {
-  INTERNAL_SYSCALL_DECL (err);
-  int r;
-
   if (clock_id == CLOCK_THREAD_CPUTIME_ID)
     return EINVAL;
   if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
     clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
 
-  if (SINGLE_THREAD_P)
-    r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
-  else
-    {
-      int oldstate = LIBC_CANCEL_ASYNC ();
-
-      r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
-			    rem);
-
-      LIBC_CANCEL_RESET (oldstate);
-    }
-
-  return (INTERNAL_SYSCALL_ERROR_P (r, err)
-	  ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+  /* If the call is interrupted by a signal handler or fails, the syscall
+     returns a negative error value, so negate it to obtain the positive
+     error number clock_nanosleep should return.  */
+  return -SYSCALL_CANCEL_NCS (clock_nanosleep, clock_id, flags, req, rem);
 }
 weak_alias (__clock_nanosleep, clock_nanosleep)
diff --git a/sysdeps/unix/sysv/linux/creat.c b/sysdeps/unix/sysv/linux/creat.c
index 31e0248..124d734 100644
--- a/sysdeps/unix/sysv/linux/creat.c
+++ b/sysdeps/unix/sysv/linux/creat.c
@@ -35,6 +35,4 @@ __creat (const char *file, mode_t mode)
 }
 weak_alias (__creat, creat)
 
-LIBC_CANCEL_HANDLED ();
-
 #endif
diff --git a/sysdeps/unix/sysv/linux/creat64.c b/sysdeps/unix/sysv/linux/creat64.c
index 709c660..2404fea 100644
--- a/sysdeps/unix/sysv/linux/creat64.c
+++ b/sysdeps/unix/sysv/linux/creat64.c
@@ -37,5 +37,3 @@ weak_alias (__creat64, creat64)
 strong_alias (__creat64, __creat)
 weak_alias (__creat64, creat)
 #endif
-
-LIBC_CANCEL_HANDLED ();
diff --git a/sysdeps/unix/sysv/linux/futex-internal.h b/sysdeps/unix/sysv/linux/futex-internal.h
index 1386807..a73fec8 100644
--- a/sysdeps/unix/sysv/linux/futex-internal.h
+++ b/sysdeps/unix/sysv/linux/futex-internal.h
@@ -83,10 +83,7 @@ static __always_inline int
 futex_wait_cancelable (unsigned int *futex_word, unsigned int expected,
 		       int private)
 {
-  int oldtype;
-  oldtype = __pthread_enable_asynccancel ();
-  int err = lll_futex_timed_wait (futex_word, expected, NULL, private);
-  __pthread_disable_asynccancel (oldtype);
+  int err = lll_futex_timed_wait_cancel (futex_word, expected, NULL, private);
   switch (err)
     {
     case 0:
@@ -137,10 +134,7 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
 				unsigned int expected,
 			        const struct timespec *reltime, int private)
 {
-  int oldtype;
-  oldtype = __pthread_enable_asynccancel ();
-  int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
-  __pthread_disable_asynccancel (oldtype);
+  int err = lll_futex_timed_wait_cancel (futex_word, expected, reltime, private);
   switch (err)
     {
     case 0:
@@ -200,11 +194,9 @@ futex_abstimed_wait_cancelable (unsigned int *futex_word,
      despite them being valid.  */
   if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))
     return ETIMEDOUT;
-  int oldtype;
-  oldtype = __pthread_enable_asynccancel ();
-  int err = lll_futex_timed_wait_bitset (futex_word, expected, abstime,
-					 FUTEX_CLOCK_REALTIME, private);
-  __pthread_disable_asynccancel (oldtype);
+  int err = lll_futex_timed_wait_bitset_cancel (futex_word, expected, abstime,
+						FUTEX_CLOCK_REALTIME,
+						private);
   switch (err)
     {
     case 0:
diff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
index bb4fbae..ba6bd99 100644
--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h
+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
@@ -89,6 +89,12 @@
      ? -INTERNAL_SYSCALL_ERRNO (__ret, __err) : 0);                     \
   })
 
+#define lll_futex_syscall_cp(...)					\
+  ({                                                                    \
+    long int __ret = __SYSCALL_CANCEL_CALL (__VA_ARGS__);		\
+    __ret;								\
+  })
+
 #define lll_futex_wait(futexp, val, private) \
   lll_futex_timed_wait (futexp, val, NULL, private)
 
@@ -140,6 +146,32 @@
 					 private),                      \
 		     nr_wake, nr_move, mutex, val)
 
-#endif  /* !__ASSEMBLER__  */
+/* Cancellable futex macros.  */
+#define lll_futex_wait_cancel(futexp, val, private) \
+  lll_futex_timed_wait_cancel (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait_cancel(futexp, val, timeout, private)	\
+  ({									\
+    long int __ret;							\
+    int __op = FUTEX_WAIT;						\
+    __ret = lll_futex_syscall_cp (futex, futexp,			\
+				  __lll_private_flag (__op, private),	\
+				  val, timeout);			\
+    __ret;								\
+  })
+
+#define lll_futex_timed_wait_bitset_cancel(futexp, val, timeout,	\
+                                           clockbit, private)		\
+  ({									\
+    long int __ret;							\
+    int __op = FUTEX_WAIT_BITSET | clockbit;				\
+    __ret = lll_futex_syscall_cp (futex, futexp,			\
+				  __lll_private_flag (__op, private), 	\
+				  val, timeout, 0,			\
+				  FUTEX_BITSET_MATCH_ANY);		\
+    __ret;								\
+  })
+
+#endif  /* !__ASSEMBLER__  */
 
 #endif  /* lowlevellock-futex.h */
diff --git a/sysdeps/unix/sysv/linux/pthread_kill.c b/sysdeps/unix/sysv/linux/pthread_kill.c
index cd6f16e..01377b0 100644
--- a/sysdeps/unix/sysv/linux/pthread_kill.c
+++ b/sysdeps/unix/sysv/linux/pthread_kill.c
@@ -42,9 +42,8 @@ __pthread_kill (pthread_t threadid, int signo)
     /* Not a valid thread handle.  */
     return ESRCH;
 
-  /* Disallow sending the signal we use for cancellation, timers,
-     for the setxid implementation.  */
-  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
+  /* Disallow sending the signal we use for the setxid implementation.  */
+  if (signo == SIGSETXID)
     return EINVAL;
 
   /* We have a special syscall to do the work.  */
diff --git a/sysdeps/unix/sysv/linux/sigwait.c b/sysdeps/unix/sysv/linux/sigwait.c
index 443c3ad..86d6a54 100644
--- a/sysdeps/unix/sysv/linux/sigwait.c
+++ b/sysdeps/unix/sysv/linux/sigwait.c
@@ -37,6 +37,3 @@ __sigwait (const sigset_t *set, int *sig)
 libc_hidden_def (__sigwait)
 weak_alias (__sigwait, sigwait)
 strong_alias (__sigwait, __libc_sigwait)
-
-/* __sigtimedwait handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
diff --git a/sysdeps/unix/sysv/linux/sigwaitinfo.c b/sysdeps/unix/sysv/linux/sigwaitinfo.c
index 75ddd52..738c0d4 100644
--- a/sysdeps/unix/sysv/linux/sigwaitinfo.c
+++ b/sysdeps/unix/sysv/linux/sigwaitinfo.c
@@ -28,6 +28,3 @@ __sigwaitinfo (const sigset_t *set, siginfo_t *info)
 libc_hidden_def (__sigwaitinfo)
 weak_alias (__sigwaitinfo, sigwaitinfo)
 strong_alias (__sigwaitinfo, __libc_sigwaitinfo)
-
-/* __sigtimedwait handles cancellation.  */
-LIBC_CANCEL_HANDLED ();
diff --git a/sysdeps/unix/sysv/linux/socketcall.h b/sysdeps/unix/sysv/linux/socketcall.h
index 75f70d0..e5a3a7a 100644
--- a/sysdeps/unix/sysv/linux/socketcall.h
+++ b/sysdeps/unix/sysv/linux/socketcall.h
@@ -87,16 +87,39 @@
   })
 
 
-#if IS_IN (libc)
-# define __pthread_enable_asynccancel  __libc_enable_asynccancel
-# define __pthread_disable_asynccancel __libc_disable_asynccancel
-#endif
+#define __SOCKETCALL_CANCEL1(__name, __a1) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [1]) { (long int) __a1 }))
+#define __SOCKETCALL_CANCEL2(__name, __a1, __a2) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [2]) { (long int) __a1, (long int) __a2 }))
+#define __SOCKETCALL_CANCEL3(__name, __a1, __a2, __a3) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [3]) { (long int) __a1, (long int) __a2, (long int) __a3 }))
+#define __SOCKETCALL_CANCEL4(__name, __a1, __a2, __a3, __a4) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [4]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+                       (long int) __a4 }))
+#define __SOCKETCALL_CANCEL5(__name, __a1, __a2, __a3, __a4, __a5) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [5]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+                       (long int) __a4, (long int) __a5 }))
+#define __SOCKETCALL_CANCEL6(__name, __a1, __a2, __a3, __a4, __a5, __a6) \
+  SYSCALL_CANCEL_NCS (socketcall, __name, \
+     ((long int [6]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+                       (long int) __a4, (long int) __a5, (long int) __a6 }))
+
+#define __SOCKETCALL_CANCEL(...) __SOCKETCALL_DISP (__SOCKETCALL_CANCEL,\
+						    __VA_ARGS__)
 
 #define SOCKETCALL_CANCEL(name, args...)				\
   ({									\
-    int oldtype = LIBC_CANCEL_ASYNC ();					\
-    long int sc_ret = __SOCKETCALL (SOCKOP_##name, args);		\
-    LIBC_CANCEL_RESET (oldtype);					\
+    long int sc_ret = __SOCKETCALL_CANCEL (SOCKOP_##name, args);	\
+    if (sc_ret > -4096UL)						\
+      {									\
+        __set_errno (-sc_ret);						\
+        sc_ret = -1L;							\
+      }									\
     sc_ret;								\
   })
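
Illustration only (not part of the patch): on a socketcall architecture a
call such as SOCKETCALL_CANCEL (recv, fd, buf, len, flags) now expands, via
__SOCKETCALL_CANCEL4, to

    SYSCALL_CANCEL_NCS (socketcall, SOCKOP_recv,
                        ((long int [4]) { (long int) fd, (long int) buf,
                                          (long int) len, (long int) flags }))

i.e. the arguments are marshalled into an on-stack array and the single
socketcall syscall is issued through the cancellable __syscall_cancel
bridge, with errno handling done by the macro body above.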
 
diff --git a/sysdeps/unix/sysv/linux/syscall_cancel.c b/sysdeps/unix/sysv/linux/syscall_cancel.c
new file mode 100644
index 0000000..ac08bb7
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/syscall_cancel.c
@@ -0,0 +1,62 @@
+/* Default cancellation syscall bridge.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthreadP.h>
+
+#define ADD_LABEL(__label)		\
+  asm volatile (			\
+    ".global " __label "\t\n"		\
+    ".type " __label ",\%function\t\n" 	\
+    __label ":\n");
+
+/* This is the generic version of the cancellable syscall code, which
+   adds the label guards (__syscall_cancel_arch_{start,end}) used by the
+   SIGCANCEL handler (sigcancel_handler in nptl-init.c) to check whether
+   the cancelled syscall has side effects that need to be reported to
+   the program.
+
+   An important constraint should be observed when using this generic
+   implementation: __syscall_cancel_arch_end must point to the
+   instruction immediately following the syscall one.  This is because
+   the kernel signals an interrupted syscall with side effects by
+   setting the signal frame program counter to right after the syscall
+   instruction.
+
+   If the INTERNAL_SYSCALL_NCS macro uses more instructions to get the
+   error condition from the kernel (as for powerpc and sparc), uses an
+   out-of-line helper (as for ARM thumb), or uses a kernel helper gate
+   (as for i686 or ia64), the architecture should either adjust the
+   macro or provide a custom __syscall_cancel_arch implementation.  */
+long int
+__syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
+		       __syscall_arg_t a1, __syscall_arg_t a2,
+		       __syscall_arg_t a3, __syscall_arg_t a4,
+		       __syscall_arg_t a5, __syscall_arg_t a6)
+{
+  ADD_LABEL ("__syscall_cancel_arch_start");
+  if (__glibc_unlikely (*ch & CANCELED_BITMASK))
+    __syscall_do_cancel();
+
+  INTERNAL_SYSCALL_DECL(err);
+  long int result = INTERNAL_SYSCALL_NCS (nr, err, 6, a1, a2, a3, a4, a5, a6);
+  ADD_LABEL ("__syscall_cancel_arch_end");
+  if (INTERNAL_SYSCALL_ERROR_P (result, err))
+    return -INTERNAL_SYSCALL_ERRNO (result, err);
+  return result;
+}
+libc_hidden_def (__syscall_cancel_arch)
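
For context, a minimal sketch (not part of this patch) of how a cancellation
point would be written on top of the SYSCALL_CANCEL macro that drives this
bridge; the wrapper name is illustrative:

    #include <sysdep-cancel.h>  /* Assumed to pull in SYSCALL_CANCEL.  */
    #include <unistd.h>

    /* SYSCALL_CANCEL routes the call through __syscall_cancel and
       __syscall_cancel_arch and maps a negative kernel result to errno
       plus a -1 return.  */
    ssize_t
    __read_sketch (int fd, void *buf, size_t nbytes)
    {
      return SYSCALL_CANCEL (read, fd, buf, nbytes);
    }

The SIGCANCEL handler then uses the __syscall_cancel_arch_{start,end}
labels around the syscall instruction to decide whether the interrupted
syscall already had side effects.
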
diff --git a/sysdeps/unix/sysv/linux/sysdep.h b/sysdeps/unix/sysv/linux/sysdep.h
index 1c24766..22d70c0 100644
--- a/sysdeps/unix/sysv/linux/sysdep.h
+++ b/sysdeps/unix/sysv/linux/sysdep.h
@@ -27,6 +27,23 @@
     -1l;					\
   })
 
+/* Check the error from a cancellable syscall and set errno accordingly.
+   Linux uses a negative return value to indicate syscall errors, and
+   since version 2.1 the return value of a system call might be negative
+   even if the call succeeded (e.g., the `lseek' system call might return
+   a large offset).  The current contract is that the kernel makes sure
+   no syscall returns a value in -1 .. -4095 as a valid result, so we
+   can safely test with -4095.  */
+#define SYSCALL_CANCEL_RET(__ret)		\
+  ({						\
+    if (__ret > -4096UL)			\
+      {						\
+	__set_errno (-__ret);			\
+	__ret = -1;				\
+      }						\
+    __ret;					\
+   })
+
 /* Provide a dummy argument that can be used to force register
    alignment for register pairs if required by the syscall ABI.  */
 #ifdef __ASSUME_ALIGNED_REGISTER_PAIRS

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=95da05fe1c4a6eb9e5119eae019142c7405bb175

commit 95da05fe1c4a6eb9e5119eae019142c7405bb175
Author: Adhemerval Zanella <adhemerval.zanella@linaro.com>
Date:   Mon Sep 21 15:55:58 2015 -0700

    nptl: Fix testcases for new pthread cancellation mechanism
    
    With the upcoming fix for BZ#12683, pthread cancellation does not act:

      1. If the syscall is blocked but some side effects have already
         taken place (e.g. a partial read or write).
      2. After the syscall has returned.
    
    The main change is due to the fact that programs need to act on
    syscalls with side effects (for instance, to avoid leaking allocated
    resources or to handle a partial read/write); a short sketch of this
    pattern follows this log message.
    
    This patch changes the NPTL testcases that assume the old behavior and
    also removes the tst-cancel-wrappers.sh test (which checks for symbols
    that will no longer exist).  For tst-cancel{2,3} it removes the pipe
    close, because the close might cause the write syscall to return with
    side effects if it is executed before the pthread_cancel.
    
    It also changes tst-backtrace{5,6} to issue the read syscall through
    syscall () instead of the cancellable read wrapper, to avoid having
    to handle the cancellation bridge functions in the expected
    backtrace.  This requires a change in the powerpc syscall
    implementation to create a stack frame, since the powerpc backtrace
    relies on that information.
    
    Checked on i686-linux-gnu, x86_64-linux-gnu, x86_64-linux-gnux32,
    aarch64-linux-gnu, arm-linux-gnueabihf, powerpc64le-linux-gnu,
    powerpc-linux-gnu, sparcv9-linux-gnu, and sparc64-linux-gnu.
    
    	* debug/tst-backtrace5.c (handle_signal): Check for syscall
    	instead of read.
    	(fn): Issue the read syscall instead of calling the cancellable
    	syscall.
    	* nptl/Makefile [$(run-built-tests) = yes] (tests-special): Remove
    	tst-cancel-wrappers.sh.
    	* nptl/tst-cancel-wrappers.sh: Remove file.
    	* nptl/tst-cancel2.c (do_test): Do not close pipe.
    	* nptl/tst-cancel3.c (do_test): Likewise.
    	* nptl/tst-cancel4.c (tf_write): Handle cancelled syscall with
    	side-effects.
    	(tf_send): Likewise.
    	* sysdeps/unix/sysv/linux/powerpc/syscall.S (syscall): Create stack
    	frame.
    
    Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
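
A minimal sketch (not part of this commit) of the pattern described above,
which the updated tst-cancel4 thread functions in the diff below follow: a
write cancelled after it already has side effects returns the partial
count, so the thread polls for the still-pending cancellation itself.

    #include <pthread.h>
    #include <unistd.h>

    void *
    writer (void *arg)
    {
      int fd = *(int *) arg;
      char buf[4096] = { 0 };

      /* A cancellation request that arrives while the thread is blocked
         here no longer acts once the call has side effects; write then
         returns the partial count instead.  */
      ssize_t ret = write (fd, buf, sizeof buf);

      /* Act on a cancellation that is still pending.  */
      pthread_testcancel ();

      /* Only reached when no cancellation was pending.  */
      return ret < 0 ? NULL : arg;
    }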

diff --git a/ChangeLog b/ChangeLog
index 910bf67..c664b1f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+2017-12-07  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+	* debug/tst-backtrace5.c (handle_signal): Check for syscall
+	instead of read.
+	(fn): Issue the read syscall instead of calling the cancellable
+	syscall.
+	* nptl/Makefile [$(run-built-tests) = yes] (tests-special): Remove
+	tst-cancel-wrappers.sh.
+	* nptl/tst-cancel-wrappers.sh: Remove file.
+	* nptl/tst-cancel2.c (do_test): Do not close pipe.
+	* nptl/tst-cancel3.c (do_test): Likewise.
+	* nptl/tst-cancel4.c (tf_write): Handle cancelled syscall with
+	side-effects.
+	(tf_send): Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/syscall.S (syscall): Create stack
+	frame.
+
 2017-12-07  H.J. Lu  <hongjiu.lu@intel.com>
 
 	* sysdeps/x86_64/fpu/multiarch/Makefile (libm-sysdep_routines):
diff --git a/debug/tst-backtrace5.c b/debug/tst-backtrace5.c
index 0b85e44..0ef2627 100644
--- a/debug/tst-backtrace5.c
+++ b/debug/tst-backtrace5.c
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/types.h>
+#include <sys/syscall.h>
 #include <signal.h>
 #include <unistd.h>
 
@@ -33,7 +34,7 @@
 #endif
 
 /* The backtrace should include at least handle_signal, a signal
-   trampoline, read, 3 * fn, and do_test.  */
+   trampoline, syscall, 3 * fn, and do_test.  */
 #define NUM_FUNCTIONS 7
 
 void
@@ -71,15 +72,17 @@ handle_signal (int signum)
     }
   /* Do not check name for signal trampoline.  */
   i = 2;
-  if (!match (symbols[i++], "read"))
+
+  if (match (symbols[i], "__kernel_vsyscall"))
+    i++;
+
+  /* We are using syscall(...) instead of read.  */
+  if (!match (symbols[i++], "syscall"))
     {
-      /* Perhaps symbols[2] is __kernel_vsyscall?  */
-      if (!match (symbols[i++], "read"))
-	{
-	  FAIL ();
-	  return;
-	}
+      FAIL ();
+      return;
     }
+
   for (; i < n - 1; i++)
     if (!match (symbols[i], "fn"))
       {
@@ -123,8 +126,11 @@ fn (int c, int flags)
       _exit (0);
     }
 
-  /* In the parent.  */
-  read (pipefd[0], r, 1);
+  /* To avoid having to handle the cancellation syscall backtrace (which
+     goes through both the generic bridge (__syscall_cancel) and the
+     architecture-specific one (__syscall_cancel_arch)), issue the
+     syscall directly.  */
+  syscall (__NR_read, pipefd[0], r, 1);
 
   return 0;
 }
diff --git a/nptl/Makefile b/nptl/Makefile
index 11e6ecd..36bf25f 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -445,8 +445,7 @@ tests-reverse += tst-cancel5 tst-cancel23 tst-vfork1x tst-vfork2x
 ifeq ($(run-built-tests),yes)
 tests-special += $(objpfx)tst-stack3-mem.out $(objpfx)tst-oddstacklimit.out
 ifeq ($(build-shared),yes)
-tests-special += $(objpfx)tst-tls6.out $(objpfx)tst-cleanup0-cmp.out \
-		 $(objpfx)tst-cancel-wrappers.out
+tests-special += $(objpfx)tst-tls6.out $(objpfx)tst-cleanup0-cmp.out
 endif
 endif
 
@@ -670,7 +669,7 @@ $(objpfx)$(multidir)/crtn.o: $(objpfx)crtn.o $(objpfx)$(multidir)/
 endif
 
 generated += libpthread_nonshared.a \
-	     multidir.mk tst-atfork2.mtrace tst-cancel-wrappers.out \
+	     multidir.mk tst-atfork2.mtrace \
 	     tst-tls6.out
 
 generated += $(objpfx)tst-atfork2.mtrace \
@@ -682,18 +681,6 @@ LDFLAGS-pthread.so += -e __nptl_main
 $(objpfx)pt-interp.os: $(common-objpfx)runtime-linker.h
 endif
 
-ifeq ($(run-built-tests),yes)
-ifeq (yes,$(build-shared))
-$(objpfx)tst-cancel-wrappers.out: tst-cancel-wrappers.sh
-	$(SHELL) $< '$(NM)' \
-		    $(common-objpfx)libc_pic.a \
-		    $(common-objpfx)libc.a \
-		    $(objpfx)libpthread_pic.a \
-		    $(objpfx)libpthread.a > $@; \
-	$(evaluate-test)
-endif
-endif
-
 tst-exec4-ARGS = $(host-test-program-cmd)
 
 $(objpfx)tst-execstack: $(libdl)
diff --git a/nptl/tst-cancel-wrappers.sh b/nptl/tst-cancel-wrappers.sh
deleted file mode 100644
index 1795e3d..0000000
--- a/nptl/tst-cancel-wrappers.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/sh
-# Test whether all cancelable functions are cancelable.
-# Copyright (C) 2002-2017 Free Software Foundation, Inc.
-# This file is part of the GNU C Library.
-# Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
-
-# The GNU C Library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-
-# The GNU C Library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with the GNU C Library; if not, see
-# <http://www.gnu.org/licenses/>.
-
-NM="$1"; shift
-while [ $# -gt 0 ]; do
-  ( $NM -P $1; echo 'end[end]:' ) | gawk ' BEGIN {
-C["accept"]=1
-C["close"]=1
-C["connect"]=1
-C["creat"]=1
-C["fcntl"]=1
-C["fdatasync"]=1
-C["fsync"]=1
-C["msgrcv"]=1
-C["msgsnd"]=1
-C["msync"]=1
-C["nanosleep"]=1
-C["open"]=1
-C["open64"]=1
-C["pause"]=1
-C["poll"]=1
-C["pread"]=1
-C["pread64"]=1
-C["pselect"]=1
-C["pwrite"]=1
-C["pwrite64"]=1
-C["read"]=1
-C["readv"]=1
-C["recv"]=1
-C["recvfrom"]=1
-C["recvmsg"]=1
-C["select"]=1
-C["send"]=1
-C["sendmsg"]=1
-C["sendto"]=1
-C["sigpause"]=1
-C["sigsuspend"]=1
-C["sigwait"]=1
-C["sigwaitinfo"]=1
-C["tcdrain"]=1
-C["wait"]=1
-C["waitid"]=1
-C["waitpid"]=1
-C["write"]=1
-C["writev"]=1
-C["__xpg_sigpause"]=1
-}
-/:$/ {
-  if (seen)
-    {
-      if (!seen_enable || !seen_disable)
-	{
-	  printf "in '$1'(%s) %s'\''s cancellation missing\n", object, seen
-	  ret = 1
-	}
-    }
-  seen=""
-  seen_enable=""
-  seen_disable=""
-  object=gensub(/^.*\[(.*)\]:$/, "\\1", 1, $0)
-  next
-}
-{
-  if (C[$1] && $2 ~ /^[TW]$/)
-    seen=$1
-  else if ($1 ~ /^([.]|)__(libc|pthread)_enable_asynccancel$/ && $2 == "U")
-    seen_enable=1
-  else if ($1 ~ /^([.]|)__(libc|pthread)_disable_asynccancel$/ && $2 == "U")
-    seen_disable=1
-}
-END {
-  exit ret
-}' || exit
-  shift
-done
diff --git a/nptl/tst-cancel2.c b/nptl/tst-cancel2.c
index bf7946c..25a6143 100644
--- a/nptl/tst-cancel2.c
+++ b/nptl/tst-cancel2.c
@@ -73,9 +73,6 @@ do_test (void)
       return 1;
     }
 
-  /* This will cause the write in the child to return.  */
-  close (fd[0]);
-
   if (pthread_join (th, &r) != 0)
     {
       puts ("join failed");
diff --git a/nptl/tst-cancel3.c b/nptl/tst-cancel3.c
index 228c478..c462a7e 100644
--- a/nptl/tst-cancel3.c
+++ b/nptl/tst-cancel3.c
@@ -75,9 +75,6 @@ do_test (void)
       return 1;
     }
 
-  /* This will cause the read in the child to return.  */
-  close (fd[0]);
-
   if (pthread_join (th, &r) != 0)
     {
       puts ("join failed");
diff --git a/nptl/tst-cancel4.c b/nptl/tst-cancel4.c
index 7a560a1..4f21131 100644
--- a/nptl/tst-cancel4.c
+++ b/nptl/tst-cancel4.c
@@ -166,6 +166,10 @@ tf_write  (void *arg)
   char buf[WRITE_BUFFER_SIZE];
   memset (buf, '\0', sizeof (buf));
   s = write (fd, buf, sizeof (buf));
+  /* The write can return a value higher than 0 (meaning a partial write)
+     due to the SIGCANCEL, but a cancellation request may still be
+     pending for the thread.  */
+  pthread_testcancel ();
 
   pthread_cleanup_pop (0);
 
@@ -742,6 +746,10 @@ tf_send (void *arg)
   char mem[700000];
 
   send (tempfd2, mem, arg == NULL ? sizeof (mem) : 1, 0);
+  /* The send can return a value higher than 0 (meaning a partial send)
+     due to the SIGCANCEL, but a cancellation request may still be
+     pending for the thread.  */
+  pthread_testcancel ();
 
   pthread_cleanup_pop (0);
 
diff --git a/sysdeps/unix/sysv/linux/powerpc/syscall.S b/sysdeps/unix/sysv/linux/powerpc/syscall.S
index 0522ccd..9fa64d9 100644
--- a/sysdeps/unix/sysv/linux/powerpc/syscall.S
+++ b/sysdeps/unix/sysv/linux/powerpc/syscall.S
@@ -19,6 +19,14 @@
 
 ENTRY (syscall)
 	ABORT_TRANSACTION
+	/* Create a minimal stack frame so that backtrace works.  */
+#ifdef __powerpc64__
+	stdu r1, -FRAME_MIN_SIZE (r1)
+	cfi_adjust_cfa_offset (FRAME_MIN_SIZE)
+#else
+	stwu r1,-16(1)
+	cfi_def_cfa_offset (16)
+#endif
 	mr   r0,r3
 	mr   r3,r4
 	mr   r4,r5
@@ -26,6 +34,12 @@ ENTRY (syscall)
 	mr   r6,r7
 	mr   r7,r8
 	mr   r8,r9
+#ifdef __powerpc64__
+	addi r1, r1, FRAME_MIN_SIZE
+#else
+	addi r1,r1,16
+#endif
+        cfi_def_cfa_offset (0)
 	sc
 	PSEUDO_RET
 PSEUDO_END (syscall)

-----------------------------------------------------------------------


hooks/post-receive
-- 
GNU C Library master sources

