This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



[PATCH] nptl: Consolidate pthread_{timed,try}join{_np}


This patch consolidates pthread_join and its GNU extensions to avoid
code duplication.  The functions pthread_join, pthread_tryjoin_np, and
pthread_timedjoin_np are now based on __pthread_timedjoin_ex.
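
In effect the three public entry points become thin wrappers around the
new internal helper.  The sketch below only shows the resulting call
shapes; the actual definitions are in the diff:

    /* New internal helper; BLOCK selects whether the caller may wait.  */
    int __pthread_timedjoin_ex (pthread_t, void **, const struct timespec *,
                                bool block);

    pthread_join (t, ret)             -> __pthread_timedjoin_ex (t, ret, NULL, true)
    pthread_timedjoin_np (t, ret, ts) -> __pthread_timedjoin_ex (t, ret, ts, true)
    pthread_tryjoin_np (t, ret)       -> EBUSY if the thread is still running,
                                         otherwise
                                         __pthread_timedjoin_ex (t, ret, NULL, false)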

It also fixes some inconsistencies in the ESRCH, EINVAL, and EDEADLK
handling (where each implementation differed from the others) and in
the cleanup handler (which now always uses a CAS).
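
For reviewers, the following self-contained program (not part of the
patch) illustrates the user-visible conventions the three interfaces
share.  The deadlock and double-join cases are "may fail" conditions in
POSIX, so the printed values are illustrative rather than guaranteed:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    static void *
    worker (void *arg)
    {
      sleep (2);
      return NULL;
    }

    int
    main (void)
    {
      pthread_t t;
      void *ret;
      pthread_create (&t, NULL, worker, NULL);

      /* Non-blocking join of a still-running thread: EBUSY.  */
      printf ("tryjoin:   %s\n", strerror (pthread_tryjoin_np (t, &ret)));

      /* Join with an absolute CLOCK_REALTIME deadline that expires
         before the thread exits: ETIMEDOUT.  */
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      ts.tv_sec += 1;
      printf ("timedjoin: %s\n", strerror (pthread_timedjoin_np (t, &ret, &ts)));

      /* Joining ourselves is a deadlock situation ("may fail" case,
         glibc reports EDEADLK here).  */
      printf ("self-join: %s\n", strerror (pthread_join (pthread_self (), &ret)));

      /* Finally reap the worker; 0 on success.  */
      printf ("join:      %s\n", strerror (pthread_join (t, &ret)));
      return 0;
    }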

Checked on i686-linux-gnu and x86_64-linux-gnu.

	* nptl/pthreadP.h (__pthread_timedjoin_ex): New prototype.
	* nptl/pthread_join.c (__pthread_join): Use __pthread_timedjoin_ex.
	* nptl/pthread_tryjoin.c (pthread_tryjoin_np): Likewise.
	* nptl/pthread_timedjoin.c (cleanup): Use CAS on argument setting.
	(__pthread_timedjoin_ex): New function with the common code from
	pthread_join.
	(pthread_timedjoin_np): Rewrite to call __pthread_timedjoin_ex.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_timedwait_tid):
	Remove superfluous checks.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_timedwait_tid):
	Likewise.
---
 ChangeLog                                     | 13 ++++
 nptl/pthreadP.h                               |  3 +
 nptl/pthread_join.c                           | 94 +--------------------------
 nptl/pthread_timedjoin.c                      | 80 +++++++++++++++--------
 nptl/pthread_tryjoin.c                        | 43 ++----------
 sysdeps/unix/sysv/linux/i386/lowlevellock.h   |  7 +-
 sysdeps/unix/sysv/linux/x86_64/lowlevellock.h |  7 +-
 7 files changed, 75 insertions(+), 172 deletions(-)

diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 713000e..2646b8f 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -506,6 +506,8 @@ extern int __pthread_setcanceltype (int type, int *oldtype);
 extern int __pthread_enable_asynccancel (void) attribute_hidden;
 extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden;
 extern void __pthread_testcancel (void);
+extern int __pthread_timedjoin_ex (pthread_t, void **, const struct timespec *,
+				   bool);
 
 #if IS_IN (libpthread)
 hidden_proto (__pthread_mutex_init)
@@ -524,6 +526,7 @@ hidden_proto (__pthread_setcancelstate)
 hidden_proto (__pthread_testcancel)
 hidden_proto (__pthread_mutexattr_init)
 hidden_proto (__pthread_mutexattr_settype)
+hidden_proto (__pthread_timedjoin_ex)
 #endif
 
 extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond);
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index afc8c37..47379a6 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -16,103 +16,11 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
-#include <errno.h>
-#include <stdlib.h>
-
-#include <atomic.h>
 #include "pthreadP.h"
 
-#include <stap-probe.h>
-
-
-static void
-cleanup (void *arg)
-{
-  /* If we already changed the waiter ID, reset it.  The call cannot
-     fail for any reason but the thread not having done that yet so
-     there is no reason for a loop.  */
-  (void) atomic_compare_and_exchange_bool_acq ((struct pthread **) arg, NULL,
-					       THREAD_SELF);
-}
-
-
 int
 __pthread_join (pthread_t threadid, void **thread_return)
 {
-  struct pthread *pd = (struct pthread *) threadid;
-
-  /* Make sure the descriptor is valid.  */
-  if (INVALID_NOT_TERMINATED_TD_P (pd))
-    /* Not a valid thread handle.  */
-    return ESRCH;
-
-  /* Is the thread joinable?.  */
-  if (IS_DETACHED (pd))
-    /* We cannot wait for the thread.  */
-    return EINVAL;
-
-  struct pthread *self = THREAD_SELF;
-  int result = 0;
-
-  LIBC_PROBE (pthread_join, 1, threadid);
-
-  /* During the wait we change to asynchronous cancellation.  If we
-     are canceled the thread we are waiting for must be marked as
-     un-wait-ed for again.  */
-  pthread_cleanup_push (cleanup, &pd->joinid);
-
-  /* Switch to asynchronous cancellation.  */
-  int oldtype = CANCEL_ASYNC ();
-
-  if ((pd == self
-       || (self->joinid == pd
-	   && (pd->cancelhandling
-	       & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
-		  | TERMINATED_BITMASK)) == 0))
-      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
-    /* This is a deadlock situation.  The threads are waiting for each
-       other to finish.  Note that this is a "may" error.  To be 100%
-       sure we catch this error we would have to lock the data
-       structures but it is not necessary.  In the unlikely case that
-       two threads are really caught in this situation they will
-       deadlock.  It is the programmer's problem to figure this
-       out.  */
-    result = EDEADLK;
-  /* Wait for the thread to finish.  If it is already locked something
-     is wrong.  There can only be one waiter.  */
-  else if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
-								   self,
-								   NULL), 0))
-    /* There is already somebody waiting for the thread.  */
-    result = EINVAL;
-  else
-    /* Wait for the child.  */
-    lll_wait_tid (pd->tid);
-
-
-  /* Restore cancellation mode.  */
-  CANCEL_RESET (oldtype);
-
-  /* Remove the handler.  */
-  pthread_cleanup_pop (0);
-
-
-  if (__glibc_likely (result == 0))
-    {
-      /* We mark the thread as terminated and as joined.  */
-      pd->tid = -1;
-
-      /* Store the return value if the caller is interested.  */
-      if (thread_return != NULL)
-	*thread_return = pd->result;
-
-
-      /* Free the TCB.  */
-      __free_tcb (pd);
-    }
-
-  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
-
-  return result;
+  return __pthread_timedjoin_ex (threadid, thread_return, NULL, true);
 }
 weak_alias (__pthread_join, pthread_join)
diff --git a/nptl/pthread_timedjoin.c b/nptl/pthread_timedjoin.c
index 567c171..6b894f2 100644
--- a/nptl/pthread_timedjoin.c
+++ b/nptl/pthread_timedjoin.c
@@ -21,21 +21,25 @@
 #include <atomic.h>
 #include "pthreadP.h"
 
+#include <stap-probe.h>
+
 
 static void
 cleanup (void *arg)
 {
-  *(void **) arg = NULL;
+  /* If we already changed the waiter ID, reset it.  The call cannot
+     fail for any reason but the thread not having done that yet so
+     there is no reason for a loop.  */
+  struct pthread *self = THREAD_SELF;
+  atomic_compare_exchange_weak_acquire ((struct pthread **) arg, &self, NULL);
 }
 
 
 int
-pthread_timedjoin_np (pthread_t threadid, void **thread_return,
-		      const struct timespec *abstime)
+__pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
+			const struct timespec *abstime, bool block)
 {
-  struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
-  int result;
 
   /* Make sure the descriptor is valid.  */
   if (INVALID_NOT_TERMINATED_TD_P (pd))
@@ -47,8 +51,17 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
     /* We cannot wait for the thread.  */
     return EINVAL;
 
-  self = THREAD_SELF;
-  if (pd == self || self->joinid == pd)
+  struct pthread *self = THREAD_SELF;
+  int result = 0;
+
+  LIBC_PROBE (pthread_join, 1, threadid);
+
+  if ((pd == self
+       || (self->joinid == pd
+	   && (pd->cancelhandling
+	       & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+		  | TERMINATED_BITMASK)) == 0))
+      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
     /* This is a deadlock situation.  The threads are waiting for each
        other to finish.  Note that this is a "may" error.  To be 100%
        sure we catch this error we would have to lock the data
@@ -60,45 +73,56 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
 
   /* Wait for the thread to finish.  If it is already locked something
      is wrong.  There can only be one waiter.  */
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
-							      self, NULL), 0))
+  else if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid,
+								   &self,
+								   NULL)))
     /* There is already somebody waiting for the thread.  */
     return EINVAL;
 
+  if (block)
+    {
+      /* During the wait we change to asynchronous cancellation.  If we
+	 are cancelled the thread we are waiting for must be marked as
+	 un-wait-ed for again.  */
+      pthread_cleanup_push (cleanup, &pd->joinid);
 
-  /* During the wait we change to asynchronous cancellation.  If we
-     are cancelled the thread we are waiting for must be marked as
-     un-wait-ed for again.  */
-  pthread_cleanup_push (cleanup, &pd->joinid);
-
-  /* Switch to asynchronous cancellation.  */
-  int oldtype = CANCEL_ASYNC ();
-
-
-  /* Wait for the child.  */
-  result = lll_timedwait_tid (pd->tid, abstime);
-
+      int oldtype = CANCEL_ASYNC ();
 
-  /* Restore cancellation mode.  */
-  CANCEL_RESET (oldtype);
+      if (abstime != NULL)
+	result = lll_timedwait_tid (pd->tid, abstime);
+      else
+	lll_wait_tid (pd->tid);
 
-  /* Remove the handler.  */
-  pthread_cleanup_pop (0);
+      CANCEL_RESET (oldtype);
 
+      pthread_cleanup_pop (0);
+    }
 
-  /* We might have timed out.  */
-  if (result == 0)
+  if (__glibc_likely (result == 0))
     {
+      /* We mark the thread as terminated and as joined.  */
+      pd->tid = -1;
+
       /* Store the return value if the caller is interested.  */
       if (thread_return != NULL)
 	*thread_return = pd->result;
 
-
       /* Free the TCB.  */
       __free_tcb (pd);
     }
   else
     pd->joinid = NULL;
 
+  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
+
   return result;
 }
+hidden_def (__pthread_timedjoin_ex)
+
+int
+__pthread_timedjoin_np (pthread_t threadid, void **thread_return,
+			const struct timespec *abstime)
+{
+  return __pthread_timedjoin_ex (threadid, thread_return, abstime, true);
+}
+weak_alias (__pthread_timedjoin_np, pthread_timedjoin_np)
diff --git a/nptl/pthread_tryjoin.c b/nptl/pthread_tryjoin.c
index 6a3b62e..e7c7595 100644
--- a/nptl/pthread_tryjoin.c
+++ b/nptl/pthread_tryjoin.c
@@ -26,47 +26,12 @@
 int
 pthread_tryjoin_np (pthread_t threadid, void **thread_return)
 {
-  struct pthread *self;
-  struct pthread *pd = (struct pthread *) threadid;
-
-  /* Make sure the descriptor is valid.  */
-  if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
-    /* Not a valid thread handle.  */
-    return ESRCH;
-
-  /* Is the thread joinable?.  */
-  if (IS_DETACHED (pd))
-    /* We cannot wait for the thread.  */
-    return EINVAL;
-
-  self = THREAD_SELF;
-  if (pd == self || self->joinid == pd)
-    /* This is a deadlock situation.  The threads are waiting for each
-       other to finish.  Note that this is a "may" error.  To be 100%
-       sure we catch this error we would have to lock the data
-       structures but it is not necessary.  In the unlikely case that
-       two threads are really caught in this situation they will
-       deadlock.  It is the programmer's problem to figure this
-       out.  */
-    return EDEADLK;
-
   /* Return right away if the thread hasn't terminated yet.  */
+  struct pthread *pd = (struct pthread *) threadid;
   if (pd->tid != 0)
     return EBUSY;
 
-  /* Wait for the thread to finish.  If it is already locked something
-     is wrong.  There can only be one waiter.  */
-  if (atomic_compare_and_exchange_bool_acq (&pd->joinid, self, NULL))
-    /* There is already somebody waiting for the thread.  */
-    return EINVAL;
-
-  /* Store the return value if the caller is interested.  */
-  if (thread_return != NULL)
-    *thread_return = pd->result;
-
-
-  /* Free the TCB.  */
-  __free_tcb (pd);
-
-  return 0;
+  /* If pd->tid == 0 then lll_wait_tid will not block on the futex
+     operation.  */
+  return __pthread_timedjoin_ex (threadid, thread_return, NULL, false);
 }
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 197bb1f..098c64b 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -237,12 +237,7 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
   ({									      \
     int __result = 0;							      \
     if ((tid) != 0)							      \
-      {									      \
-	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
-	  __result = EINVAL;						      \
-	else								      \
-	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
-      }									      \
+      __result = __lll_timedwait_tid (&(tid), (abstime));		      \
     __result; })
 
 
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index cbf6597..0b56a2e 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -249,12 +249,7 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
   ({									      \
     int __result = 0;							      \
     if ((tid) != 0)							      \
-      {									      \
-	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
-	  __result = EINVAL;						      \
-	else								      \
-	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
-      }									      \
+      __result = __lll_timedwait_tid (&(tid), (abstime));		      \
     __result; })
 
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
-- 
2.7.4

