This is the mail archive of the
libc-alpha@sources.redhat.com
mailing list for the glibc project.
[RFC/PATCH] RT-NPTL-2.2 3/5
- From: "Hu, Boris" <boris dot hu at intel dot com>
- To: <libc-alpha at sources dot redhat dot com>
- Cc: "Robustmutexes" <robustmutexes at lists dot osdl dot org>
- Date: Thu, 29 Apr 2004 17:52:46 +0800
- Subject: [RFC/PATCH] RT-NPTL-2.2 3/5
i386/lowlevellock.h | 148 ++++++++++++++++++++++++-
i386/pthread_cond_broadcast.c | 87 ++++++++++++++
i386/pthread_cond_signal.c | 69 +++++++++++
i386/pthread_cond_timedwait.c | 198 +++++++++++++++++++++++++++++++++
i386/pthread_cond_wait.c | 167 ++++++++++++++++++++++++++++
internaltypes.h | 19 +++
lowlevelrtlock.c | 247
++++++++++++++++++++++++++++++++++++++++++
pthread_mutex_cond_lock.c | 16 ++
8 files changed, 947 insertions(+), 4 deletions(-)
---
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/internaltypes.h:1.
1.1.1.2.3 Fri Mar 26 02:41:40 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/internaltypes.h
Sat Apr 17 11:54:04 2004
@@ -48,7 +48,17 @@
#define ATTR_FLAG_SCHED_SET 0x0020
#define ATTR_FLAG_POLICY_SET 0x0040
+#define PRIOCEILING_MASK 0x01fc0000
+#define PRIOCEILING_OFFSET 18
+#define MIN_USER_RT_PRIO 1
+#define MAX_USER_RT_PRIO 100
+/* Bit set if fast-path mode. */
+#define FULOCK_FASTPATH_MODE 0x04000000
+/* Bit set if serial mode. */
+#define FULOCK_SERIAL_MODE 0x02000000
+
+#define NON_MUTEX_KIND_MASK 0xfffc0000
/* Mutex attribute data structure. */
struct pthread_mutexattr
{
@@ -56,7 +66,14 @@
Bit 31 is set if the mutex is to be shared between processes.
- Bit 0 to 30 contain one of the PTHREAD_MUTEX_ values to identify
+ Flags for realtime mutex extension.
+ Bit 30 and 29 for mutex protocol attributes.
+ Bit 28 and 27 for mutex robustness attributes.
+ Bit 26 for fulock fast-path and KCO mode switching flag.
+ Bit 25 for fulock serialization switching flag.
+ Bit 18-24 to record priority ceiling value.
+
+ Bit 0 to 17 contain one of the PTHREAD_MUTEX_ values to identify
the type of the mutex. */
int mutexkind;
};
--- /dev/null Thu Apr 29 09:10:11 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/lowlevelrtlock.c
Thu Apr 8 09:22:03 2004
@@ -0,0 +1,247 @@
+/*
+ * (C) 2003 Intel Corporation
+ * Boris Hu <boris.hu@intel.com>
+ *
+ * Distributed under the FSF's LGPL license, v2 or later. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <sys/syscall.h>
+#include <pthread.h>
+#include <pthreadP.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <atomic.h>
+
+#include <linux/fulock.h>
+
+
+/* Return non-zero if MUTEX was initialized with a robust attribute.
+   The kernel robustness bits (FULOCK_FL_RM / FULOCK_FL_RM_SUN) are
+   stored shifted right by one inside __data.__kind (see __LK_FL,
+   which shifts left by one to recover them), hence the >> 1.  */
+inline int
+__attribute__ ((always_inline))
+is_mutex_robust (const pthread_mutex_t *mutex)
+{
+ return (mutex->__data.__kind & ((FULOCK_FL_RM | FULOCK_FL_RM_SUN) >>
1));
+}
+
+
+/* Same robustness test as is_mutex_robust, but applied to a mutex
+   attribute object's mutexkind field before the mutex exists.  */
+inline int
+__attribute__ ((always_inline))
+is_mutexattr_robust (const struct pthread_mutexattr *attr)
+{
+ return (attr->mutexkind & ((FULOCK_FL_RM | FULOCK_FL_RM_SUN) >> 1));
+}
+
+
+/* Return non-zero if MUTEX is currently in the robust "consistent"
+   state.  The original ignored the return value of
+   pthread_mutex_getconsistency_np, so on failure the comparison read
+   an uninitialized 'state' (undefined behavior); a failed query is
+   now reported as "not healthy" instead.  */
+inline int
+__attribute__ ((always_inline))
+is_mutex_healthy (const pthread_mutex_t *mutex)
+{
+ int state;
+ if (pthread_mutex_getconsistency_np (mutex, &state) != 0)
+ return 0;
+ return PTHREAD_MUTEX_ROBUST_CONSISTENT_NP == state;
+}
+
+
+/* Try to acquire the fulock at *VFULOCK for thread TID without
+   blocking.  Fast path: compare-and-swap UNLOCKED -> TID in user
+   space.  The ufulock_lock syscall is used instead when the fulock
+   is in kernel-controlled-only (KCO) mode, has waiters parked in the
+   kernel (VFULOCK_WP), or is robust (FULOCK_FL_RM).  Returns 0 on
+   acquisition, EBUSY if held, ENOTRECOVERABLE/EOWNERDEAD for robust
+   states; any other syscall result restarts the fast path.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_trylock (volatile unsigned *vfulock, unsigned tid)
+{
+ unsigned old_value;
+ int result;
+ unsigned flags = __LK_FL (vfulock);
+ INTERNAL_SYSCALL_DECL (err);
+
+ if (flags & FULOCK_FL_KCO)
+ goto kco_mode;
+ restart:
+ result = EBUSY;
+ old_value = atomic_compare_and_exchange_val_acq (vfulock, tid,
VFULOCK_UNLOCKED);
+ if (old_value == VFULOCK_UNLOCKED) /* If it was unlocked, fulock acquired */
+ result = 0;
+ else if (old_value == VFULOCK_NR)
+ result = ENOTRECOVERABLE;
+ else if ((old_value == VFULOCK_WP) || (flags & FULOCK_FL_RM))
+ {
+ /* NOTE: jumped to from above when the fulock is KCO-mode; the
+    kernel then arbitrates the trylock.  */
+ kco_mode:
+ result = INTERNAL_SYSCALL (ufulock_lock, err, 3, vfulock, flags,
0);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+ result = INTERNAL_SYSCALL_ERRNO (result, err);
+
+ switch (result)
+ {
+ case 0:
+ case EBUSY:
+ case ETIMEDOUT:
+ case EOWNERDEAD:
+ case ENOTRECOVERABLE:
+ return result;
+ default:
+ goto restart;
+ }
+ }
+ return result; /* Taken (waiters in kernel)! */
+}
+
+
+/* Acquire the fulock at *VFULOCK for thread PID, with a deadline.
+   Despite the name, REL is an *absolute* wall-clock time: it is
+   converted to a relative timeout against gettimeofday below.
+   REL == (void *) -1 requests blocking forever; REL == NULL passes a
+   NULL timeout to the kernel (presumably also "no timeout" --
+   confirm against the fusyn ufulock_lock API).
+   NOTE(review): the adaptive-spin bookkeeping is dead code -- 'cnt'
+   is never incremented because the respin loop is commented out, so
+   __spins only decays toward zero.  The trailing 'return 0' is
+   unreachable: every switch branch returns or restarts.  Also,
+   assigning vfulock to a pthread_mutex_t * drops the volatile
+   qualifier without a cast.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_timedlock (volatile unsigned *vfulock, unsigned flags,
+ unsigned pid, const struct timespec *rel)
+{
+ int result;
+ struct timeval tv;
+ struct timespec rt, *p = NULL;
+ INTERNAL_SYSCALL_DECL (err);
+ pthread_mutex_t *mutex = vfulock;
+
+ if ((void *)-1 != rel && NULL != rel) {
+ if (rel->tv_nsec < 0 || rel->tv_nsec >= 1000000000)
+ return EINVAL;
+
+ (void) __gettimeofday (&tv, NULL);
+
+ /* Convert the absolute deadline to a relative timeout.  */
+ rt.tv_sec = rel->tv_sec - tv.tv_sec;
+ rt.tv_nsec = rel->tv_nsec - tv.tv_usec * 1000;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+ if (rt.tv_sec < 0)
+ return ETIMEDOUT;
+ p = &rt;
+ } else if ((void *)-1 == rel)
+ p = (void *)-1;
+
+restart:
+ /* User-space fast path: CAS UNLOCKED -> PID unless KCO mode.  */
+ if (!(flags & FULOCK_FL_KCO)
+ && ! atomic_compare_and_exchange_bool_acq (vfulock, pid,
VFULOCK_UNLOCKED))
+ return 0;
+
+ int cnt = 0;
+ int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
+ mutex->__data.__spins * 2 + 10);
+ //FIXME: should we respin it again? for pthread_mutex_lock has done it.
+ //if (cnt++ < max_cnt)
+ // goto restart;
+
+ result = INTERNAL_SYSCALL (ufulock_lock, err, 3, vfulock, flags, p);
+
+ result = INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err) : result;
+
+ mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
+
+ switch (result)
+ {
+ case 0:
+ case EBUSY:
+ case ETIMEDOUT:
+ case EOWNERDEAD:
+ case ENOTRECOVERABLE:
+ return result;
+ default:
+ goto restart;
+ }
+ return 0; /* Lock acquired.  (NOTE: unreachable -- see above.)  */
+}
+
+
+/* Acquire the fulock with no deadline: (void *) -1 selects the
+   "block forever" path in __lll_rtmutex_timedlock.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_lock (volatile unsigned *vfulock, unsigned flags,
+ unsigned tid)
+{
+ return __lll_rtmutex_timedlock (vfulock, flags, tid, (void *) -1);
+}
+
+
+/* Release the fulock at *VFULOCK.  User-space fast path: CAS the
+   current value back to UNLOCKED.  The ufulock_unlock syscall is
+   used when the fulock is KCO-mode, not-recoverable is reported
+   directly, and values >= VFULOCK_WP (waiters parked in the kernel)
+   also go to the kernel.  Robust fulocks force serial mode for the
+   kernel handoff.  Returns 0, EPERM, ENOTRECOVERABLE, or a syscall
+   errno.  NOTE(review): the TID parameter is unused here --
+   presumably kept for signature symmetry with the lock paths;
+   confirm no caller relies on an ownership check.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_unlock (volatile unsigned *vfulock, unsigned flags,
+ unsigned tid)
+{
+ int result = EPERM;
+ unsigned old_value;
+ INTERNAL_SYSCALL_DECL (err);
+ unsigned rtflags = __RT_FL (vfulock);
+
+ /* Robust fulocks are always released in serial mode.  */
+ rtflags |= flags & FULOCK_FL_RM ? FULOCK_SERIAL_MODE : 0;
+
+ if (flags & FULOCK_FL_KCO)
+ goto kco_mode;
+ while (1)
+ {
+ old_value = *vfulock;
+ if (old_value == VFULOCK_NR)
+ {
+ result = ENOTRECOVERABLE;
+ break;
+ }
+ else if (old_value >= VFULOCK_WP)
+ {
+ kco_mode:
+ result = INTERNAL_SYSCALL (ufulock_unlock, err, 3, vfulock,
flags,
+ rtflags & FULOCK_SERIAL_MODE ? 0 : 1);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err)) {
+ result = INTERNAL_SYSCALL_ERRNO (result, err);
+ break;
+ }
+ break;
+ }
+ else if (! atomic_compare_and_exchange_bool_acq (vfulock,
VFULOCK_UNLOCKED,
+ old_value)) {
+ result = 0;
+ break;
+ }
+ }
+ return result;
+}
+
+
+/* Unconditionally release the fulock through the ufulock_unlock
+   syscall, skipping the user-space fast path and state checks.
+   Returns 0 or the syscall's errno.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_unlock_nocheck (volatile unsigned *vfulock)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+ result = INTERNAL_SYSCALL (ufulock_unlock, err, 3, vfulock, flags,
0);
+ return INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err)
+ : result;
+}
+
+
+/* Set the robust-consistency state of the fulock at *VFULOCK via the
+   ufulock_ctl syscall.  Returns 0 on success or the syscall's errno
+   on failure.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_set_consistency (volatile unsigned *vfulock,
+ enum fulock_ctl consistency)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+ result = INTERNAL_SYSCALL (ufulock_ctl, err, 3, vfulock,
+ flags, consistency);
+ return INTERNAL_SYSCALL_ERROR_P (result, err)
+ ? INTERNAL_SYSCALL_ERRNO (result, err)
+ : result;
+}
+
+
+/* Query the consistency state of the fulock at *VFULOCK via
+   ufulock_ctl (third argument 0 -- presumably the query command;
+   confirm against the fusyn API) and store it in *STATE.  Returns 0
+   on success or the syscall's errno on failure, mirroring
+   __lll_rtmutex_set_consistency.  The original returned 0
+   unconditionally and left *STATE unset when the syscall failed, so
+   callers could read an indeterminate value.  */
+inline int
+__attribute__ ((always_inline))
+__lll_rtmutex_get_consistency (volatile unsigned *vfulock,
+ int *state)
+{
+ unsigned flags = __LK_FL (vfulock);
+ int result;
+ INTERNAL_SYSCALL_DECL (err);
+
+ result = INTERNAL_SYSCALL (ufulock_ctl, err, 3, vfulock, flags, 0);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+ return INTERNAL_SYSCALL_ERRNO (result, err);
+ *state = result;
+ return 0;
+}
---
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond
_lock.c:1.1.1.1.2.1 Fri Mar 26 02:41:40 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond
_lock.c Tue Mar 30 09:20:29 2004
@@ -1,7 +1,19 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock(mutex)
+#ifdef USE_FUSYN_ROBUST_MUTEX
+# define LLL_MUTEX_LOCK(mutex, tid) \
+ do { \
+ result = lll_rtmutex_lock (mutex,tid); \
+ if (__builtin_expect (0 != result, 0)) \
+ goto out_err; \
+ } while (0)
+# define LLL_MUTEX_TRYLOCK(mutex, tid) \
+ lll_rtmutex_trylock(mutex, tid)
+#else
+# define LLL_MUTEX_LOCK(mutex, tid) lll_mutex_cond_lock(mutex)
+# define LLL_MUTEX_TRYLOCK(mutex, tid) lll_mutex_cond_trylock(mutex)
+#endif
+
#define __pthread_mutex_lock __pthread_mutex_cond_lock
#define NO_INCR
---
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.
h:1.1.1.1.2.2 Fri Mar 26 02:41:42 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.
h Tue Apr 13 11:37:06 2004
@@ -2,6 +2,9 @@
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+ Redirect lowlevellock to use Fast User SYNchronization (fusyn).
+ Boris Hu <boris.hu@intel.com>, 2003
+
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -35,7 +38,7 @@
#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
-
+#define FUTEX_REQUEUE 3
/* Initializer for compatibility lock. */
#define LLL_MUTEX_LOCK_INITIALIZER (0)
@@ -93,6 +96,23 @@
"i" (offsetof (tcbhead_t, sysinfo)));
\
} while (0)
+#define lll_futex_timed_wait(futex, val, timespec) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAIT, val,
\
+ timespec, 0);
\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+#define lll_futex_requeue(futex, nr_wake, nr_move, mutex) \
+ ({ int ret;
\
+ INTERNAL_SYSCALL_DECL (err);
\
+ ret = INTERNAL_SYSCALL (futex, err, 5, futex, FUTEX_WAKE, INT_MAX,
0, 0);\
+ INTERNAL_SYSCALL_ERROR_P (ret, err) ? INTERNAL_SYSCALL_ERRNO (ret,
err) \
+ : ret; })
+
+//#warning FIXME: requeue feature will be added when the fusyn is
ready.
+
/* Does not preserve %eax and %ecx. */
extern int __lll_mutex_lock_wait (int val, int *__futex)
@@ -370,5 +390,131 @@
#define lll_cond_broadcast(cond) \
__lll_cond_broadcast (cond)
+#include <linux/fulock.h>
+
+#ifdef USE_FUSYN_ROBUST_MUTEX
+//#define SEA_DEBUG 1 // To Enable the debug info
+#ifdef SEA_DEBUG
+/* Indicate location */
+# define SEA_L
\
+ do {
\
+ unsigned id = THREAD_GETMEM (THREAD_SELF, tid);
\
+ printf("[%d] %s:%s() %d line: \n", id, __FILE__,
\
+ __FUNCTION__, __LINE__);
\
+ } while (0)
+
+/* Location-aware printf */
+# define SEA_P(fmt, args...)
\
+ do {
\
+ unsigned id = THREAD_GETMEM (THREAD_SELF, tid);
\
+ printf("[%d] %s:%s() %d line: " fmt "\n",
\
+ id, __FILE__,__FUNCTION__,__LINE__,args);
\
+ } while (0)
+
+#else
+# define SEA_L
+# define SEA_P(fmt, args...)
+#endif
+
+/* Add lll_rtmutex_* to support fusyn */
+#define FUSYN_FL_RT_MASK (0x78000000 | FULOCK_FASTPATH_MODE |
\
+ FULOCK_SERIAL_MODE | PRIOCEILING_MASK)
+#define FUSYN_FL_RT2K_MASK (0x78000000 | FULOCK_FASTPATH_MODE)
+/* Get fusyn flags. */
+#define __LK_FL(fulock)
\
+ ({ unsigned k_flags = 0;
\
+ unsigned flags = ((pthread_mutex_t *)(fulock))->__data.__kind
\
+ & FUSYN_FL_RT2K_MASK;
\
+ k_flags = ((flags << 1) & FULOCK_FL_USER_MK);
\
+ k_flags;})
+/* Get rtnptl flags. */
+#define __RT_FL(fulock)
\
+ ({ unsigned flags = ((pthread_mutex_t *)(fulock))->__data.__kind
\
+ & FUSYN_FL_RT_MASK;
\
+ flags;})
+
+
+extern int is_mutex_robust(const pthread_mutex_t *mutex);
+
+extern int __lll_rtmutex_trylock (volatile unsigned *vfulock, unsigned
tid);
+#define lll_rtmutex_trylock(futex, tid) __lll_rtmutex_trylock(&(futex),
tid)
+
+
+extern int __lll_rtmutex_timedlock (volatile unsigned *vfulock,
unsigned flags,
+ unsigned tid, const struct timespec
*rel);
+#define lll_rtmutex_timedlock(futex, tid, timeout) \
+ __lll_rtmutex_timedlock(&(futex), __LK_FL(&(futex)), tid,
timeout)
+
+
+extern int __lll_rtmutex_lock (volatile unsigned *vfulock, unsigned
flags,
+ unsigned tid);
+#define lll_rtmutex_lock(futex, tid) \
+ __lll_rtmutex_lock(&(futex), __LK_FL(&(futex)), tid)
+
+
+extern int __lll_rtmutex_unlock (volatile unsigned *vfulock, unsigned
flags,
+ unsigned tid);
+#define lll_rtmutex_unlock(futex, tid) \
+ __lll_rtmutex_unlock(&(futex), __LK_FL(&(futex)), tid)
+
+
+extern int __lll_rtmutex_unlock_nocheck (volatile unsigned *vfulock);
+#define lll_rtmutex_unlock_nocheck(futex) \
+ __lll_rtmutex_unlock_nocheck(&(futex))
+
+
+extern int __lll_rtmutex_set_consistency (volatile unsigned *vfulock,
+ enum fulock_ctl consistency)
;
+#define lll_rtmutex_set_consistency(futex, state) \
+ __lll_rtmutex_set_consistency(&(futex), state)
+
+
+extern int __lll_rtmutex_get_consistency (volatile unsigned *vfulock,
+ int *state);
+#define lll_rtmutex_get_consistency(futex, state) \
+ __lll_rtmutex_get_consistency(&(futex), state)
+
+
+#define CONDVAR_RM_FLAGS FULOCK_FL_RM
+
+#define lll_cmutex_lock(fulock, id) \
+ do {
\
+ while (__lll_rtmutex_lock(&(fulock), CONDVAR_RM_FLAGS, id))
\
+ lll_rtmutex_set_consistency(fulock,
\
+ PTHREAD_MUTEX_ROBUST_CONSISTENT_NP);
\
+ } while (0)
+
+#define lll_cmutex_unlock(fulock, id) \
+ __lll_rtmutex_unlock(&(fulock), CONDVAR_RM_FLAGS, id)
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_cmutex_lock (mutex, tid)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_cmutex_unlock (mutex, tid)
+
+#else /* Normal NPTL */
+
+#define FUSYN_FL_RT_MASK 0
+
+#define LLL_CMUTEX_LOCK(mutex, tid) \
+ lll_mutex_lock(mutex)
+
+#define LLL_CMUTEX_UNLOCK(mutex, tid) \
+ lll_mutex_unlock (mutex)
+
+#define lll_rtmutex_get_consistency(futex, state) 0
+
+#define lll_rtmutex_set_consistency(futex, state) 0
+
+#define lll_rtmutex_unlock_nocheck(futex) 0
+
+#define __LK_FL(fulock) 0
+#define __RT_FL(fulock) 0
+
+
+#endif /* USE_FUSYN_ROBUST_MUTEX */
+
+
#endif /* lowlevellock.h */
--- /dev/null Thu Apr 29 09:10:11 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_
broadcast.c Tue Mar 30 09:20:29 2004
@@ -0,0 +1,87 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ Hacked to add robust features to condvar by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+
+
+/* Wake every thread currently waiting on COND by advancing
+   __wakeup_seq to __total_seq.  The condvar's internal lock is taken
+   through LLL_CMUTEX_* (a robust fulock under USE_FUSYN_ROBUST_MUTEX),
+   so a waiter dying while holding it cannot wedge the condvar.  */
+int
+__pthread_cond_broadcast (cond)
+ pthread_cond_t *cond;
+{
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+ {
+ /* Yes. Mark them all as woken. */
+ cond->__data.__wakeup_seq = cond->__data.__total_seq;
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+ /* The futex syscall operates on a 32-bit word. That is fine,
+ we just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ /* Do not use requeue for pshared condvars. */
+ if (cond->__data.__mutex == (void *) ~0l)
+ goto wake_all;
+
+ /* Wake everybody.  NOTE(review): in this patch lll_futex_requeue
+    is defined to plain FUTEX_WAKE of INT_MAX (requeue disabled
+    until fusyn supports it), so this branch currently wakes rather
+    than requeues.  */
+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+ if (__builtin_expect (lll_futex_requeue (futex, 1, INT_MAX,
+ &mut->__data.__lock) ==
-EINVAL,
+ 0))
+ {
+ /* The requeue functionality is not available. */
+ wake_all:
+ lll_futex_wake (futex, INT_MAX);
+ }
+
+ /* That's all. */
+ return 0;
+ }
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_broadcast,
pthread_cond_broadcast,
+ GLIBC_2_3_2);
--- /dev/null Thu Apr 29 09:10:11 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_
signal.c Tue Mar 30 09:20:30 2004
@@ -0,0 +1,69 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ Hacked to add robust features to condvar by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+
+
+/* Wake at most one thread waiting on COND by bumping __wakeup_seq
+   and waking one futex waiter.  The condvar's internal lock is a
+   robust fulock (LLL_CMUTEX_*) under USE_FUSYN_ROBUST_MUTEX.  */
+int
+__pthread_cond_signal (cond)
+ pthread_cond_t *cond;
+{
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+ {
+ /* Yes. Mark one of them as woken. */
+ ++cond->__data.__wakeup_seq;
+
+ /* The futex syscall operates on a 32-bit word. That is fine,
+ we just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ /* Wake one. */
+ lll_futex_wake (futex, 1);
+ }
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_signal,
pthread_cond_signal,
+ GLIBC_2_3_2);
--- /dev/null Thu Apr 29 09:10:11 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_
timedwait.c Tue Mar 30 09:20:30 2004
@@ -0,0 +1,198 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ Hacked to add robust features to condvar by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+
+
+/* Cleanup handler, defined in pthread_cond_wait.c. */
+extern void __condvar_cleanup (void *arg)
+ __attribute__ ((visibility ("hidden")));
+
+/* State handed to __condvar_cleanup if the wait is cancelled.  */
+struct _condvar_cleanup_buffer
+{
+ int oldtype;   /* Cancellation type saved before blocking.  */
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+};
+
+/* Wait on COND until woken by a signal/broadcast or until the
+   absolute deadline ABSTIME passes.  MUTEX must be held by the
+   caller; it is released while waiting and re-acquired before
+   returning.  Returns 0, ETIMEDOUT, EINVAL, or an error from the
+   mutex operations.  The condvar's internal lock is a robust fulock
+   (LLL_CMUTEX_*) under USE_FUSYN_ROBUST_MUTEX.  */
+int
+__pthread_cond_timedwait (cond, mutex, abstime)
+     pthread_cond_t *cond;
+     pthread_mutex_t *mutex;
+     const struct timespec *abstime;
+{
+  struct _pthread_cleanup_buffer buffer;
+  struct _condvar_cleanup_buffer cbuffer;
+  int result = 0;
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+  /* Catch invalid parameters.  Also reject negative tv_nsec: the
+     original test only caught values >= 1e9, letting a negative
+     nanosecond count through to the timeout arithmetic below.  */
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    return EINVAL;
+
+  /* Make sure we are alone.  */
+  LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+  /* Now we can release the mutex.  */
+  int err = __pthread_mutex_unlock_usercnt (mutex, 0);
+  if (err)
+    {
+      LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+      return err;
+    }
+
+  /* We have one new user of the condvar.  */
+  ++cond->__data.__total_seq;
+
+  /* Remember the mutex we are using here.  If there is already a
+     different address store this is a bad user bug.  Do not store
+     anything for pshared condvars.  */
+  if (cond->__data.__mutex != (void *) ~0l)
+    cond->__data.__mutex = mutex;
+
+  /* Prepare structure passed to cancellation handler.  */
+  cbuffer.cond = cond;
+  cbuffer.mutex = mutex;
+
+  /* Before we block we enable cancellation.  Therefore we have to
+     install a cancellation handler.  */
+  __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+  /* The current values of the wakeup counter.  The "woken" counter
+     must exceed this value.  */
+  unsigned long long int val;
+  unsigned long long int seq;
+  val = seq = cond->__data.__wakeup_seq;
+
+  /* The futex syscall operates on a 32-bit word.  That is fine, we
+     just use the low 32 bits of the sequence counter.  */
+#if BYTE_ORDER == LITTLE_ENDIAN
+  int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+  int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+  while (1)
+    {
+      struct timespec rt;
+      {
+#ifdef __NR_clock_gettime
+	INTERNAL_SYSCALL_DECL (err);
+	int val;
+	val = INTERNAL_SYSCALL (clock_gettime, err, 2,
+				cond->__data.__clock, &rt);
+# ifndef __ASSUME_POSIX_TIMERS
+	if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (val, err), 0))
+	  {
+	    struct timeval tv;
+	    (void) gettimeofday (&tv, NULL);
+
+	    /* Convert the absolute timeout value to a relative timeout.  */
+	    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+	    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+	  }
+	else
+# endif
+	  {
+	    /* Convert the absolute timeout value to a relative timeout.  */
+	    rt.tv_sec = abstime->tv_sec - rt.tv_sec;
+	    rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec;
+	  }
+#else
+	/* Get the current time.  So far we support only one clock.  */
+	struct timeval tv;
+	(void) gettimeofday (&tv, NULL);
+
+	/* Convert the absolute timeout value to a relative timeout.  */
+	rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+	rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+#endif
+      }
+      if (rt.tv_nsec < 0)
+	{
+	  rt.tv_nsec += 1000000000;
+	  --rt.tv_sec;
+	}
+      /* Did we already time out?  */
+      if (__builtin_expect (rt.tv_sec < 0, 0))
+	goto timeout;
+
+      /* Prepare to wait.  Release the condvar futex.  */
+      LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+      cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+      /* Wait until woken by signal or broadcast.  Note that we
+	 truncate the 'val' value to 32 bits.  */
+      err = lll_futex_timed_wait (futex, (unsigned int) val, &rt);
+
+      /* Disable asynchronous cancellation.  */
+      __pthread_disable_asynccancel (cbuffer.oldtype);
+
+      /* We are going to look at shared data again, so get the lock.  */
+      LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+      /* Check whether we are eligible for wakeup.  */
+      val = cond->__data.__wakeup_seq;
+      if (val > seq && cond->__data.__woken_seq < val)
+	break;
+
+      /* Not woken yet.  Maybe the time expired?  */
+      if (__builtin_expect (err == -ETIMEDOUT, 0))
+	{
+	timeout:
+	  /* Yep.  Adjust the counters.  */
+	  ++cond->__data.__wakeup_seq;
+
+	  /* The error value.  */
+	  result = ETIMEDOUT;
+	  break;
+	}
+    }
+
+  /* Another thread woken up.  */
+  ++cond->__data.__woken_seq;
+
+  /* We are done with the condvar.  */
+  LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+  /* The cancellation handling is back to normal, remove the handler.  */
+  __pthread_cleanup_pop (&buffer, 0);
+
+  /* Get the mutex before returning.  */
+  err = __pthread_mutex_cond_lock (mutex);
+
+  return err ?: result;
+}
+
+versioned_symbol (libpthread, __pthread_cond_timedwait,
pthread_cond_timedwait,
+ GLIBC_2_3_2);
--- /dev/null Thu Apr 29 09:10:11 2004
+++
robustmutexes/rtnptl/src/nptl/sysdeps/unix/sysv/linux/i386/pthread_cond_
wait.c Tue Mar 30 09:20:30 2004
@@ -0,0 +1,167 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ Hacked to add robust features to condvar by
+ Boris Hu <boris.hu@intel.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread.h>
+#include <pthreadP.h>
+
+#include <shlib-compat.h>
+
+
+/* State handed to __condvar_cleanup if the wait is cancelled.  */
+struct _condvar_cleanup_buffer
+{
+ int oldtype;   /* Cancellation type saved before blocking.  */
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+};
+
+
+/* Cancellation handler installed around the condvar wait: retire
+   this (cancelled) waiter from the sequence counters, wake all other
+   waiters so no signal is lost, and re-acquire the user's mutex.  */
+void
+__attribute__ ((visibility ("hidden")))
+__condvar_cleanup (void *arg)
+{
+ struct _condvar_cleanup_buffer *cbuffer =
+ (struct _condvar_cleanup_buffer *) arg;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* We are going to modify shared data. */
+ LLL_CMUTEX_LOCK (cbuffer->cond->__data.__lock, id);
+
+ /* This thread is not waiting anymore. Adjust the sequence counters
+ appropriately. */
+ ++cbuffer->cond->__data.__wakeup_seq;
+ ++cbuffer->cond->__data.__woken_seq;
+
+ /* We are done. */
+ LLL_CMUTEX_UNLOCK (cbuffer->cond->__data.__lock, id);
+
+ /* Wake everybody to make sure no condvar signal gets lost. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+ lll_futex_wake (futex, INT_MAX);
+
+ /* Get the mutex before returning unless asynchronous cancellation
+ is in effect. */
+ __pthread_mutex_cond_lock (cbuffer->mutex);
+}
+
+
+/* Wait on COND until woken by a signal or broadcast.  MUTEX must be
+   held by the caller; it is released while waiting and re-acquired
+   before returning.  The condvar's internal lock is a robust fulock
+   (LLL_CMUTEX_*) under USE_FUSYN_ROBUST_MUTEX.  */
+int
+__pthread_cond_wait (cond, mutex)
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+{
+ struct _pthread_cleanup_buffer buffer;
+ struct _condvar_cleanup_buffer cbuffer;
+ int err;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ /* Make sure we are alone. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Now we can release the mutex. */
+ err = __pthread_mutex_unlock_usercnt (mutex, 0);
+ if (err)
+ {
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+ return err;
+ }
+
+ /* We have one new user of the condvar. */
+ ++cond->__data.__total_seq;
+
+ /* Remember the mutex we are using here. If there is already a
+ different address store this is a bad user bug. Do not store
+ anything for pshared condvars. */
+ if (cond->__data.__mutex != (void *) ~0l)
+ cond->__data.__mutex = mutex;
+
+ /* Prepare structure passed to cancellation handler. */
+ cbuffer.cond = cond;
+ cbuffer.mutex = mutex;
+
+ /* Before we block we enable cancellation. Therefore we have to
+ install a cancellation handler. */
+ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+ /* The current values of the wakeup counter. The "woken" counter
+ must exceed this value. */
+ unsigned long long int val;
+ unsigned long long int seq;
+ val = seq = cond->__data.__wakeup_seq;
+
+ /* The futex syscall operates on a 32-bit word. That is fine, we
+ just use the low 32 bits of the sequence counter. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+
+ do
+ {
+ /* Prepare to wait. Release the condvar futex. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+ /* Enable asynchronous cancellation. Required by the standard. */
+ cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+ /* Wait until woken by signal or broadcast. Note that we
+ truncate the 'val' value to 32 bits. */
+ lll_futex_wait (futex, (unsigned int) val);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (cbuffer.oldtype);
+
+ /* We are going to look at shared data again, so get the lock. */
+ LLL_CMUTEX_LOCK (cond->__data.__lock, id);
+
+ /* Check whether we are eligible for wakeup. */
+ val = cond->__data.__wakeup_seq;
+ }
+ while (! (val > seq && cond->__data.__woken_seq < val));
+
+ /* Another thread woken up. */
+ ++cond->__data.__woken_seq;
+
+ /* We are done with the condvar. */
+ LLL_CMUTEX_UNLOCK (cond->__data.__lock, id);
+
+ /* The cancellation handling is back to normal, remove the handler. */
+ __pthread_cleanup_pop (&buffer, 0);
+
+ /* Get the mutex before returning. */
+ return __pthread_mutex_cond_lock (mutex);
+}
+
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+ GLIBC_2_3_2);
Boris Hu (Hu Jiangtao)
Software Engineer@ICSL
86-021-5257-4545#1277
iNET: 8-752-1277
************************************
There are my thoughts, not my employer's.
************************************
"gpg --recv-keys --keyserver wwwkeys.pgp.net 0FD7685F"
{0FD7685F:CFD6 6F5C A2CB 7881 725B CEA0 956F 9F14 0FD7 685F}