This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.



GNU C Library master sources branch aaribaud/y2038-temp created. glibc-2.26.9000-1304-g9bb849f


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, aaribaud/y2038-temp has been created
        at  9bb849f25802604148e67170674bbd0a7ad8f921 (commit)

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=9bb849f25802604148e67170674bbd0a7ad8f921

commit 9bb849f25802604148e67170674bbd0a7ad8f921
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Wed Sep 6 10:00:42 2017 +0200

    Y2038: add _TIME_BITS support
    
    This makes all previously defined Y2038-proof API types, functions and
    implementations the default when _TIME_BITS==64 and __WORDSIZE==32 (so
    that 64-bit architectures are unaffected).
    
    Note: it is assumed that the API is consistent, i.e. for each API type
    which is enabled here, all API functions which depend on this type are
    enabled and mapped to Y2038-proof implementations.
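
As an illustration for readers of this patch (not part of the commit): a
minimal sketch of what the new option means for an application, assuming a
32-bit target built against this branch and compiled with -D_TIME_BITS=64.
The chosen date and the printed text are illustrative only.

  /* Hedged sketch: with -D_TIME_BITS=64 on a 32-bit architecture, time_t
     becomes 64 bits wide and the redirected mktime can represent dates past
     January 2038; with a 32-bit time_t, mktime returns (time_t) -1 here.  */
  #include <stdio.h>
  #include <time.h>

  int
  main (void)
  {
    struct tm tm = { 0 };
    tm.tm_year = 2040 - 1900;   /* 1 January 2040, past the 32-bit limit.  */
    tm.tm_mday = 1;
    tm.tm_isdst = -1;

    time_t t = mktime (&tm);
    if (t == (time_t) -1)
      puts ("date not representable with this time_t");
    else
      printf ("sizeof (time_t) = %zu, seconds since epoch = %lld\n",
              sizeof (time_t), (long long) t);
    return 0;
  }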

diff --git a/include/features.h b/include/features.h
index 0e96de0..4d83c0d 100644
--- a/include/features.h
+++ b/include/features.h
@@ -364,6 +364,20 @@
 # define __USE_FILE_OFFSET64	1
 #endif
 
+#if defined _TIME_BITS
+# if _TIME_BITS == 64
+#  if __WORDSIZE == 32
+#   define __USE_TIME_BITS64	1
+#  endif
+# elif _TIME_BITS == 32
+#  if __WORDSIZE > 32
+#   error _TIME_BITS=32 is not compatible with __WORDSIZE > 32
+#  endif
+# else
+#  error Invalid _TIME_BITS value (can only be 32 or 64)
+# endif
+#endif
+
 #if defined _DEFAULT_SOURCE
 # define __USE_MISC	1
 #endif
diff --git a/io/sys/stat.h b/io/sys/stat.h
index 90c403c..c28ba3e 100644
--- a/io/sys/stat.h
+++ b/io/sys/stat.h
@@ -210,14 +210,27 @@ extern int stat (const char *__restrict __file,
 extern int fstat (int __fd, struct stat *__buf) __THROW __nonnull ((2));
 #else
 # ifdef __REDIRECT_NTH
+#  ifdef __USE_TIME_BITS64
+extern int __REDIRECT_NTH (stat, (const char *__restrict __file,
+				  struct stat *__restrict __buf), __stat64_t64)
+     __nonnull ((1, 2));
+extern int __REDIRECT_NTH (fstat, (int __fd, struct stat *__buf), __fstat64_t64)
+     __nonnull ((2));
+#  else
 extern int __REDIRECT_NTH (stat, (const char *__restrict __file,
 				  struct stat *__restrict __buf), stat64)
      __nonnull ((1, 2));
 extern int __REDIRECT_NTH (fstat, (int __fd, struct stat *__buf), fstat64)
      __nonnull ((2));
+#  endif
 # else
-#  define stat stat64
-#  define fstat fstat64
+#  ifdef __USE_TIME_BITS64
+#   define stat stat64_t64
+#   define fstat fstat64_t64
+#  else
+#   define stat stat64
+#   define fstat fstat64
+#  endif
 # endif
 #endif
 #ifdef __USE_LARGEFILE64
@@ -260,12 +273,23 @@ extern int lstat (const char *__restrict __file,
 		  struct stat *__restrict __buf) __THROW __nonnull ((1, 2));
 # else
 #  ifdef __REDIRECT_NTH
+#   ifdef __USE_TIME_BITS64
+extern int __REDIRECT_NTH (lstat,
+			   (const char *__restrict __file,
+			    struct stat *__restrict __buf), __lstat64_t64)
+     __nonnull ((1, 2));
+#   else
 extern int __REDIRECT_NTH (lstat,
 			   (const char *__restrict __file,
 			    struct stat *__restrict __buf), lstat64)
      __nonnull ((1, 2));
+#   endif
 #  else
-#   define lstat lstat64
+#   ifdef __USE_TIME_BITS64
+#    define lstat __lstat64_t64
+#   else
+#    define lstat lstat64
+#   endif
 #  endif
 # endif
 # ifdef __USE_LARGEFILE64
@@ -357,6 +381,15 @@ extern int mkfifoat (int __fd, const char *__path, __mode_t __mode)
 #ifdef __USE_ATFILE
 /* Set file access and modification times relative to directory file
    descriptor.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (utimensat, (int __fd, const char *__path,
+           const struct timespec __times[2], int __flags),
+           __utimensat64) __THROW __nonnull((2));
+# else
+# define utimensat __utimensat64
+# endif
+#endif
 extern int utimensat (int __fd, const char *__path,
 		      const struct timespec __times[2],
 		      int __flags)
@@ -365,6 +398,14 @@ extern int utimensat (int __fd, const char *__path,
 
 #ifdef __USE_XOPEN2K8
 /* Set file access and modification times of the file associated with FD.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (futimens, (int __fd, const struct timespec __times[2]),
+           __futimens64) __THROW;
+# else
+# define futimens __futimens64
+# endif
+#endif
 extern int futimens (int __fd, const struct timespec __times[2]) __THROW;
 #endif
 
@@ -403,6 +444,21 @@ extern int __fxstatat (int __ver, int __fildes, const char *__filename,
      __THROW __nonnull ((3, 4));
 #else
 # ifdef __REDIRECT_NTH
+#  ifdef __USE_TIME_BITS64
+extern int __REDIRECT_NTH (__fxstat, (int __ver, int __fildes,
+				      struct stat *__stat_buf), __fxstat64_t64)
+     __nonnull ((3));
+extern int __REDIRECT_NTH (__xstat, (int __ver, const char *__filename,
+				     struct stat *__stat_buf), __xstat64_t64)
+     __nonnull ((2, 3));
+extern int __REDIRECT_NTH (__lxstat, (int __ver, const char *__filename,
+				      struct stat *__stat_buf), __lxstat64_t64)
+     __nonnull ((2, 3));
+extern int __REDIRECT_NTH (__fxstatat, (int __ver, int __fildes,
+					const char *__filename,
+					struct stat *__stat_buf, int __flag),
+			   __fxstatat64_t64) __nonnull ((3, 4));
+#  else
 extern int __REDIRECT_NTH (__fxstat, (int __ver, int __fildes,
 				      struct stat *__stat_buf), __fxstat64)
      __nonnull ((3));
@@ -416,11 +472,18 @@ extern int __REDIRECT_NTH (__fxstatat, (int __ver, int __fildes,
 					const char *__filename,
 					struct stat *__stat_buf, int __flag),
 			   __fxstatat64) __nonnull ((3, 4));
+#  endif
 
 # else
-#  define __fxstat __fxstat64
-#  define __xstat __xstat64
-#  define __lxstat __lxstat64
+#  ifdef __USE_TIME_BITS64
+#   define __fxstat __fxstat64_t64
+#   define __xstat __xstat64_t64
+#   define __lxstat __lxstat64_t64
+#  else
+#   define __fxstat __fxstat64
+#   define __xstat __xstat64
+#   define __lxstat __lxstat64
+#  endif
 # endif
 #endif
 
diff --git a/io/utime.h b/io/utime.h
index 8409ba4..4f07935 100644
--- a/io/utime.h
+++ b/io/utime.h
@@ -32,15 +32,24 @@ __BEGIN_DECLS
 # include <bits/types/time_t.h>
 #endif
 
-/* Structure describing file times.  */
+/* Structure describing file times, 32- or 64-bit time.  */
 struct utimbuf
   {
-    __time_t actime;		/* Access time.  */
-    __time_t modtime;		/* Modification time.  */
+    time_t actime;		/* Access time.  */
+    time_t modtime;		/* Modification time.  */
   };
 
 /* Set the access and modification times of FILE to those given in
    *FILE_TIMES.  If FILE_TIMES is NULL, set them to the current time.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (utime, (const char *__file,
+     const struct utimbuf *__file_times),
+     __utime_t64) __THROW __nonnull ((1));
+# else
+# define utime __utime_t64
+# endif
+#endif
 extern int utime (const char *__file,
 		  const struct utimbuf *__file_times)
      __THROW __nonnull ((1));
diff --git a/manual/creature.texi b/manual/creature.texi
index 96f8ee0..713a5fa 100644
--- a/manual/creature.texi
+++ b/manual/creature.texi
@@ -148,6 +148,34 @@ This macro was introduced as part of the Large File Support extension
 (LFS).
 @end defvr
 
+@defvr Macro _TIME_BITS
+This macro determines the bit size of @code{time_t} (and therefore the
+bit size of all @code{time_t} derived types and the prototypes of all
+related functions). If @code{_TIME_BITS} is undefined, the bit size of
+time_t equals the bit size of the architecture.
+
+If @code{_TIME_BITS} is undefined, or if @code{_TIME_BITS} is defined
+to the value @code{32} and @code{__WORDSIZE} is defined to the value
+@code{32}, or if @code{_TIME_BITS} is defined to the value @code{64}
+and @code{__WORDSIZE} is defined to the value @code{64}, nothing changes.
+
+If @code{_TIME_BITS} is defined to the value @code{64} and if
+@code{__WORDSIZE} is defined to the value @code{32}, then the @w{64 bit}
+time API and implementation are used even though the architecture word
+size is @code{32}. Also, if the kernel provides @w{64 bit} time support,
+it is used; otherwise, the @w{32 bit} kernel time support is used (with
+no provision to address kernel Y2038 shortcomings).
+
+If @code{_TIME_BITS} is defined to the value @code{32} and if
+@code{__WORDSIZE} is defined to the value @code{64}, then a compile-time
+error is emitted.
+
+If @code{_TIME_BITS} is defined to a value different from both @code{32}
+and @code{64}, then a compile-time error is emitted.
+
+This macro was introduced as part of the Y2038 support.
+@end defvr
+
 @defvr Macro _ISOC99_SOURCE
 @standards{GNU, (none)}
 Until the revised @w{ISO C} standard is widely adopted the new features
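
A compile-time variant of the behaviour documented above (again not part of
the commit; the typedef names are made up): assuming the features.h logic
from this patch, these static checks encode which time_t width each
configuration is expected to select on a 32-bit target.

  /* Hedged sketch: <time.h> pulls in <features.h>, so __USE_TIME_BITS64 is
     defined exactly when _TIME_BITS=64 is requested with a 32-bit word
     size, and time_t is then the 64-bit __time64_t.  A negative array size
     makes the compiler reject the unexpected case.  */
  #include <time.h>

  #ifdef __USE_TIME_BITS64
  typedef char expect_64bit_time [sizeof (time_t) == 8 ? 1 : -1];
  #else
  typedef char expect_native_time[sizeof (time_t) >= 4 ? 1 : -1];
  #endif
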
diff --git a/misc/sys/select.h b/misc/sys/select.h
index 6dd0c83..aa31194 100644
--- a/misc/sys/select.h
+++ b/misc/sys/select.h
@@ -98,6 +98,18 @@ __BEGIN_DECLS
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (select, (int __nfds,
+                                fd_set *__restrict __readfds,
+                                fd_set *__restrict __writefds,
+                                fd_set *__restrict __exceptfds,
+                                struct timeval *__restrict __timeout),
+                       __select_t64);
+# else
+# define select __select_t64
+# endif
+#endif
 extern int select (int __nfds, fd_set *__restrict __readfds,
 		   fd_set *__restrict __writefds,
 		   fd_set *__restrict __exceptfds,
@@ -110,6 +122,19 @@ extern int select (int __nfds, fd_set *__restrict __readfds,
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (pselect, (int __nfds,
+                                 fd_set *__restrict __readfds,
+                                 fd_set *__restrict __writefds,
+                                 fd_set *__restrict __exceptfds,
+                                 const struct timespec *__restrict __timeout,
+                                 const __sigset_t *__restrict __sigmask),
+                       __pselect_t64);
+# else
+# define pselect __pselect_t64
+# endif
+#endif
 extern int pselect (int __nfds, fd_set *__restrict __readfds,
 		    fd_set *__restrict __writefds,
 		    fd_set *__restrict __exceptfds,
diff --git a/posix/sched.h b/posix/sched.h
index 619b3b3..7c46401 100644
--- a/posix/sched.h
+++ b/posix/sched.h
@@ -74,6 +74,15 @@ extern int sched_get_priority_max (int __algorithm) __THROW;
 extern int sched_get_priority_min (int __algorithm) __THROW;
 
 /* Get the SCHED_RR interval for the named process.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (sched_rr_get_interval,
+                       (__pid_t __pid, struct timespec *__t),
+                       __sched_rr_get_interval_t64) __THROW;
+# else
+# define sched_rr_get_interval __sched_rr_get_interval_t64
+# endif
+#endif
 extern int sched_rr_get_interval (__pid_t __pid, struct timespec *__t) __THROW;
 
 
diff --git a/resource/sys/resource.h b/resource/sys/resource.h
index 881db39..a5c8f35 100644
--- a/resource/sys/resource.h
+++ b/resource/sys/resource.h
@@ -84,6 +84,15 @@ extern int setrlimit64 (__rlimit_resource_t __resource,
 
 /* Return resource usage information on process indicated by WHO
    and put it in *USAGE.  Returns 0 for success, -1 for failure.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern double __REDIRECT (getrusage, (__rusage_who_t __who, 
+                                      struct rusage *__usage),
+     __getrusage_t64) __THROW;
+# else
+# define getrusage __getrusage_t64
+# endif
+#endif
 extern int getrusage (__rusage_who_t __who, struct rusage *__usage) __THROW;
 
 /* Return the highest priority of any process specified by WHICH and WHO
diff --git a/rt/mqueue.h b/rt/mqueue.h
index 5f354b4..cd9a8cc 100644
--- a/rt/mqueue.h
+++ b/rt/mqueue.h
@@ -73,6 +73,17 @@ extern int mq_send (mqd_t __mqdes, const char *__msg_ptr, size_t __msg_len,
 #ifdef __USE_XOPEN2K
 /* Receive the oldest from highest priority messages in message queue
    MQDES, stop waiting if ABS_TIMEOUT expires.  */
+# ifdef __USE_TIME_BITS64
+#  if defined(__REDIRECT)
+extern ssize_t __REDIRECT (mq_timedreceive, (mqd_t __mqdes,
+                           char *__restrict __msg_ptr, size_t __msg_len,
+                           unsigned int *__restrict __msg_prio,
+                           const struct timespec *__restrict __abs_timeout),
+     __mq_timedreceive_t64) __nonnull((2, 5));
+#  else
+#   define mq_timedreceive __mq_timedreceive_t64
+#  endif
+# endif
 extern ssize_t mq_timedreceive (mqd_t __mqdes, char *__restrict __msg_ptr,
 				size_t __msg_len,
 				unsigned int *__restrict __msg_prio,
@@ -81,6 +92,17 @@ extern ssize_t mq_timedreceive (mqd_t __mqdes, char *__restrict __msg_ptr,
 
 /* Add message pointed by MSG_PTR to message queue MQDES, stop blocking
    on full message queue if ABS_TIMEOUT expires.  */
+# ifdef __USE_TIME_BITS64
+#  if defined(__REDIRECT)
+extern int __REDIRECT (mq_timedsend, (mqd_t __mqdes,
+                       const char *__msg_ptr, size_t __msg_len,
+                       unsigned int __msg_prio,
+                       const struct timespec *__abs_timeout),
+     __mq_timedsend_t64) __nonnull((2, 5));
+#  else
+#   define mq_timedsend __mq_timedsend_t64
+#  endif
+# endif
 extern int mq_timedsend (mqd_t __mqdes, const char *__msg_ptr,
 			 size_t __msg_len, unsigned int __msg_prio,
 			 const struct timespec *__abs_timeout)
diff --git a/signal/signal.h b/signal/signal.h
index 87dc82a..3f8c6e1 100644
--- a/signal/signal.h
+++ b/signal/signal.h
@@ -266,6 +266,16 @@ extern int sigwaitinfo (const sigset_t *__restrict __set,
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (sigtimedwait, (const sigset_t *__restrict __set,
+			 siginfo_t *__restrict __info,
+			 const struct timespec *__restrict __timeout),
+     __sigtimedwait64) __nonnull ((1));
+# else
+# define sigtimedwait __sigtimedwait64
+# endif
+#endif
 extern int sigtimedwait (const sigset_t *__restrict __set,
 			 siginfo_t *__restrict __info,
 			 const struct timespec *__restrict __timeout)
diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h
index df049ab..6583b1f 100644
--- a/sysdeps/nptl/pthread.h
+++ b/sysdeps/nptl/pthread.h
@@ -765,6 +765,16 @@ extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
 
 #ifdef __USE_XOPEN2K
 /* Wait until lock becomes available, or specified time passes. */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (pthread_mutex_timedlock,
+                               (pthread_mutex_t *__restrict __mutex,
+                                const struct timespec *__restrict __abstime),
+       __pthread_mutex_timedlock_t64) __THROWNL __nonnull ((1, 2));
+# else
+# define pthread_mutex_timedlock __pthread_mutex_timedlock_t64
+# endif
+#endif
 extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
 				    const struct timespec *__restrict
 				    __abstime) __THROWNL __nonnull ((1, 2));
@@ -904,6 +914,16 @@ extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire read lock for RWLOCK or return after specfied time.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (pthread_rwlock_timedrdlock,
+                               (pthread_rwlock_t *__restrict __rwlock,
+                                struct timespec *__restrict __abstime),
+       __pthread_rwlock_timedrdlock_t64) __THROWNL __nonnull ((1, 2));
+# else
+# define pthread_rwlock_timedrdlock __pthread_rwlock_timedrdlock_t64
+# endif
+#endif
 extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
 				       const struct timespec *__restrict
 				       __abstime) __THROWNL __nonnull ((1, 2));
@@ -919,6 +939,16 @@ extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire write lock for RWLOCK or return after specfied time.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (pthread_rwlock_timedwrlock,
+                               (pthread_rwlock_t *__restrict __rwlock,
+                                struct timespec *__restrict __abstime),
+       __pthread_rwlock_timedwrlock_t64) __THROWNL __nonnull ((1, 2));
+# else
+# define pthread_rwlock_timedwrlock __pthread_rwlock_timedwrlock_t64
+# endif
+#endif
 extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
 				       const struct timespec *__restrict
 				       __abstime) __THROWNL __nonnull ((1, 2));
@@ -998,6 +1028,17 @@ extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (pthread_cond_timedwait,
+                               (pthread_cond_t *__restrict __cond,
+                                pthread_mutex_t *__restrict __mutex,
+                                const struct timespec *__restrict __abstime),
+       __pthread_cond_timedwait_t64) __nonnull ((1, 2, 3));
+# else
+# define pthread_cond_timedwait __pthread_cond_timedwait_t64
+# endif
+#endif
 extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
 				   pthread_mutex_t *__restrict __mutex,
 				   const struct timespec *__restrict __abstime)
diff --git a/sysdeps/pthread/semaphore.h b/sysdeps/pthread/semaphore.h
index ff672eb..0d2103a 100644
--- a/sysdeps/pthread/semaphore.h
+++ b/sysdeps/pthread/semaphore.h
@@ -57,6 +57,16 @@ extern int sem_wait (sem_t *__sem);
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (sem_timedwait,
+                               (sem_t *__restrict __sem,
+                                const struct timespec *__restrict __abstime),
+       __sem_timedwait_t64);
+# else
+# define sem_timedwait __sem_timedwait_t64
+# endif
+#endif
 extern int sem_timedwait (sem_t *__restrict __sem,
 			  const struct timespec *__restrict __abstime);
 #endif
diff --git a/sysdeps/unix/sysv/linux/bits/stat.h b/sysdeps/unix/sysv/linux/bits/stat.h
index 48ef82d..fde673e 100644
--- a/sysdeps/unix/sysv/linux/bits/stat.h
+++ b/sysdeps/unix/sysv/linux/bits/stat.h
@@ -119,11 +119,11 @@ struct stat64
     struct timespec st_mtim;		/* Time of last modification.  */
     struct timespec st_ctim;		/* Time of last status change.  */
 # else
-    __time_t st_atime;			/* Time of last access.  */
+    time_t st_atime;			/* Time of last access.  */
     unsigned long int st_atimensec;	/* Nscecs of last access.  */
-    __time_t st_mtime;			/* Time of last modification.  */
+    time_t st_mtime;			/* Time of last modification.  */
     unsigned long int st_mtimensec;	/* Nsecs of last modification.  */
-    __time_t st_ctime;			/* Time of last status change.  */
+    time_t st_ctime;			/* Time of last status change.  */
     unsigned long int st_ctimensec;	/* Nsecs of last status change.  */
 # endif
     __ino64_t st_ino;			/* File serial number.		*/
diff --git a/sysdeps/unix/sysv/linux/sys/timerfd.h b/sysdeps/unix/sysv/linux/sys/timerfd.h
index 4d75e15..291f1f6 100644
--- a/sysdeps/unix/sysv/linux/sys/timerfd.h
+++ b/sysdeps/unix/sysv/linux/sys/timerfd.h
@@ -43,11 +43,30 @@ extern int timerfd_create (__clockid_t __clock_id, int __flags) __THROW;
 /* Set next expiration time of interval timer source UFD to UTMR.  If
    FLAGS has the TFD_TIMER_ABSTIME flag set the timeout value is
    absolute.  Optionally return the old expiration time in OTMR.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (timerfd_settime, (int __ufd, int __flags,
+                       const struct itimerspec *__utmr,
+                       struct itimerspec *__otmr),__timerfd_settime64)
+                       __THROW;
+# else
+# define timerfd_settime __timerfd_settime64
+# endif
+#endif
 extern int timerfd_settime (int __ufd, int __flags,
 			    const struct itimerspec *__utmr,
 			    struct itimerspec *__otmr) __THROW;
 
 /* Return the next expiration time of UFD.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (timerfd_gettime, (int __ufd,
+                       struct itimerspec *__otmr),__timerfd_gettime64)
+                       __THROW;
+# else
+# define timerfd_gettime __timerfd_gettime64
+# endif
+#endif
 extern int timerfd_gettime (int __ufd, struct itimerspec *__otmr) __THROW;
 
 __END_DECLS
diff --git a/sysdeps/unix/sysv/linux/sys/timex.h b/sysdeps/unix/sysv/linux/sys/timex.h
index 0d652c8..4d9fe9b 100644
--- a/sysdeps/unix/sysv/linux/sys/timex.h
+++ b/sysdeps/unix/sysv/linux/sys/timex.h
@@ -55,15 +55,43 @@ struct ntptimeval
 __BEGIN_DECLS
 
 extern int __adjtimex (struct timex *__ntx) __THROW;
+
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (adjtimex, (struct timex *__ntx),
+     __adjtimex_t64) __THROW;
+# else
+# define adjtimex __adjtimex_t64
+# endif
+#endif
 extern int adjtimex (struct timex *__ntx) __THROW;
 
-#ifdef __REDIRECT_NTH
+#if __WORDSIZE > 32 || ! defined(__USE_TIME_BITS64)
+# ifdef __REDIRECT_NTH
 extern int __REDIRECT_NTH (ntp_gettime, (struct ntptimeval *__ntv),
 			   ntp_gettimex);
-#else
+# else
 extern int ntp_gettimex (struct ntptimeval *__ntv) __THROW;
+#  define ntp_gettime ntp_gettimex
+# endif
+#else
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (ntp_gettimex, (struct ntptimeval *__ntv),
+                          __ntp_gettimex_t64) __THROW;
+# else
+# define ntp_gettimex __ntp_gettimex_t64
+# endif
 # define ntp_gettime ntp_gettimex
 #endif
+
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (ntp_adjtime, (struct timex *__tntx),
+                       __ntp_adjtime_t64) __THROW;
+# else
+# define ntp_adjtime __ntp_adjtime_t64
+# endif
+#endif
 extern int ntp_adjtime (struct timex *__tntx) __THROW;
 
 __END_DECLS
diff --git a/sysvipc/sys/msg.h b/sysvipc/sys/msg.h
index 1635839..bfcba02 100644
--- a/sysvipc/sys/msg.h
+++ b/sysvipc/sys/msg.h
@@ -58,6 +58,15 @@ struct msgbuf
 __BEGIN_DECLS
 
 /* Message queue control operation.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (msgctl, (int __msqid, int __cmd,
+                       struct msqid_ds *__buf),
+                       __msgctl_t64) __THROW;
+# else
+# define msgctl __msgctl_t64
+# endif
+#endif
 extern int msgctl (int __msqid, int __cmd, struct msqid_ds *__buf) __THROW;
 
 /* Get messages queue.  */
diff --git a/time/bits/types/struct_timespec.h b/time/bits/types/struct_timespec.h
index 644db9f..1ba24ce 100644
--- a/time/bits/types/struct_timespec.h
+++ b/time/bits/types/struct_timespec.h
@@ -2,13 +2,30 @@
 #define __timespec_defined 1
 
 #include <bits/types.h>
+#include <endian.h>
 
 /* POSIX.1b structure for a time value.  This is like a `struct timeval' but
    has nanoseconds instead of microseconds.  */
+#if __WORDSIZE > 32 || ! defined(__USE_TIME_BITS64)
 struct timespec
 {
   __time_t tv_sec;		/* Seconds.  */
   __syscall_slong_t tv_nsec;	/* Nanoseconds.  */
 };
+# elif BYTE_ORDER == BIG_ENDIAN
+struct timespec
+{
+  __time64_t tv_sec;		/* Seconds.  */
+  int: 32;			/* Hidden padding */
+  __syscall_slong_t tv_nsec;	/* Nanoseconds.  */
+};
+# else
+struct timespec
+{
+  __time64_t tv_sec;		/* Seconds.  */
+  __syscall_slong_t tv_nsec;	/* Nanoseconds.  */
+  int: 32;			/* Hidden padding */
+};
+# endif
 
 #endif
diff --git a/time/bits/types/struct_timeval.h b/time/bits/types/struct_timeval.h
index 70394ce..85e0cb5 100644
--- a/time/bits/types/struct_timeval.h
+++ b/time/bits/types/struct_timeval.h
@@ -5,9 +5,18 @@
 
 /* A time value that is accurate to the nearest
    microsecond but also has a range of years.  */
+#ifdef __USE_TIME_BITS64
+struct timeval
+{
+  __time64_t tv_sec;		/* Seconds.  */
+  __uint64_t tv_usec;   	/* Microseconds.  */
+};
+#else
 struct timeval
 {
   __time_t tv_sec;		/* Seconds.  */
   __suseconds_t tv_usec;	/* Microseconds.  */
 };
 #endif
+
+#endif
diff --git a/time/bits/types/time_t.h b/time/bits/types/time_t.h
index ab8287c..84d67f6 100644
--- a/time/bits/types/time_t.h
+++ b/time/bits/types/time_t.h
@@ -4,6 +4,10 @@
 #include <bits/types.h>
 
 /* Returned by `time'.  */
+#ifdef __USE_TIME_BITS64
+typedef __time64_t time_t;
+#else
 typedef __time_t time_t;
+#endif
 
 #endif
diff --git a/time/sys/time.h b/time/sys/time.h
index 4166a5b..4db24b5 100644
--- a/time/sys/time.h
+++ b/time/sys/time.h
@@ -65,12 +65,31 @@ typedef void *__restrict __timezone_ptr_t;
    Returns 0 on success, -1 on errors.
    NOTE: This form of timezone information is obsolete.
    Use the functions and variables declared in <time.h> instead.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (gettimeofday, (struct timeval *__restrict __tv,
+                                   __timezone_ptr_t __tz),
+                    __gettimeofday_t64) __THROW __nonnull((1));
+# else
+# define gettimeofday __gettimeofday_t64
+# endif
+#endif
 extern int gettimeofday (struct timeval *__restrict __tv,
 			 __timezone_ptr_t __tz) __THROW __nonnull ((1));
 
 #ifdef __USE_MISC
 /* Set the current time of day and timezone information.
    This call is restricted to the super-user.  */
+#ifdef __USE_TIME_BITS64
+#  if defined(__REDIRECT)
+extern int __REDIRECT (settimeofday,
+                       (const struct timeval *__tv,
+                        const struct timezone *__tz),
+                    __settimeofday_t64) __THROW;
+#  else
+#   define settimeofday __settimeofday_t64
+#  endif
+# endif
 extern int settimeofday (const struct timeval *__tv,
 			 const struct timezone *__tz)
      __THROW;
@@ -79,6 +98,16 @@ extern int settimeofday (const struct timeval *__tv,
    If OLDDELTA is not NULL, it is filled in with the amount
    of time adjustment remaining to be done from the last `adjtime' call.
    This call is restricted to the super-user.  */
+#ifdef __USE_TIME_BITS64
+#  if defined(__REDIRECT)
+extern int __REDIRECT (adjtime,
+                       (const struct timeval *__delta,
+                        struct timeval *__olddelta),
+                    __adjtime_t64) __THROW;
+#  else
+#   define adjtime __adjtime_t64
+#  endif
+# endif
 extern int adjtime (const struct timeval *__delta,
 		    struct timeval *__olddelta) __THROW;
 #endif
@@ -119,12 +148,31 @@ typedef int __itimer_which_t;
 
 /* Set *VALUE to the current setting of timer WHICH.
    Return 0 on success, -1 on errors.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (getitimer, (__itimer_which_t __which,
+       struct itimerval *__value), __getitimer_t64)
+       __THROW;
+# else
+# define getitimer __getitimer_t64
+# endif
+#endif
 extern int getitimer (__itimer_which_t __which,
 		      struct itimerval *__value) __THROW;
 
 /* Set the timer WHICH to *NEW.  If OLD is not NULL,
    set *OLD to the old value of timer WHICH.
    Returns 0 on success, -1 on errors.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (setitimer, (__itimer_which_t __which,
+       const struct itimerval *__restrict __new,
+       struct itimerval *__restrict __old), __setitimer_t64)
+       __THROW;
+# else
+# define setitimer __setitimer_t64
+# endif
+#endif
 extern int setitimer (__itimer_which_t __which,
 		      const struct itimerval *__restrict __new,
 		      struct itimerval *__restrict __old) __THROW;
@@ -132,15 +180,41 @@ extern int setitimer (__itimer_which_t __which,
 /* Change the access time of FILE to TVP[0] and the modification time of
    FILE to TVP[1].  If TVP is a null pointer, use the current time instead.
    Returns 0 on success, -1 on errors.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (utimes, (const char *__file,
+       const struct timeval __tvp[2]), __utimes64)
+       __THROW __nonnull ((1));
+# else
+# define utimes __utimes64
+# endif
+#endif
 extern int utimes (const char *__file, const struct timeval __tvp[2])
      __THROW __nonnull ((1));
 
 #ifdef __USE_MISC
 /* Same as `utimes', but does not follow symbolic links.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (lutimes, (const char *__file,
+       const struct timeval __tvp[2]), __lutimes64)
+       __THROW __nonnull ((1));
+# else
+# define lutimes __lutimes64
+# endif
+#endif
 extern int lutimes (const char *__file, const struct timeval __tvp[2])
      __THROW __nonnull ((1));
 
 /* Same as `utimes', but takes an open file descriptor instead of a name.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (futimes, (int __fd, const struct timeval __tvp[2]),
+     __futimes64) __THROW;
+# else
+# define futimes __futimes64
+# endif
+#endif
 extern int futimes (int __fd, const struct timeval __tvp[2]) __THROW;
 #endif
 
diff --git a/time/time.h b/time/time.h
index 49d3043..94271bb 100644
--- a/time/time.h
+++ b/time/time.h
@@ -72,16 +72,38 @@ __BEGIN_DECLS
 extern clock_t clock (void) __THROW;
 
 /* Return the current time and put it in *TIMER if TIMER is not NULL.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (time, (time_t * __timer),
+     __time_t64) __THROW;
+# else
+# define time __time_t64
+# endif
+#endif
 extern time_t time (time_t *__timer) __THROW;
 
 /* Return the difference between TIME1 and TIME0.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern double __REDIRECT (difftime, (time_t __time1, time_t __time0),
+     __difftime64) __THROW;
+# else
+# define difftime __difftime64
+# endif
+#endif
 extern double difftime (time_t __time1, time_t __time0)
      __THROW __attribute__ ((__const__));
 
 /* Return the `time_t' representation of TP and normalize TP.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (mktime, (struct tm *__tp), __mktime64) __THROW;
+# else
+# define mktime __mktime64
+# endif
+#endif
 extern time_t mktime (struct tm *__tp) __THROW;
 
-
 /* Format TP into S according to FORMAT.
    Write no more than MAXSIZE characters and return the number
    of characters written, or 0 if it would exceed MAXSIZE.  */
@@ -116,20 +138,52 @@ extern char *strptime_l (const char *__restrict __s,
 
 /* Return the `struct tm' representation of *TIMER
    in Universal Coordinated Time (aka Greenwich Mean Time).  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (gmtime, (const time_t *__timer),
+       __gmtime64) __THROW;
+# else
+# define gmtime __gmtime64
+# endif
+#endif
 extern struct tm *gmtime (const time_t *__timer) __THROW;
 
 /* Return the `struct tm' representation
    of *TIMER in the local timezone.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (localtime, (const time_t *__timer),
+       __localtime64) __THROW;
+# else
+# define localtime __localtime64
+# endif
+#endif
 extern struct tm *localtime (const time_t *__timer) __THROW;
 
 #ifdef __USE_POSIX
 /* Return the `struct tm' representation of *TIMER in UTC,
    using *TP to store the result.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (gmtime_r, (const time_t *__restrict
+       __timer, struct tm *__restrict __tp), __gmtime64_r) __THROW;
+# else
+# define gmtime_r __gmtime64_r
+# endif
+#endif
 extern struct tm *gmtime_r (const time_t *__restrict __timer,
 			    struct tm *__restrict __tp) __THROW;
 
 /* Return the `struct tm' representation of *TIMER in local time,
    using *TP to store the result.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern struct tm * __REDIRECT (localtime_r, (const time_t *__restrict
+       __timer, struct tm *__restrict __tp), __localtime64_r) __THROW;
+# else
+# define localtime_r __localtime64_r
+# endif
+#endif
 extern struct tm *localtime_r (const time_t *__restrict __timer,
 			       struct tm *__restrict __tp) __THROW;
 #endif	/* POSIX */
@@ -139,6 +193,14 @@ extern struct tm *localtime_r (const time_t *__restrict __timer,
 extern char *asctime (const struct tm *__tp) __THROW;
 
 /* Equivalent to `asctime (localtime (timer))'.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern char * __REDIRECT (ctime, (const time_t *__timer),
+       __ctime64) __THROW;
+# else
+# define ctime __ctime64
+# endif
+#endif
 extern char *ctime (const time_t *__timer) __THROW;
 
 #ifdef __USE_POSIX
@@ -150,6 +212,14 @@ extern char *asctime_r (const struct tm *__restrict __tp,
 			char *__restrict __buf) __THROW;
 
 /* Equivalent to `asctime_r (localtime_r (timer, *TMP*), buf)'.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern char * __REDIRECT (ctime_r, (const time_t *__restrict __timer,
+       char *__restrict __buf), __ctime64_r) __THROW;
+# else
+# define ctime_r __ctime64_r
+# endif
+#endif
 extern char *ctime_r (const time_t *__restrict __timer,
 		      char *__restrict __buf) __THROW;
 #endif	/* POSIX */
@@ -178,6 +248,14 @@ extern long int timezone;
 #ifdef __USE_MISC
 /* Set the system time to *WHEN.
    This call is restricted to the superuser.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (stime, (const time_t *__when), __stime_t64)
+           __THROW;
+# else
+# define stime __stime_t64
+# endif
+#endif
 extern int stime (const time_t *__when) __THROW;
 #endif
 
@@ -193,9 +271,23 @@ extern int stime (const time_t *__when) __THROW;
    localtime package.  These are included only for compatibility.  */
 
 /* Like `mktime', but for TP represents Universal Time, not local time.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (timegm, (struct tm *__tp), __timegm64) __THROW;
+# else
+# define timegm __timegm64
+# endif
+#endif
 extern time_t timegm (struct tm *__tp) __THROW;
 
 /* Another name for `mktime'.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern time_t __REDIRECT (timelocal, (struct tm *__tp), __timelocal64) __THROW;
+# else
+# define timelocal __timelocal64
+# endif
+#endif
 extern time_t timelocal (struct tm *__tp) __THROW;
 
 /* Return the number of days in YEAR.  */
@@ -208,17 +300,49 @@ extern int dysize (int __year) __THROW  __attribute__ ((__const__));
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (nanosleep, (const struct timespec *__requested_time,
+		      struct timespec *__remaining), __nanosleep_t64);
+# else
+# define nanosleep __nanosleep_t64
+# endif
+#endif
 extern int nanosleep (const struct timespec *__requested_time,
 		      struct timespec *__remaining);
 
 
 /* Get resolution of clock CLOCK_ID.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (clock_getres, (clockid_t __clock_id, struct
+     timespec *__res), __clock_getres64) __THROW;
+# else
+# define clock_getres __clock_getres64
+# endif
+#endif
 extern int clock_getres (clockid_t __clock_id, struct timespec *__res) __THROW;
 
 /* Get current value of clock CLOCK_ID and store it in TP.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (clock_gettime, (clockid_t __clock_id, struct
+     timespec *__tp), __clock_gettime64) __THROW;
+# else
+# define clock_gettime __clock_gettime64
+# endif
+#endif
 extern int clock_gettime (clockid_t __clock_id, struct timespec *__tp) __THROW;
 
 /* Set clock CLOCK_ID to value TP.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (clock_settime, (clockid_t __clock_id, const struct
+     timespec *__tp), __clock_settime64) __THROW;
+# else
+# define clock_settime __clock_settime64
+# endif
+#endif
 extern int clock_settime (clockid_t __clock_id, const struct timespec *__tp)
      __THROW;
 
@@ -227,6 +351,15 @@ extern int clock_settime (clockid_t __clock_id, const struct timespec *__tp)
 
    This function is a cancellation point and therefore not marked with
    __THROW.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (clock_nanosleep, (clockid_t __clock_id, int __flags,
+     const struct timespec *__req, struct timespec *__rem),
+     __clock_nanosleep64) __THROW;
+# else
+# define clock_nanosleep __clock_nanosleep64
+# endif
+#endif
 extern int clock_nanosleep (clockid_t __clock_id, int __flags,
 			    const struct timespec *__req,
 			    struct timespec *__rem);
@@ -245,11 +378,29 @@ extern int timer_create (clockid_t __clock_id,
 extern int timer_delete (timer_t __timerid) __THROW;
 
 /* Set timer TIMERID to VALUE, returning old value in OVALUE.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (timer_settime, (timer_t __timerid, int __flags,
+			  const struct itimerspec *__restrict __value,
+			  struct itimerspec *__restrict __ovalue),
+                          __timer_settime64) __THROW;
+# else
+# define timer_settime __timer_settime64
+# endif
+#endif
 extern int timer_settime (timer_t __timerid, int __flags,
 			  const struct itimerspec *__restrict __value,
 			  struct itimerspec *__restrict __ovalue) __THROW;
 
 /* Get current value of timer TIMERID and store it in VALUE.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (timer_gettime, (timer_t __timerid, struct
+     itimerspec *__value), __timer_gettime64) __THROW __nonnull ((1));
+# else
+# define timer_gettime __timer_gettime64
+# endif
+#endif
 extern int timer_gettime (timer_t __timerid, struct itimerspec *__value)
      __THROW;
 
@@ -260,6 +411,14 @@ extern int timer_getoverrun (timer_t __timerid) __THROW;
 
 #ifdef __USE_ISOC11
 /* Set TS to calendar time based in time base BASE.  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern int __REDIRECT (timespec_get, (struct timespec *__ts, int __base),
+     __timespec_get64) __THROW __nonnull ((1));
+# else
+# define timespec_get __timespec_get64
+# endif
+#endif
 extern int timespec_get (struct timespec *__ts, int __base)
      __THROW __nonnull ((1));
 #endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=943f74e4ef0a6a7465bdceea68d3d64b42e0469e

commit 943f74e4ef0a6a7465bdceea68d3d64b42e0469e
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 19:49:08 2017 +0200

    Y2038: add RPC functions
    
    Three functions in RPC have a struct timeval in their arguments:
    pmap_rmtcall, clntudp_create, and clntudp_bufcreate.
    
    Since these struct timeval arguments contain relative timeouts, and
    since RPC timeouts can reasonably be expected to be small enough to
    still fit in 32-bit representations, the implementations of these
    functions just verify that the 64-bit timeout they received can fit
    in 32 bits, convert it to 32 bits, and pass it to their 32-bit-time
    counterparts.
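
As a hedged usage sketch (not part of the commit; the program and version
numbers are arbitrary): under _TIME_BITS=64 the redirected clntudp_create
receives a struct timeval with 64-bit fields, and per the approach described
above a resend interval whose seconds do not fit in 32 bits cannot be
forwarded, so such a call is expected to fail and return NULL.

  #include <limits.h>
  #include <netinet/in.h>
  #include <rpc/rpc.h>

  static CLIENT *
  make_udp_client (struct sockaddr_in *raddr, int *sockp)
  {
    /* A small relative timeout fits in 32 bits and is simply converted.  */
    struct timeval resend = { 5, 0 };

    /* struct timeval too_long = { (time_t) INT_MAX + 1, 0 };
       would exceed the 32-bit range and make the call fail.  */

    return clntudp_create (raddr, 100000UL, 2UL, resend, sockp);
  }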

diff --git a/sunrpc/clnt_udp.c b/sunrpc/clnt_udp.c
index c2436e3..d4e26c4 100644
--- a/sunrpc/clnt_udp.c
+++ b/sunrpc/clnt_udp.c
@@ -644,3 +644,39 @@ clntudp_destroy (CLIENT *cl)
   mem_free ((caddr_t) cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
   mem_free ((caddr_t) cl, sizeof (CLIENT));
 }
+
+/* 64-bit time versions */
+
+CLIENT *
+__clntudp_create64 (struct sockaddr_in *raddr, u_long program, u_long version,
+		    struct __timeval64 wait, int *sockp)
+{
+  struct timeval wait32;
+  if (wait.tv_sec > INT_MAX)
+  {
+    return NULL;
+  }  
+
+  wait32.tv_sec = wait.tv_sec;
+  wait32.tv_usec = wait.tv_usec;
+
+  return clntudp_create (raddr, program, version, wait32, sockp);
+}
+
+CLIENT *
+__clntudp_bufcreate64 (struct sockaddr_in *raddr, u_long program, u_long version,
+		       struct __timeval64 wait, int *sockp, u_int sendsz,
+		       u_int recvsz)
+{
+  struct timeval wait32;
+
+  if (wait.tv_sec > INT_MAX)
+  {
+    return NULL;
+  }
+
+  wait32.tv_sec = wait.tv_sec;
+  wait32.tv_usec = wait.tv_usec;
+
+  return clntudp_bufcreate (raddr, program, version, wait32, sockp, sendsz, recvsz);
+}
diff --git a/sunrpc/pmap_rmt.c b/sunrpc/pmap_rmt.c
index 6b142e5..6f9236c 100644
--- a/sunrpc/pmap_rmt.c
+++ b/sunrpc/pmap_rmt.c
@@ -390,3 +390,26 @@ done_broad:
   return stat;
 }
 libc_hidden_nolink_sunrpc (clnt_broadcast, GLIBC_2_0)
+
+/* 64-bit-time version */
+
+/* The 64-bit-time version of pmap_rmtcall.
+ * Only handles 64-bit-time timeouts smaller than 2^31 seconds.
+ */
+enum clnt_stat
+__pmap_rmtcall_t64 (struct sockaddr_in *addr, u_long prog, u_long vers,
+		    u_long proc, xdrproc_t xdrargs, caddr_t argsp,
+		    xdrproc_t xdrres, caddr_t resp,
+		    struct __timeval64 tout, u_long *port_ptr)
+{
+  struct timeval tout32;
+  if (tout.tv_sec > INT_MAX)
+  {
+    return RPC_SYSTEMERROR;
+  }
+  tout32.tv_sec = tout.tv_sec;
+  tout32.tv_usec = tout.tv_usec;
+  
+  return pmap_rmtcall (addr, prog, vers, proc, xdrargs, argsp, xdrres,
+                       resp, tout32, port_ptr);
+}
diff --git a/sunrpc/rpc/clnt.h b/sunrpc/rpc/clnt.h
index f4d4a94..e559242 100644
--- a/sunrpc/rpc/clnt.h
+++ b/sunrpc/rpc/clnt.h
@@ -329,9 +329,33 @@ extern CLIENT *clnttcp_create (struct sockaddr_in *__raddr, u_long __prog,
  *	u_int sendsz;
  *	u_int recvsz;
  */
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern CLIENT * __REDIRECT (clntudp_create,(struct sockaddr_in *__raddr,
+                                            u_long __program,
+					    u_long __version,
+					    struct timeval __wait_resend,
+					    int *__sockp),
+                            __clntudp_create_t64) __THROW;
+# else
+# define clntudp_create __clntudp_create_t64
+# endif
+#endif
 extern CLIENT *clntudp_create (struct sockaddr_in *__raddr, u_long __program,
 			       u_long __version, struct timeval __wait_resend,
 			       int *__sockp) __THROW;
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern CLIENT * __REDIRECT (clntudp_bufcreate,(struct sockaddr_in *__raddr,
+				               u_long __program, u_long __version,
+				               struct __timeval64 __wait_resend,
+					       int *__sockp, u_int __sendsz,
+					       u_int __recvsz),
+                            __clntudp_bufcreate_t64) __THROW;
+# else
+# define clntudp_bufcreate __clntudp_bufcreate_t64
+# endif
+#endif
 extern CLIENT *clntudp_bufcreate (struct sockaddr_in *__raddr,
 				  u_long __program, u_long __version,
 				  struct timeval __wait_resend, int *__sockp,
diff --git a/sunrpc/rpc/pmap_clnt.h b/sunrpc/rpc/pmap_clnt.h
index 1cc94b8..70ec89b 100644
--- a/sunrpc/rpc/pmap_clnt.h
+++ b/sunrpc/rpc/pmap_clnt.h
@@ -71,6 +71,21 @@ extern bool_t pmap_set (const u_long __program, const u_long __vers,
 extern bool_t pmap_unset (const u_long __program, const u_long __vers)
      __THROW;
 extern struct pmaplist *pmap_getmaps (struct sockaddr_in *__address) __THROW;
+#ifdef __USE_TIME_BITS64
+# if defined(__REDIRECT)
+extern enum clnt_stat __REDIRECT (pmap_rmtcall, (struct sockaddr_in *__addr,
+                                                 const u_long __prog,
+                                                 const u_long __vers,
+                                                 const u_long __proc,
+                                                 xdrproc_t __xdrargs,
+                                                 caddr_t __argsp, xdrproc_t __xdrres,
+                                                 caddr_t __resp, struct timeval __tout,
+                                                 u_long *__port_ptr),
+                                  __pmap_rmtcall_t64) __THROW;
+# else
+# define pmap_rmtcall __pmap_rmtcall_t64
+# endif
+#endif
 extern enum clnt_stat pmap_rmtcall (struct sockaddr_in *__addr,
 				    const u_long __prog,
 				    const u_long __vers,

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=14448e479e15296ad37c9281d7e018d3613e9bcf

commit 14448e479e15296ad37c9281d7e018d3613e9bcf
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 19:49:07 2017 +0200

    Y2038: add function select

diff --git a/include/sys/select.h b/include/sys/select.h
index 3eb76fa..df10085 100644
--- a/include/sys/select.h
+++ b/include/sys/select.h
@@ -24,5 +24,10 @@ extern int __pselect_t64 (int __nfds, fd_set *__readfds,
 		         const struct __timespec64 *__timeout,
 		         const __sigset_t *__sigmask);
 
+extern int __select_t64 (int __nfds, fd_set *__restrict __readfds,
+		         fd_set *__restrict __writefds,
+		         fd_set *__restrict __exceptfds,
+		         struct __timeval64 *__restrict __timeout);
+
 #endif
 #endif
diff --git a/sysdeps/unix/sysv/linux/select.c b/sysdeps/unix/sysv/linux/select.c
index e4124a1..8deaa5b 100644
--- a/sysdeps/unix/sysv/linux/select.c
+++ b/sysdeps/unix/sysv/linux/select.c
@@ -33,6 +33,8 @@
 # define __NR_select __NR__newselect
 #endif
 
+extern int __y2038_linux_support;
+
 int
 __select (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
 	  struct timeval *timeout)
@@ -69,3 +71,67 @@ libc_hidden_def (__select)
 
 weak_alias (__select, select)
 weak_alias (__select, __libc_select)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__select_t64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+	      struct __timeval64 *timeout)
+{
+  struct timeval tval32, *timeout32 = NULL;
+#ifndef __NR_select
+  int result;
+  struct timespec ts32, *tsp32 = NULL;
+#endif
+
+  if (__y2038_linux_support)
+  {
+    /* TODO: implement using Linux kernel system call */
+  }
+
+  if (timeout != NULL)
+    {
+      if (timeout->tv_sec > INT_MAX)
+      {
+        errno = EOVERFLOW;
+        return -1;
+      }
+      tval32.tv_sec = timeout->tv_sec;
+      tval32.tv_usec = timeout->tv_usec;
+      timeout32 = &tval32;
+    }
+
+#ifdef __NR_select
+  return SYSCALL_CANCEL (select, nfds, readfds, writefds, exceptfds,
+			 timeout32);
+#else
+  if (timeout)
+    {
+      if (timeout->tv_sec > INT_MAX)
+      {
+        errno = EOVERFLOW;
+        return -1;
+      }
+      ts32.tv_sec = timeout->tv_sec;
+      ts32.tv_nsec = timeout->tv_usec * 1000;
+      tsp32 = &ts32;
+    }
+
+  result = SYSCALL_CANCEL (pselect6, nfds, readfds, writefds, exceptfds, tsp32,
+			   NULL);
+
+  if (timeout)
+    {
+      /* Linux by default will update the timeout after a pselect6 syscall
+         (though the pselect() glibc call suppresses this behavior).
+         Since select() on Linux has the same behavior as the pselect6
+         syscall, we update the timeout here.  */
+      timeout->tv_sec = ts32.tv_sec;
+      timeout->tv_usec = ts32.tv_nsec / 1000;
+    }
+
+  return result;
+#endif
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=896a40a2ebeb307d203dda2737beaa70f58eb569

commit 896a40a2ebeb307d203dda2737beaa70f58eb569
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:16 2017 +0200

    Y2038: add function pselect

diff --git a/include/sys/select.h b/include/sys/select.h
index 07bb49b..3eb76fa 100644
--- a/include/sys/select.h
+++ b/include/sys/select.h
@@ -3,6 +3,9 @@
 
 #ifndef _ISOMAC
 /* Now define the internal interfaces.  */
+
+#include <include/time.h>
+
 extern int __pselect (int __nfds, fd_set *__readfds,
 		      fd_set *__writefds, fd_set *__exceptfds,
 		      const struct timespec *__timeout,
@@ -14,5 +17,12 @@ extern int __select (int __nfds, fd_set *__restrict __readfds,
 		     struct timeval *__restrict __timeout);
 libc_hidden_proto (__select)
 
+/* 64-bit time version */
+
+extern int __pselect_t64 (int __nfds, fd_set *__readfds,
+		         fd_set *__writefds, fd_set *__exceptfds,
+		         const struct __timespec64 *__timeout,
+		         const __sigset_t *__sigmask);
+
 #endif
 #endif
diff --git a/sysdeps/unix/sysv/linux/pselect.c b/sysdeps/unix/sysv/linux/pselect.c
index 2b052e7..c286ef8 100644
--- a/sysdeps/unix/sysv/linux/pselect.c
+++ b/sysdeps/unix/sysv/linux/pselect.c
@@ -79,6 +79,67 @@ __pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
 }
 weak_alias (__pselect, pselect)
 
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__pselect_t64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+	       const struct __timespec64 *timeout, const sigset_t *sigmask)
+{
+  struct timespec tval32, *timeout32 = NULL;
+
+  if (__y2038_linux_support)
+  {
+    /* TODO: implement using Linux kernel system call */
+  }
+
+  /* The Linux kernel can in some situations update the timeout value.
+     We do not want that so use a local variable.  */
+  if (timeout != NULL)
+    {
+      if (timeout->tv_sec > INT_MAX)
+      {
+        errno = EOVERFLOW;
+        return -1;
+      }
+      tval32.tv_sec = timeout->tv_sec;
+      tval32.tv_nsec = timeout->tv_nsec;
+      timeout32 = &tval32;
+    }
+
+  /* Note: the system call expects 7 values but on most architectures
+     we can only pass in 6 directly.  If there is an architecture with
+     support for more parameters a new version of this file needs to
+     be created.  */
+  struct
+  {
+    __syscall_ulong_t ss;
+    __syscall_ulong_t ss_len;
+  } data;
+
+  data.ss = (__syscall_ulong_t) (uintptr_t) sigmask;
+  data.ss_len = _NSIG / 8;
+
+  int result;
+
+#ifndef CALL_PSELECT6
+# define CALL_PSELECT6(nfds, readfds, writefds, exceptfds, timeout, data) \
+  SYSCALL_CANCEL (pselect6, nfds, readfds, writefds, exceptfds,	timeout32, data)
+#endif
+
+  result = CALL_PSELECT6 (nfds, readfds, writefds, exceptfds, timeout32,
+			  &data);
+
+# ifndef __ASSUME_PSELECT
+  if (result == -1 && errno == ENOSYS)
+    result = __generic_pselect (nfds, readfds, writefds, exceptfds, timeout32,
+				sigmask);
+# endif
+
+  return result;
+}
+
 # ifndef __ASSUME_PSELECT
 #  define __pselect static __generic_pselect
 # endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=90545ba886a84ad0ab85596f9cf6b1c4f21c41ea

commit 90545ba886a84ad0ab85596f9cf6b1c4f21c41ea
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:15 2017 +0200

    Y2038: add function __adjtimex_t64 (and __ntp_adjtime_t64)

diff --git a/include/time.h b/include/time.h
index 94d5d87..91c6f4f 100644
--- a/include/time.h
+++ b/include/time.h
@@ -74,6 +74,37 @@ struct __ntptimeval_t64
   long int __glibc_reserved4;
 };
 
+/* 64-bit time version of the current struct timex */
+struct __timex_t64
+{
+  unsigned int modes;		/* mode selector */
+  __syscall_slong_t offset;	/* time offset (usec) */
+  __syscall_slong_t freq;	/* frequency offset (scaled ppm) */
+  __syscall_slong_t maxerror;	/* maximum error (usec) */
+  __syscall_slong_t esterror;	/* estimated error (usec) */
+  int status;			/* clock command/status */
+  __syscall_slong_t constant;	/* pll time constant */
+  __syscall_slong_t precision;	/* clock precision (usec) (ro) */
+  __syscall_slong_t tolerance;	/* clock frequency tolerance (ppm) (ro) */
+  struct __timeval64 time;	/* (read only, except for ADJ_SETOFFSET) */
+  __syscall_slong_t tick;	/* (modified) usecs between clock ticks */
+  __syscall_slong_t ppsfreq;	/* pps frequency (scaled ppm) (ro) */
+  __syscall_slong_t jitter;	/* pps jitter (us) (ro) */
+  int shift;			/* interval duration (s) (shift) (ro) */
+  __syscall_slong_t stabil;	/* pps stability (scaled ppm) (ro) */
+  __syscall_slong_t jitcnt;	/* jitter limit exceeded (ro) */
+  __syscall_slong_t calcnt;	/* calibration intervals (ro) */
+  __syscall_slong_t errcnt;	/* calibration errors (ro) */
+  __syscall_slong_t stbcnt;	/* stability limit exceeded (ro) */
+
+  int tai;			/* TAI offset (ro) */
+
+  /* ??? */
+  int  :32; int  :32; int  :32; int  :32;
+  int  :32; int  :32; int  :32; int  :32;
+  int  :32; int  :32; int  :32;
+};
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)
diff --git a/sysdeps/unix/sysv/linux/adjtime.c b/sysdeps/unix/sysv/linux/adjtime.c
index 829fa0f..6ca4350 100644
--- a/sysdeps/unix/sysv/linux/adjtime.c
+++ b/sysdeps/unix/sysv/linux/adjtime.c
@@ -19,6 +19,7 @@
 #include <limits.h>
 #include <sys/time.h>
 #include <sys/timex.h>
+#include <include/time.h>
 
 #define MAX_SEC	(INT_MAX / 1000000L - 2)
 #define MIN_SEC	(INT_MIN / 1000000L + 2)
@@ -133,3 +134,83 @@ int __adjtime_t64 (const struct __timeval64 *itv,
     }
   return 0;
 }
+
+int
+__adjtimex_t64(struct __timex_t64 *tx)
+{
+  struct timex tx32;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: implement with a 64-bit time syscall */
+    }
+
+  if (tx == NULL)
+    {
+      __set_errno (EFAULT);
+      return -1;
+    }
+
+  if ((tx->modes & ADJ_SETOFFSET) != 0 && tx->time.tv_sec > INT_MAX)
+    {
+      __set_errno (EOVERFLOW);
+      return -1;
+    }
+
+  /* Implement with existing 32-bit time syscall */
+
+  /* Just copy everything */
+  tx32.modes = tx->modes;
+  tx32.offset = tx->offset;
+  tx32.freq = tx->freq;
+  tx32.maxerror = tx->maxerror;
+  tx32.esterror = tx->esterror;
+  tx32.status = tx->status;
+  tx32.constant = tx->constant;
+  tx32.precision = tx->precision;
+  tx32.tolerance = tx->tolerance;
+  tx32.time.tv_sec = tx->time.tv_sec;
+  tx32.time.tv_usec = tx->time.tv_usec;
+  tx32.tick = tx->tick;
+  tx32.ppsfreq = tx->ppsfreq;
+  tx32.jitter = tx->jitter;
+  tx32.shift = tx->shift;
+  tx32.stabil = tx->stabil;
+  tx32.jitcnt = tx->jitcnt;
+  tx32.calcnt = tx->calcnt;
+  tx32.errcnt = tx->errcnt;
+  tx32.stbcnt = tx->stbcnt;
+
+  tx32.tai = tx->tai;
+  /* WARNING -- anonymous fields after TAI are not copied. */
+
+  int result = ADJTIMEX(&tx32);
+
+  if (result == 0)
+    {
+      /* Just copy back everything */
+      tx->modes = tx32.modes;
+      tx->offset = tx32.offset;
+      tx->freq = tx32.freq;
+      tx->maxerror = tx32.maxerror;
+      tx->esterror = tx32.esterror;
+      tx->status = tx32.status;
+      tx->constant = tx32.constant;
+      tx->precision = tx32.precision;
+      tx->tolerance = tx32.tolerance;
+      tx->time.tv_sec = tx32.time.tv_sec;
+      tx->time.tv_usec = tx32.time.tv_usec;
+      tx->tick = tx32.tick;
+      tx->ppsfreq = tx32.ppsfreq;
+      tx->jitter = tx32.jitter;
+      tx->shift = tx32.shift;
+      tx->stabil = tx32.stabil;
+      tx->jitcnt = tx32.jitcnt;
+      tx->calcnt = tx32.calcnt;
+      tx->errcnt = tx32.errcnt;
+      tx->stbcnt = tx32.stbcnt;
+    }
+
+  return result;
+}
+weak_alias (__adjtimex_t64, __ntp_adjtime_t64);
diff --git a/time/Versions b/time/Versions
index 797161d..9bcdb81 100644
--- a/time/Versions
+++ b/time/Versions
@@ -93,5 +93,6 @@ libc {
     __adjtime_t64;
     __getitimer_t64;
     __setitimer_t64;
+    __adjtimex_t64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=798bc999b029bf5dac7a5b8eb0413173f78dd348

commit 798bc999b029bf5dac7a5b8eb0413173f78dd348
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:14 2017 +0200

    Y2038: add function __ntp_gettimex_t64

diff --git a/sysdeps/unix/sysv/linux/Versions b/sysdeps/unix/sysv/linux/Versions
index 59b8964..ee85fb6 100644
--- a/sysdeps/unix/sysv/linux/Versions
+++ b/sysdeps/unix/sysv/linux/Versions
@@ -171,6 +171,7 @@ libc {
     mlock2;
     pkey_alloc; pkey_free; pkey_set; pkey_get; pkey_mprotect;
     __ntp_gettime_t64;
+    __ntp_gettimex_t64;
   }
   GLIBC_PRIVATE {
     # functions used in other libraries
diff --git a/sysdeps/unix/sysv/linux/ntp_gettimex.c b/sysdeps/unix/sysv/linux/ntp_gettimex.c
index e491549..db66e91 100644
--- a/sysdeps/unix/sysv/linux/ntp_gettimex.c
+++ b/sysdeps/unix/sysv/linux/ntp_gettimex.c
@@ -16,6 +16,7 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <sys/timex.h>
+#include <include/time.h>
 
 #ifndef MOD_OFFSET
 # define modes mode
@@ -40,3 +41,33 @@ ntp_gettimex (struct ntptimeval *ntv)
   ntv->__glibc_reserved4 = 0;
   return result;
 }
+
+/* The 64-bit-time version */
+
+extern int __y2038_linux_support;
+
+int
+__ntp_gettimex_t64 (struct __ntptimeval_t64 *ntv)
+{
+  struct timex tntx;
+  int result;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscall if possible
+    }
+
+  tntx.modes = 0;
+  result = __adjtimex (&tntx);
+  ntv->time.tv_sec = tntx.time.tv_sec;
+  ntv->time.tv_usec = tntx.time.tv_usec;
+  ntv->maxerror = tntx.maxerror;
+  ntv->esterror = tntx.esterror;
+  ntv->tai = tntx.tai;
+  ntv->__glibc_reserved1 = 0;
+  ntv->__glibc_reserved2 = 0;
+  ntv->__glibc_reserved3 = 0;
+  ntv->__glibc_reserved4 = 0;
+  return result;
+}
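
The new entry point is only exported as an internal GLIBC_Y2038 symbol, so no
installed header declares it yet.  As a rough illustration (not part of the
patch), a caller could exercise it along the following lines; the local struct
definitions merely mirror the __timeval64 / __ntptimeval_t64 layout used by
this series and are assumptions made for the sake of a self-contained example:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the internal 64-bit-time types.  */
    struct my_timeval64 { int64_t tv_sec; int64_t tv_usec; };
    struct my_ntptimeval64
    {
      struct my_timeval64 time;         /* current time (ro) */
      long int maxerror;                /* maximum error (us) (ro) */
      long int esterror;                /* estimated error (us) (ro) */
      long int tai;                     /* TAI offset (ro) */
      long int __glibc_reserved1, __glibc_reserved2,
               __glibc_reserved3, __glibc_reserved4;
    };

    /* Internal symbol added by this commit (GLIBC_Y2038 version).  */
    extern int __ntp_gettimex_t64 (struct my_ntptimeval64 *ntv);

    int
    main (void)
    {
      struct my_ntptimeval64 ntv;
      int state = __ntp_gettimex_t64 (&ntv);
      if (state < 0)
        return 1;
      printf ("sec=%lld tai=%ld maxerror=%ld state=%d\n",
              (long long) ntv.time.tv_sec, ntv.tai, ntv.maxerror, state);
      return 0;
    }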

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=a664ccb656427bb9716d773e9af34223c70e6e95

commit a664ccb656427bb9716d773e9af34223c70e6e95
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:13 2017 +0200

    Y2038: add function __ntp_gettime_t64

diff --git a/sysdeps/unix/sysv/linux/Versions b/sysdeps/unix/sysv/linux/Versions
index 336c13b..59b8964 100644
--- a/sysdeps/unix/sysv/linux/Versions
+++ b/sysdeps/unix/sysv/linux/Versions
@@ -170,6 +170,7 @@ libc {
     memfd_create;
     mlock2;
     pkey_alloc; pkey_free; pkey_set; pkey_get; pkey_mprotect;
+    __ntp_gettime_t64;
   }
   GLIBC_PRIVATE {
     # functions used in other libraries
diff --git a/sysdeps/unix/sysv/linux/ntp_gettime.c b/sysdeps/unix/sysv/linux/ntp_gettime.c
index 18650da..2b7dc7c 100644
--- a/sysdeps/unix/sysv/linux/ntp_gettime.c
+++ b/sysdeps/unix/sysv/linux/ntp_gettime.c
@@ -18,6 +18,7 @@
 #define ntp_gettime ntp_gettime_redirect
 
 #include <sys/timex.h>
+#include <include/time.h>
 
 #undef ntp_gettime
 
@@ -39,3 +40,27 @@ ntp_gettime (struct ntptimeval *ntv)
   ntv->esterror = tntx.esterror;
   return result;
 }
+
+/* The 64-bit-time version */
+
+extern int __y2038_linux_support;
+
+int
+__ntp_gettime_t64 (struct __ntptimeval_t64 *ntv)
+{
+  struct timex tntx;
+  int result;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscall if possible
+    }
+
+  tntx.modes = 0;
+  result = __adjtimex (&tntx);
+  ntv->time.tv_sec = tntx.time.tv_sec;
+  ntv->time.tv_usec = tntx.time.tv_usec;
+  ntv->maxerror = tntx.maxerror;
+  ntv->esterror = tntx.esterror;
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=ead40aa24868e39d79e941e0a7e3bc29d8865de9

commit ead40aa24868e39d79e941e0a7e3bc29d8865de9
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:12 2017 +0200

    Y2038: add struct __ntp_timeval_t64

diff --git a/include/time.h b/include/time.h
index dbabb81..94d5d87 100644
--- a/include/time.h
+++ b/include/time.h
@@ -61,6 +61,19 @@ struct __itimerval_t64
   struct __timeval64 it_value;
 };
 
+struct __ntptimeval_t64
+{
+  struct __timeval64 time;	/* current time (ro) */
+  long int maxerror;	/* maximum error (us) (ro) */
+  long int esterror;	/* estimated error (us) (ro) */
+  long int tai;		/* TAI offset (ro) */
+
+  long int __glibc_reserved1;
+  long int __glibc_reserved2;
+  long int __glibc_reserved3;
+  long int __glibc_reserved4;
+};
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=91208d223c691fbe45fd06fed9d537252972d348

commit 91208d223c691fbe45fd06fed9d537252972d348
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:11 2017 +0200

    Y2038: add function __getrusage_t64

diff --git a/resource/Makefile b/resource/Makefile
index bf66294..c482e82 100644
--- a/resource/Makefile
+++ b/resource/Makefile
@@ -23,7 +23,7 @@ headers	  := sys/resource.h bits/resource.h sys/vlimit.h sys/vtimes.h	\
 	     ulimit.h bits/types/struct_rusage.h
 
 routines := getrlimit setrlimit getrlimit64 setrlimit64 getrusage ulimit      \
-	    vlimit vtimes getpriority setpriority nice
+	    vlimit vtimes getpriority setpriority nice getrusage64
 
 tests = tst-getrlimit bug-ulimit1
 
diff --git a/resource/Versions b/resource/Versions
index d6c2cce..84f3a79 100644
--- a/resource/Versions
+++ b/resource/Versions
@@ -25,4 +25,11 @@ libc {
   GLIBC_PRIVATE {
     __getrlimit;
   }
+
+  # Y2038 symbols are given their own version until they can be put in
+  # the right place
+
+  GLIBC_Y2038 {
+    __getrusage_t64;
+  }
 }
diff --git a/resource/getrusage64.c b/resource/getrusage64.c
new file mode 100644
index 0000000..e9c27e5
--- /dev/null
+++ b/resource/getrusage64.c
@@ -0,0 +1,187 @@
+/* Return resource usage for the current process.
+   
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sys/resource.h>
+#include <include/time.h>
+#include <sysdep.h>
+#include <sys/syscall.h>
+
+/* Structure which says how much of each resource has been used.  */
+/* 64-bit time version */
+/* The purpose of all the unions is to have the kernel-compatible layout
+   while keeping the API type as 'long int', and among machines where
+   __syscall_slong_t is not 'long int', this only does the right thing
+   for little-endian ones, like x32.  */
+struct __rusage_t64
+  {
+    /* Total amount of user time used.  */
+    struct __timeval64 ru_utime;
+    /* Total amount of system time used.  */
+    struct __timeval64 ru_stime;
+    /* Maximum resident set size (in kilobytes).  */
+    __extension__ union
+      {
+	long int ru_maxrss;
+	__syscall_slong_t __ru_maxrss_word;
+      };
+    /* Amount of sharing of text segment memory
+       with other processes (kilobyte-seconds).  */
+    __extension__ union
+      {
+	long int ru_ixrss;
+	__syscall_slong_t __ru_ixrss_word;
+      };
+    /* Amount of data segment memory used (kilobyte-seconds).  */
+    __extension__ union
+      {
+	long int ru_idrss;
+	__syscall_slong_t __ru_idrss_word;
+      };
+    /* Amount of stack memory used (kilobyte-seconds).  */
+    __extension__ union
+      {
+	long int ru_isrss;
+	 __syscall_slong_t __ru_isrss_word;
+      };
+    /* Number of soft page faults (i.e. those serviced by reclaiming
+       a page from the list of pages awaiting reallocation).  */
+    __extension__ union
+      {
+	long int ru_minflt;
+	__syscall_slong_t __ru_minflt_word;
+      };
+    /* Number of hard page faults (i.e. those that required I/O).  */
+    __extension__ union
+      {
+	long int ru_majflt;
+	__syscall_slong_t __ru_majflt_word;
+      };
+    /* Number of times a process was swapped out of physical memory.  */
+    __extension__ union
+      {
+	long int ru_nswap;
+	__syscall_slong_t __ru_nswap_word;
+      };
+    /* Number of input operations via the file system.  Note: This
+       and `ru_oublock' do not include operations with the cache.  */
+    __extension__ union
+      {
+	long int ru_inblock;
+	__syscall_slong_t __ru_inblock_word;
+      };
+    /* Number of output operations via the file system.  */
+    __extension__ union
+      {
+	long int ru_oublock;
+	__syscall_slong_t __ru_oublock_word;
+      };
+    /* Number of IPC messages sent.  */
+    __extension__ union
+      {
+	long int ru_msgsnd;
+	__syscall_slong_t __ru_msgsnd_word;
+      };
+    /* Number of IPC messages received.  */
+    __extension__ union
+      {
+	long int ru_msgrcv;
+	__syscall_slong_t __ru_msgrcv_word;
+      };
+    /* Number of signals delivered.  */
+    __extension__ union
+      {
+	long int ru_nsignals;
+	__syscall_slong_t __ru_nsignals_word;
+      };
+    /* Number of voluntary context switches, i.e. because the process
+       gave up the processor before it had to (usually to wait for some
+       resource to be available).  */
+    __extension__ union
+      {
+	long int ru_nvcsw;
+	__syscall_slong_t __ru_nvcsw_word;
+      };
+    /* Number of involuntary context switches, i.e. a higher priority process
+       became runnable or the current process used up its time slice.  */
+    __extension__ union
+      {
+	long int ru_nivcsw;
+	__syscall_slong_t __ru_nivcsw_word;
+      };
+  };
+
+extern int __y2038_linux_support;
+
+int __getrusage_t64 (__rusage_who_t __who, struct __rusage_t64 *__usage)
+{
+  int result;
+  struct rusage usage32;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit-time syscall if available
+    }
+
+  result = INLINE_SYSCALL(getrusage, 2, __who, &usage32);
+  /* Copy fields from 32-bit into 64-bit rusage structure */
+  /* Total amount of user time used.  */
+  __usage->ru_utime.tv_sec = usage32.ru_utime.tv_sec;
+  __usage->ru_utime.tv_usec = usage32.ru_utime.tv_usec;
+  /* Total amount of system time used.  */
+  __usage->ru_stime.tv_sec = usage32.ru_stime.tv_sec;
+  __usage->ru_stime.tv_usec = usage32.ru_stime.tv_usec;
+  /* Maximum resident set size (in kilobytes).  */
+  __usage->ru_maxrss = usage32.ru_maxrss;
+  /* Amount of sharing of text segment memory
+     with other processes (kilobyte-seconds).  */
+  __usage->ru_ixrss = usage32.ru_ixrss;
+  /* Amount of data segment memory used (kilobyte-seconds).  */
+  __usage->ru_idrss = usage32.ru_idrss;
+  /* Amount of stack memory used (kilobyte-seconds).  */
+  __usage->ru_isrss = usage32.ru_isrss;
+  /* Number of soft page faults (i.e. those serviced by reclaiming
+     a page from the list of pages awaiting reallocation).  */
+  __usage->ru_minflt = usage32.ru_minflt;
+  /* Number of hard page faults (i.e. those that required I/O).  */
+  __usage->ru_majflt = usage32.ru_majflt;
+  /* Number of times a process was swapped out of physical memory.  */
+  __usage->ru_nswap = usage32.ru_nswap;
+  /* Number of input operations via the file system.  Note: This
+     and `ru_oublock' do not include operations with the cache.  */
+  __usage->ru_inblock = usage32.ru_inblock;
+  /* Number of output operations via the file system.  */
+  __usage->ru_oublock = usage32.ru_oublock;
+  /* Number of IPC messages sent.  */
+  __usage->ru_msgsnd = usage32.ru_msgsnd;
+  /* Number of IPC messages received.  */
+  __usage->ru_msgrcv = usage32.ru_msgrcv;
+  /* Number of signals delivered.  */
+  __usage->ru_nsignals = usage32.ru_nsignals;
+  /* Number of voluntary context switches, i.e. because the process
+     gave up the processor before it had to (usually to wait for some
+     resource to be available).  */
+  __usage->ru_nvcsw = usage32.ru_nvcsw;
+  /* Number of involuntary context switches, i.e. a higher priority process
+     became runnable or the current process used up its time slice.  */
+  __usage->ru_nivcsw = usage32.ru_nivcsw;
+
+  return result;
+}
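
The comment above struct __rusage_t64 about wrapping every counter in a union
is easiest to see with a concrete case.  The following stand-alone sketch
(illustration only, not part of the patch) models an x32-like configuration
where the kernel writes a 64-bit word while the API member keeps the type
'long int'; on a little-endian machine both union members alias the low half,
so reading the API member after the kernel has filled the slot yields the
expected value:

    #include <stdio.h>

    /* Stand-in for __syscall_slong_t on an x32-like ABI.  */
    typedef long long int kernel_slong_t;

    struct demo_rusage
    {
      __extension__ union
        {
          long int ru_maxrss;               /* API view of the field.  */
          kernel_slong_t __ru_maxrss_word;  /* Slot the kernel writes.  */
        };
    };

    int
    main (void)
    {
      struct demo_rusage r;
      r.__ru_maxrss_word = 4096;   /* as if filled in by the kernel */
      /* Little-endian: ru_maxrss aliases the low half of the word.  */
      printf ("%ld\n", r.ru_maxrss);
      return 0;
    }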

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=692bcd7d90d9fb0fa4b974c508b142d490edbc6c

commit 692bcd7d90d9fb0fa4b974c508b142d490edbc6c
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:10 2017 +0200

    Y2038: add functions using futexes
    
    This creates 64-bit time versions of the following APIs:
    - pthread_rwlock_timedrdlock
    - pthread_rwlock_timedwrlock
    - pthread_mutex_timedlock
    - pthread_cond_timedwait
    - sem_timedwait
    - aio_suspend
    
    It also creates 64-bit time versions of the following
    functions or macros:
    - lll_timedlock_elision
    - lll_timedlock
    - __lll_timedlock_wait
    - futex_reltimed_wait_cancelable
    - lll_futex_timed_wait
    - __pthread_cond_wait_common
    - futex_abstimed_wait_cancelable
    - lll_futex_timed_wait_bitset
    - do_aio_misc_wait
    - AIO_MISC_WAIT
    - __new_sem_wait_slow
    - do_futex_wait
    - __pthread_rwlock_wrlock_full
    - __pthread_rwlock_rdlock_full
    - futex_abstimed_wait
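
The calling pattern these _t64 variants are intended to back is the ordinary
POSIX one; only the width of tv_sec in the absolute timeout changes.  A
minimal sketch (standard pthread usage, not part of the patch; whether the
plain names reach the _t64 entry points depends on how the application is
built):

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int ready;                   /* predicate protected by LOCK */

    int
    wait_for_ready (void)
    {
      struct timespec abstime;
      clock_gettime (CLOCK_REALTIME, &abstime);
      abstime.tv_sec += 5;              /* give up after five seconds */

      pthread_mutex_lock (&lock);
      int err = 0;
      while (!ready && err != ETIMEDOUT)
        err = pthread_cond_timedwait (&cond, &lock, &abstime);
      pthread_mutex_unlock (&lock);
      return err;
    }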

diff --git a/nptl/Versions b/nptl/Versions
index 0ae5def..9ed0872 100644
--- a/nptl/Versions
+++ b/nptl/Versions
@@ -272,4 +272,15 @@ libpthread {
     __pthread_barrier_init; __pthread_barrier_wait;
     __shm_directory;
   }
+
+  # Y2038 symbols are given their own version until they can be put in
+  # the right place
+
+  GLIBC_Y2038 {
+    __pthread_rwlock_rdlock_t64;
+    __pthread_rwlock_wrlock_t64;
+    __pthread_mutex_timedlock_t64;
+    __sem_timedwait_t64;
+    __pthread_cond_timedwait_t64;
+  }
 }
diff --git a/nptl/lll_timedlock_wait.c b/nptl/lll_timedlock_wait.c
index 91bf963..14d95c8 100644
--- a/nptl/lll_timedlock_wait.c
+++ b/nptl/lll_timedlock_wait.c
@@ -57,3 +57,40 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 
   return 0;
 }
+
+/* 64-bit time version */
+
+int
+__lll_timedlock_wait_t64 (int *futex, const struct __timespec64 *abstime, int private)
+{
+  /* Reject invalid timeouts.  */
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    return EINVAL;
+
+  /* Try locking.  */
+  while (atomic_exchange_acq (futex, 2) != 0)
+    {
+      struct timeval tv;
+
+      /* Get the current time.  */
+      (void) __gettimeofday (&tv, NULL);
+
+      /* Compute relative timeout.  */
+      struct timespec rt;
+      rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+      rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+      if (rt.tv_nsec < 0)
+        {
+          rt.tv_nsec += 1000000000;
+          --rt.tv_sec;
+        }
+
+      if (rt.tv_sec < 0)
+        return ETIMEDOUT;
+
+      /* If *futex == 2, wait until woken or timeout.  */
+      lll_futex_timed_wait (futex, 2, &rt, private);
+    }
+
+  return 0;
+}
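
The absolute-to-relative conversion above (and repeated in the other _t64
waiters below) is a plain seconds/nanoseconds subtraction with a borrow.  A
small worked example, for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* Deadline 1000.100000000 s; current time 999.900000 s.  */
      int64_t abs_sec = 1000;  long abs_nsec = 100000000;
      long now_sec = 999;      long now_usec = 900000;

      long rt_sec  = abs_sec - now_sec;           /* 1 */
      long rt_nsec = abs_nsec - now_usec * 1000;  /* -800000000 */
      if (rt_nsec < 0)
        {
          rt_nsec += 1000000000;                  /* borrow one second */
          --rt_sec;
        }
      /* 0.2 s remain until the deadline.  */
      printf ("%ld.%09ld\n", rt_sec, rt_nsec);    /* prints 0.200000000 */
      return 0;
    }
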
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 3e11054..4c6aaed 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -647,6 +647,280 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
   return (err != 0) ? err : result;
 }
 
+/* 64-bit time variant */
+
+static __always_inline int
+__pthread_cond_wait_common_t64 (pthread_cond_t *cond, pthread_mutex_t *mutex,
+    const struct __timespec64 *abstime)
+{
+  const int maxspin = 0;
+  int err;
+  int result = 0;
+
+  LIBC_PROBE (cond_wait, 2, cond, mutex);
+
+  /* Acquire a position (SEQ) in the waiter sequence (WSEQ).  We use an
+     atomic operation because signals and broadcasts may update the group
+     switch without acquiring the mutex.  We do not need release MO here
+     because we do not need to establish any happens-before relation with
+     signalers (see __pthread_cond_signal); modification order alone
+     establishes a total order of waiters/signals.  We do need acquire MO
+     to synchronize with group reinitialization in
+     __condvar_quiesce_and_switch_g1.  */
+  uint64_t wseq = __condvar_fetch_add_wseq_acquire (cond, 2);
+  /* Find our group's index.  We always go into what was G2 when we acquired
+     our position.  */
+  unsigned int g = wseq & 1;
+  uint64_t seq = wseq >> 1;
+
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  unsigned int flags = atomic_fetch_add_relaxed (&cond->__data.__wrefs, 8);
+  int private = __condvar_get_private (flags);
+
+  /* Now that we are registered as a waiter, we can release the mutex.
+     Waiting on the condvar must be atomic with releasing the mutex, so if
+     the mutex is used to establish a happens-before relation with any
+     signaler, the waiter must be visible to the latter; thus, we release the
+     mutex after registering as waiter.
+     If releasing the mutex fails, we just cancel our registration as a
+     waiter and confirm that we have woken up.  */
+  err = __pthread_mutex_unlock_usercnt (mutex, 0);
+  if (__glibc_unlikely (err != 0))
+    {
+      __condvar_cancel_waiting (cond, seq, g, private);
+      __condvar_confirm_wakeup (cond, private);
+      return err;
+    }
+
+  /* Now wait until a signal is available in our group or it is closed.
+     Acquire MO so that if we observe a value of zero written after group
+     switching in __condvar_quiesce_and_switch_g1, we synchronize with that
+     store and will see the prior update of __g1_start done while switching
+     groups too.  */
+  unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
+
+  do
+    {
+      while (1)
+	{
+	  /* Spin-wait first.
+	     Note that spinning first without checking whether a timeout
+	     passed might lead to what looks like a spurious wake-up even
+	     though we should return ETIMEDOUT (e.g., if the caller provides
+	     an absolute timeout that is clearly in the past).  However,
+	     (1) spurious wake-ups are allowed, (2) it seems unlikely that a
+	     user will (ab)use pthread_cond_wait as a check for whether a
+	     point in time is in the past, and (3) spinning first without
+	     having to compare against the current time seems to be the right
+	     choice from a performance perspective for most use cases.  */
+	  unsigned int spin = maxspin;
+	  while (signals == 0 && spin > 0)
+	    {
+	      /* Check that we are not spinning on a group that's already
+		 closed.  */
+	      if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))
+		goto done;
+
+	      /* TODO Back off.  */
+
+	      /* Reload signals.  See above for MO.  */
+	      signals = atomic_load_acquire (cond->__data.__g_signals + g);
+	      spin--;
+	    }
+
+	  /* If our group will be closed as indicated by the flag on signals,
+	     don't bother grabbing a signal.  */
+	  if (signals & 1)
+	    goto done;
+
+	  /* If there is an available signal, don't block.  */
+	  if (signals != 0)
+	    break;
+
+	  /* No signals available after spinning, so prepare to block.
+	     We first acquire a group reference and use acquire MO for that so
+	     that we synchronize with the dummy read-modify-write in
+	     __condvar_quiesce_and_switch_g1 if we read from that.  In turn,
+	     in this case this will make us see the closed flag on __g_signals
+	     that designates a concurrent attempt to reuse the group's slot.
+	     We use acquire MO for the __g_signals check to make the
+	     __g1_start check work (see spinning above).
+	     Note that the group reference acquisition will not mask the
+	     release MO when decrementing the reference count because we use
+	     an atomic read-modify-write operation and thus extend the release
+	     sequence.  */
+	  atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);
+	  if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0)
+	      || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)))
+	    {
+	      /* Our group is closed.  Wake up any signalers that might be
+		 waiting.  */
+	      __condvar_dec_grefs (cond, g, private);
+	      goto done;
+	    }
+
+	  // Now block.
+	  struct _pthread_cleanup_buffer buffer;
+	  struct _condvar_cleanup_buffer cbuffer;
+	  cbuffer.wseq = wseq;
+	  cbuffer.cond = cond;
+	  cbuffer.mutex = mutex;
+	  cbuffer.private = private;
+	  __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);
+
+	  if (abstime == NULL)
+	    {
+	      /* Block without a timeout.  */
+	      err = futex_wait_cancelable (
+		  cond->__data.__g_signals + g, 0, private);
+	    }
+	  else
+	    {
+	      /* Block, but with a timeout.
+		 Work around the fact that the kernel rejects negative timeout
+		 values despite them being valid.  */
+	      if (__glibc_unlikely (abstime->tv_sec < 0))
+	        err = ETIMEDOUT;
+
+	      else if ((flags & __PTHREAD_COND_CLOCK_MONOTONIC_MASK) != 0)
+		{
+		  /* CLOCK_MONOTONIC is requested.  */
+		  struct timespec rt;
+		  struct __timespec64 rt64;
+		  if (__clock_gettime (CLOCK_MONOTONIC, &rt) != 0)
+		    __libc_fatal ("clock_gettime does not support "
+				  "CLOCK_MONOTONIC");
+		  /* Convert the absolute timeout value to a relative
+		     timeout.  */
+		  rt64.tv_sec = abstime->tv_sec - rt.tv_sec;
+		  rt64.tv_nsec = abstime->tv_nsec - rt.tv_nsec;
+		  if (rt64.tv_nsec < 0)
+		    {
+		      rt64.tv_nsec += 1000000000;
+		      --rt64.tv_sec;
+		    }
+		  /* Did we already time out?  */
+		  if (__glibc_unlikely (rt64.tv_sec < 0))
+		    err = ETIMEDOUT;
+		  else
+		    err = futex_reltimed_wait_cancelable_t64
+			(cond->__data.__g_signals + g, 0, &rt64, private);
+		}
+	      else
+		{
+		  /* Use CLOCK_REALTIME.  */
+		  err = futex_abstimed_wait_cancelable_t64
+		      (cond->__data.__g_signals + g, 0, abstime, private);
+		}
+	    }
+
+	  __pthread_cleanup_pop (&buffer, 0);
+
+	  if (__glibc_unlikely (err == ETIMEDOUT))
+	    {
+	      __condvar_dec_grefs (cond, g, private);
+	      /* If we timed out, we effectively cancel waiting.  Note that
+		 we have decremented __g_refs before cancellation, so that a
+		 deadlock between waiting for quiescence of our group in
+		 __condvar_quiesce_and_switch_g1 and us trying to acquire
+		 the lock during cancellation is not possible.  */
+	      __condvar_cancel_waiting (cond, seq, g, private);
+	      result = ETIMEDOUT;
+	      goto done;
+	    }
+	  else
+	    __condvar_dec_grefs (cond, g, private);
+
+	  /* Reload signals.  See above for MO.  */
+	  signals = atomic_load_acquire (cond->__data.__g_signals + g);
+	}
+
+    }
+  /* Try to grab a signal.  Use acquire MO so that we see an up-to-date value
+     of __g1_start below (see spinning above for a similar case).  In
+     particular, if we steal from a more recent group, we will also see a
+     more recent __g1_start below.  */
+  while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g,
+						&signals, signals - 2));
+
+  /* We consumed a signal but we could have consumed from a more recent group
+     that aliased with ours due to being in the same group slot.  If this
+     might be the case our group must be closed as visible through
+     __g1_start.  */
+  uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
+  if (seq < (g1_start >> 1))
+    {
+      /* We potentially stole a signal from a more recent group but we do not
+	 know which group we really consumed from.
+	 We do not care about groups older than current G1 because they are
+	 closed; we could have stolen from these, but then we just add a
+	 spurious wake-up for the current groups.
+	 We will never steal a signal from current G2 that was really intended
+	 for G2 because G2 never receives signals (until it becomes G1).  We
+	 could have stolen a signal from G2 that was conservatively added by a
+	 previous waiter that also thought it stole a signal -- but given that
+	 that signal was added unnecessarily, it's not a problem if we steal
+	 it.
+	 Thus, the remaining case is that we could have stolen from the current
+	 G1, where "current" means the __g1_start value we observed.  However,
+	 if the current G1 does not have the same slot index as we do, we did
+	 not steal from it and do not need to undo that.  This is the reason
+	 for putting a bit with G2's index into __g1_start as well.  */
+      if (((g1_start & 1) ^ 1) == g)
+	{
+	  /* We have to conservatively undo our potential mistake of stealing
+	     a signal.  We can stop trying to do that when the current G1
+	     changes because other spinning waiters will notice this too and
+	     __condvar_quiesce_and_switch_g1 has checked that there are no
+	     futex waiters anymore before switching G1.
+	     Relaxed MO is fine for the __g1_start load because we need to
+	     merely be able to observe this fact and not have to observe
+	     something else as well.
+	     ??? Would it help to spin for a little while to see whether the
+	     current G1 gets closed?  This might be worthwhile if the group is
+	     small or close to being closed.  */
+	  unsigned int s = atomic_load_relaxed (cond->__data.__g_signals + g);
+	  while (__condvar_load_g1_start_relaxed (cond) == g1_start)
+	    {
+	      /* Try to add a signal.  We don't need to acquire the lock
+		 because at worst we can cause a spurious wake-up.  If the
+		 group is in the process of being closed (LSB is true), this
+		 has an effect similar to us adding a signal.  */
+	      if (((s & 1) != 0)
+		  || atomic_compare_exchange_weak_relaxed
+		       (cond->__data.__g_signals + g, &s, s + 2))
+		{
+		  /* If we added a signal, we also need to add a wake-up on
+		     the futex.  We also need to do that if we skipped adding
+		     a signal because the group is being closed because
+		     while __condvar_quiesce_and_switch_g1 could have closed
+		     the group, it might still be waiting for futex waiters to
+		     leave (and one of those waiters might be the one we stole
+		     the signal from, which caused it to block using the
+		     futex).  */
+		  futex_wake (cond->__data.__g_signals + g, 1, private);
+		  break;
+		}
+	      /* TODO Back off.  */
+	    }
+	}
+    }
+
+ done:
+
+  /* Confirm that we have been woken.  We do that before acquiring the mutex
+     to allow for execution of pthread_cond_destroy while having acquired the
+     mutex.  */
+  __condvar_confirm_wakeup (cond, private);
+
+  /* Woken up; now re-acquire the mutex.  If this doesn't fail, return RESULT,
+     which is set to ETIMEDOUT if a timeout occurred, or zero otherwise.  */
+  err = __pthread_mutex_cond_lock (mutex);
+  /* XXX Abort on errors that are disallowed by POSIX?  */
+  return (err != 0) ? err : result;
+}
+
 
 /* See __pthread_cond_wait_common.  */
 int
@@ -667,6 +941,17 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
   return __pthread_cond_wait_common (cond, mutex, abstime);
 }
 
+int
+__pthread_cond_timedwait_t64 (pthread_cond_t *cond, pthread_mutex_t *mutex,
+    const struct __timespec64 *abstime)
+{
+  /* Check parameter validity.  This should also tell the compiler that
+     it can assume that abstime is not NULL.  */
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    return EINVAL;
+  return __pthread_cond_wait_common_t64 (cond, mutex, abstime);
+}
+
 versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
 		  GLIBC_2_3_2);
 versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 66efd39..fff3800 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -32,6 +32,10 @@
 #define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
 #endif
 
+#ifndef lll_timedlock_elision_t64
+#define lll_timedlock_elision_t64(a,dummy,b,c) lll_timedlock_t64(a, b, c)
+#endif
+
 #ifndef lll_trylock_elision
 #define lll_trylock_elision(a,t) lll_trylock(a)
 #endif
@@ -638,3 +642,635 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
   return result;
 }
 weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
+
+/* 64-bit time version */
+
+int
+pthread_mutex_timedlock_t64 (pthread_mutex_t *mutex,
+			 const struct __timespec64 *abstime)
+{
+/* These locals are only needed when the kernel provides clock_gettime64.  */
+#ifdef __NR_clock_gettime64
+  int oldval;
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+  int result = 0;
+#endif
+  struct timespec abstime32;
+
+  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
+
+/* Take the 64-bit code path only if the kernel provides clock_gettime64.  */
+#ifdef __NR_clock_gettime64
+  if (__y2038_kernel_support())
+    {
+
+      /* We must not check ABSTIME here.  If the thread does not block
+         abstime must not be checked for a valid value.  */
+
+      switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
+                    PTHREAD_MUTEX_TIMED_NP))
+        {
+          /* Recursive mutex.  */
+        case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
+        case PTHREAD_MUTEX_RECURSIVE_NP:
+          /* Check whether we already hold the mutex.  */
+          if (mutex->__data.__owner == id)
+        {
+          /* Just bump the counter.  */
+          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+            /* Overflow of the counter.  */
+            return EAGAIN;
+
+          ++mutex->__data.__count;
+
+          goto out;
+        }
+
+          /* We have to get the mutex.  */
+          result = lll_timedlock_t64 (mutex->__data.__lock, abstime,
+                          PTHREAD_MUTEX_PSHARED (mutex));
+
+          if (result != 0)
+        goto out;
+
+          /* Only locked once so far.  */
+          mutex->__data.__count = 1;
+          break;
+
+          /* Error checking mutex.  */
+        case PTHREAD_MUTEX_ERRORCHECK_NP:
+          /* Check whether we already hold the mutex.  */
+          if (__glibc_unlikely (mutex->__data.__owner == id))
+        return EDEADLK;
+
+          /* Don't do lock elision on an error checking mutex.  */
+          goto simple;
+
+        case PTHREAD_MUTEX_TIMED_NP:
+          FORCE_ELISION (mutex, goto elision);
+        simple:
+          /* Normal mutex.  */
+          result = lll_timedlock_t64 (mutex->__data.__lock, abstime,
+                          PTHREAD_MUTEX_PSHARED (mutex));
+          break;
+
+        case PTHREAD_MUTEX_TIMED_ELISION_NP:
+        elision: __attribute__((unused))
+          /* Don't record ownership */
+          return lll_timedlock_elision_t64 (mutex->__data.__lock,
+                        mutex->__data.__spins,
+                        abstime,
+                        PTHREAD_MUTEX_PSHARED (mutex));
+
+
+        case PTHREAD_MUTEX_ADAPTIVE_NP:
+          if (! __is_smp)
+        goto simple;
+
+          if (lll_trylock (mutex->__data.__lock) != 0)
+        {
+          int cnt = 0;
+          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
+                     mutex->__data.__spins * 2 + 10);
+          do
+            {
+              if (cnt++ >= max_cnt)
+            {
+              result = lll_timedlock_t64 (mutex->__data.__lock, abstime,
+                              PTHREAD_MUTEX_PSHARED (mutex));
+              break;
+            }
+              atomic_spin_nop ();
+            }
+          while (lll_trylock (mutex->__data.__lock) != 0);
+
+          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
+        }
+          break;
+
+        case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+        case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+        case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+        case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                 &mutex->__data.__list.__next);
+          /* We need to set op_pending before starting the operation.  Also
+         see comments at ENQUEUE_MUTEX.  */
+          __asm ("" ::: "memory");
+
+          oldval = mutex->__data.__lock;
+          /* This is set to FUTEX_WAITERS iff we might have shared the
+         FUTEX_WAITERS flag with other threads, and therefore need to keep it
+         set to avoid lost wake-ups.  We have the same requirement in the
+         simple mutex algorithm.  */
+          unsigned int assume_other_futex_waiters = 0;
+          while (1)
+        {
+          /* Try to acquire the lock through a CAS from 0 (not acquired) to
+             our TID | assume_other_futex_waiters.  */
+          if (__glibc_likely (oldval == 0))
+            {
+              oldval
+                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                    id | assume_other_futex_waiters, 0);
+              if (__glibc_likely (oldval == 0))
+            break;
+            }
+
+          if ((oldval & FUTEX_OWNER_DIED) != 0)
+            {
+              /* The previous owner died.  Try locking the mutex.  */
+              int newval = id | (oldval & FUTEX_WAITERS)
+              | assume_other_futex_waiters;
+
+              newval
+            = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                   newval, oldval);
+              if (newval != oldval)
+            {
+              oldval = newval;
+              continue;
+            }
+
+              /* We got the mutex.  */
+              mutex->__data.__count = 1;
+              /* But it is inconsistent unless marked otherwise.  */
+              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+              /* We must not enqueue the mutex before we have acquired it.
+             Also see comments at ENQUEUE_MUTEX.  */
+              __asm ("" ::: "memory");
+              ENQUEUE_MUTEX (mutex);
+              /* We need to clear op_pending after we enqueue the mutex.  */
+              __asm ("" ::: "memory");
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+              /* Note that we deliberately exit here.  If we fall
+             through to the end of the function __nusers would be
+             incremented which is not correct because the old
+             owner has to be discounted.  */
+              return EOWNERDEAD;
+            }
+
+          /* Check whether we already hold the mutex.  */
+          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
+            {
+              int kind = PTHREAD_MUTEX_TYPE (mutex);
+              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+            {
+              /* We do not need to ensure ordering wrt another memory
+                 access.  Also see comments at ENQUEUE_MUTEX. */
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     NULL);
+              return EDEADLK;
+            }
+
+              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+            {
+              /* We do not need to ensure ordering wrt another memory
+                 access.  */
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     NULL);
+
+              /* Just bump the counter.  */
+              if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+                /* Overflow of the counter.  */
+                return EAGAIN;
+
+              ++mutex->__data.__count;
+
+              LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
+
+              return 0;
+            }
+            }
+
+          /* We are about to block; check whether the timeout is invalid.  */
+          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+            return EINVAL;
+          /* Work around the fact that the kernel rejects negative timeout
+             values despite them being valid.  */
+          if (__glibc_unlikely (abstime->tv_sec < 0))
+            return ETIMEDOUT;
+    #if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
+         || !defined lll_futex_timed_wait_bitset)
+          struct timeval tv;
+          struct timespec rt;
+
+          /* Get the current time.  */
+          (void) __gettimeofday (&tv, NULL);
+
+          /* Compute relative timeout.  */
+          rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+          rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+          if (rt.tv_nsec < 0)
+            {
+              rt.tv_nsec += 1000000000;
+              --rt.tv_sec;
+            }
+
+          /* Already timed out?  */
+          if (rt.tv_sec < 0)
+            return ETIMEDOUT;
+    #endif
+
+          /* We cannot acquire the mutex nor has its owner died.  Thus, try
+             to block using futexes.  Set FUTEX_WAITERS if necessary so that
+             other threads are aware that there are potentially threads
+             blocked on the futex.  Restart if oldval changed in the
+             meantime.  */
+          if ((oldval & FUTEX_WAITERS) == 0)
+            {
+              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
+                                oldval | FUTEX_WAITERS,
+                                oldval)
+              != 0)
+            {
+              oldval = mutex->__data.__lock;
+              continue;
+            }
+              oldval |= FUTEX_WAITERS;
+            }
+
+          /* It is now possible that we share the FUTEX_WAITERS flag with
+             another thread; therefore, update assume_other_futex_waiters so
+             that we do not forget about this when handling other cases
+             above and thus do not cause lost wake-ups.  */
+          assume_other_futex_waiters |= FUTEX_WAITERS;
+
+          /* Block using the futex.  */
+    #if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
+         || !defined lll_futex_timed_wait_bitset)
+          lll_futex_timed_wait (&mutex->__data.__lock, oldval,
+                    &rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+    #else
+          int err = lll_futex_timed_wait_bitset_t64 (&mutex->__data.__lock,
+              oldval, abstime, FUTEX_CLOCK_REALTIME,
+              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+          /* The futex call timed out.  */
+          if (err == -ETIMEDOUT)
+            return -err;
+    #endif
+          /* Reload current lock value.  */
+          oldval = mutex->__data.__lock;
+        }
+
+          /* We have acquired the mutex; check if it is still consistent.  */
+          if (__builtin_expect (mutex->__data.__owner
+                    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+        {
+          /* This mutex is now not recoverable.  */
+          mutex->__data.__count = 0;
+          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
+          lll_unlock (mutex->__data.__lock, private);
+          /* FIXME This violates the mutex destruction requirements.  See
+             __pthread_mutex_unlock_full.  */
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+          return ENOTRECOVERABLE;
+        }
+
+          mutex->__data.__count = 1;
+          /* We must not enqueue the mutex before we have acquired it.
+         Also see comments at ENQUEUE_MUTEX.  */
+          __asm ("" ::: "memory");
+          ENQUEUE_MUTEX (mutex);
+          /* We need to clear op_pending after we enqueue the mutex.  */
+          __asm ("" ::: "memory");
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+          break;
+
+        /* The PI support requires the Linux futex system call.  If that's not
+           available, pthread_mutex_init should never have allowed the type to
+           be set.  So it will get the default case for an invalid type.  */
+    #ifdef __NR_futex
+        case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+        case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+        case PTHREAD_MUTEX_PI_NORMAL_NP:
+        case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+        case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+        case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+        case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+        case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+          {
+        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+        if (robust)
+          {
+            /* Note: robust PI futexes are signaled by setting bit 0.  */
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                   (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                         | 1));
+            /* We need to set op_pending before starting the operation.  Also
+               see comments at ENQUEUE_MUTEX.  */
+            __asm ("" ::: "memory");
+          }
+
+        oldval = mutex->__data.__lock;
+
+        /* Check whether we already hold the mutex.  */
+        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
+          {
+            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+              {
+            /* We do not need to ensure ordering wrt another memory
+               access.  */
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+            return EDEADLK;
+              }
+
+            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+              {
+            /* We do not need to ensure ordering wrt another memory
+               access.  */
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+            /* Just bump the counter.  */
+            if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+              /* Overflow of the counter.  */
+              return EAGAIN;
+
+            ++mutex->__data.__count;
+
+            LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
+
+            return 0;
+              }
+          }
+
+        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                  id, 0);
+
+        if (oldval != 0)
+          {
+            /* The mutex is locked.  The kernel will now take care of
+               everything.  The timeout value must be a relative value.
+               Convert it.  */
+            int private = (robust
+                   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                   : PTHREAD_MUTEX_PSHARED (mutex));
+            INTERNAL_SYSCALL_DECL (__err);
+
+            int e;
+            
+            if (abstime->tv_sec > INT_MAX)
+            {
+              e = EOVERFLOW;
+            }
+            else
+            {
+              struct timespec ts;
+              ts.tv_sec = abstime->tv_sec;
+              ts.tv_nsec = abstime->tv_nsec;
+              e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                          __lll_private_flag (FUTEX_LOCK_PI,
+                                  private), 1,
+                          &ts);
+            }
+            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
+              {
+            if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
+              return ETIMEDOUT;
+
+            if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+                || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
+              {
+                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+                    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+                /* ESRCH can happen only for non-robust PI mutexes where
+                   the owner of the lock died.  */
+                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
+                    || !robust);
+
+                /* Delay the thread until the timeout is reached.
+                   Then return ETIMEDOUT.  */
+                struct timespec reltime;
+                struct __timespec64 now;
+
+                INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
+                          &now);
+                reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+                reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+                if (reltime.tv_nsec < 0)
+                  {
+                reltime.tv_nsec += 1000000000;
+                --reltime.tv_sec;
+                  }
+                if (reltime.tv_sec >= 0)
+                  while (nanosleep_not_cancel (&reltime, &reltime) != 0)
+                continue;
+
+                return ETIMEDOUT;
+              }
+
+            return INTERNAL_SYSCALL_ERRNO (e, __err);
+              }
+
+            oldval = mutex->__data.__lock;
+
+            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+          }
+
+        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
+          {
+            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+            /* We got the mutex.  */
+            mutex->__data.__count = 1;
+            /* But it is inconsistent unless marked otherwise.  */
+            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+            /* We must not enqueue the mutex before we have acquired it.
+               Also see comments at ENQUEUE_MUTEX.  */
+            __asm ("" ::: "memory");
+            ENQUEUE_MUTEX_PI (mutex);
+            /* We need to clear op_pending after we enqueue the mutex.  */
+            __asm ("" ::: "memory");
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+            /* Note that we deliberately exit here.  If we fall
+               through to the end of the function __nusers would be
+               incremented which is not correct because the old owner
+               has to be discounted.  */
+            return EOWNERDEAD;
+          }
+
+        if (robust
+            && __builtin_expect (mutex->__data.__owner
+                     == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+          {
+            /* This mutex is now not recoverable.  */
+            mutex->__data.__count = 0;
+
+            INTERNAL_SYSCALL_DECL (__err);
+            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                      __lll_private_flag (FUTEX_UNLOCK_PI,
+                              PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+                      0, 0);
+
+            /* To the kernel, this will be visible after the kernel has
+               acquired the mutex in the syscall.  */
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+            return ENOTRECOVERABLE;
+          }
+
+        mutex->__data.__count = 1;
+        if (robust)
+          {
+            /* We must not enqueue the mutex before we have acquired it.
+               Also see comments at ENQUEUE_MUTEX.  */
+            __asm ("" ::: "memory");
+            ENQUEUE_MUTEX_PI (mutex);
+            /* We need to clear op_pending after we enqueue the mutex.  */
+            __asm ("" ::: "memory");
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+          }
+        }
+          break;
+    #endif  /* __NR_futex.  */
+
+        case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+        case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+        case PTHREAD_MUTEX_PP_NORMAL_NP:
+        case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+          {
+        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+        oldval = mutex->__data.__lock;
+
+        /* Check whether we already hold the mutex.  */
+        if (mutex->__data.__owner == id)
+          {
+            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+              return EDEADLK;
+
+            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+              {
+            /* Just bump the counter.  */
+            if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+              /* Overflow of the counter.  */
+              return EAGAIN;
+
+            ++mutex->__data.__count;
+
+            LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
+
+            return 0;
+              }
+          }
+
+        int oldprio = -1, ceilval;
+        do
+          {
+            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+            if (__pthread_current_priority () > ceiling)
+              {
+            result = EINVAL;
+              failpp:
+            if (oldprio != -1)
+              __pthread_tpp_change_priority (oldprio, -1);
+            return result;
+              }
+
+            result = __pthread_tpp_change_priority (oldprio, ceiling);
+            if (result)
+              return result;
+
+            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+            oldprio = ceiling;
+
+            oldval
+              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                 ceilval | 1, ceilval);
+
+            if (oldval == ceilval)
+              break;
+
+            do
+              {
+            oldval
+              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                 ceilval | 2,
+                                 ceilval | 1);
+
+            if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+              break;
+
+            if (oldval != ceilval)
+              {
+                /* Reject invalid timeouts.  */
+                if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+                  {
+                result = EINVAL;
+                goto failpp;
+                  }
+
+                struct timeval tv;
+                struct timespec rt;
+
+                /* Get the current time.  */
+                (void) __gettimeofday (&tv, NULL);
+
+                /* Compute relative timeout.  */
+                rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+                rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+                if (rt.tv_nsec < 0)
+                  {
+                rt.tv_nsec += 1000000000;
+                --rt.tv_sec;
+                  }
+
+                /* Already timed out?  */
+                if (rt.tv_sec < 0)
+                  {
+                result = ETIMEDOUT;
+                goto failpp;
+                  }
+
+                lll_futex_timed_wait (&mutex->__data.__lock,
+                          ceilval | 2, &rt,
+                          PTHREAD_MUTEX_PSHARED (mutex));
+              }
+              }
+            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                ceilval | 2, ceilval)
+               != ceilval);
+          }
+        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+        assert (mutex->__data.__owner == 0);
+        mutex->__data.__count = 1;
+          }
+          break;
+
+        default:
+          /* Correct code cannot set any other type.  */
+          return EINVAL;
+        }
+
+      if (result == 0)
+        {
+          /* Record the ownership.  */
+          mutex->__data.__owner = id;
+          ++mutex->__data.__nusers;
+
+          LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
+        }
+     out:
+      return result;
+    }
+#endif
+
+  if (abstime->tv_sec > INT_MAX || abstime->tv_sec < INT_MIN)
+    {
+      return EOVERFLOW;
+    }
+
+  abstime32.tv_sec = (time_t) (abstime->tv_sec);
+  abstime32.tv_nsec = abstime->tv_nsec;
+
+  return pthread_mutex_timedlock (mutex, &abstime32);
+}
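
When the 64-bit path is not available, the code above falls back on the
existing 32-bit pthread_mutex_timedlock after checking that the deadline
survives truncation; the same guard opens __adjtimex_t64 earlier in this
message.  A stand-alone sketch of that down-conversion (illustration only;
the helper name and the 64-bit struct are assumptions, not glibc API):

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>
    #include <time.h>

    /* Hypothetical stand-in for the internal 64-bit timespec.  */
    struct timespec64 { int64_t tv_sec; long tv_nsec; };

    /* Refuse deadlines whose seconds value cannot be represented once
       truncated to a 32-bit range, mirroring the fallback above.  */
    static int
    timespec64_to_timespec (const struct timespec64 *t64, struct timespec *t32)
    {
      if (t64->tv_sec > INT_MAX || t64->tv_sec < INT_MIN)
        return EOVERFLOW;
      t32->tv_sec = (time_t) t64->tv_sec;
      t32->tv_nsec = t64->tv_nsec;
      return 0;
    }

    int
    main (void)
    {
      struct timespec64 deadline = { (int64_t) INT_MAX + 1, 0 };
      struct timespec ts;
      /* A post-2038 deadline is rejected with EOVERFLOW here.  */
      return timespec64_to_timespec (&deadline, &ts) == EOVERFLOW ? 0 : 1;
    }
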
diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index a290d08..3016dca 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -507,6 +507,240 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,
   return 0;
 }
 
+/* 64-bit time version */
+
+static __always_inline int
+__pthread_rwlock_rdlock_full_t64 (pthread_rwlock_t *rwlock,
+    const struct __timespec64 *abstime)
+{
+  unsigned int r;
+
+  /* Make sure we are not holding the rwlock as a writer.  This is a deadlock
+     situation we recognize and report.  */
+  if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)
+      == THREAD_GETMEM (THREAD_SELF, tid)))
+    return EDEADLK;
+
+  /* If we prefer writers, recursive rdlock is disallowed, we are in a read
+     phase, and there are other readers present, we try to wait without
+     extending the read phase.  We will be unblocked by either one of the
+     other active readers, or if the writer gives up WRLOCKED (e.g., on
+     timeout).
+     If there are no other readers, we simply race with any existing primary
+     writer; it would have been a race anyway, and changing the odds slightly
+     will likely not make a big difference.  */
+  if (rwlock->__data.__flags == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
+    {
+      r = atomic_load_relaxed (&rwlock->__data.__readers);
+      while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)
+	      && ((r & PTHREAD_RWLOCK_WRLOCKED) != 0)
+	      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) > 0))
+	{
+	  /* TODO Spin first.  */
+	  /* Try setting the flag signaling that we are waiting without having
+	     incremented the number of readers.  Relaxed MO is fine because
+	     this is just about waiting for a state change in __readers.  */
+	  if (atomic_compare_exchange_weak_relaxed
+	      (&rwlock->__data.__readers, &r, r | PTHREAD_RWLOCK_RWAITING))
+	    {
+	      /* Wait for as long as the flag is set.  An ABA situation is
+		 harmless because the flag is just about the state of
+		 __readers, and all threads set the flag under the same
+		 conditions.  */
+	      while ((atomic_load_relaxed (&rwlock->__data.__readers)
+		  & PTHREAD_RWLOCK_RWAITING) != 0)
+		{
+		  int private = __pthread_rwlock_get_private (rwlock);
+		  int err = futex_abstimed_wait_t64 (&rwlock->__data.__readers,
+		      r, abstime, private);
+		  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
+		     return because we don't need to clean up anything.  */
+		  if (err == ETIMEDOUT)
+		    return err;
+		}
+	      /* It makes sense to not break out of the outer loop here
+		 because we might be in the same situation again.  */
+	    }
+	  else
+	    {
+	      /* TODO Back-off.  */
+	    }
+	}
+    }
+  /* Register as a reader, using an add-and-fetch so that R can be used as
+     expected value for future operations.  Acquire MO so we synchronize with
+     prior writers as well as the last reader of the previous read phase (see
+     below).  */
+  r = atomic_fetch_add_acquire (&rwlock->__data.__readers,
+      (1 << PTHREAD_RWLOCK_READER_SHIFT)) + (1 << PTHREAD_RWLOCK_READER_SHIFT);
+
+  /* Check whether there is an overflow in the number of readers.  We assume
+     that the total number of threads is less than half the maximum number
+     of readers that we have bits for in __readers (i.e., with 32-bit int and
+     PTHREAD_RWLOCK_READER_SHIFT of 3, we assume there are less than
+     1 << (32-3-1) concurrent threads).
+     If there is an overflow, we use a CAS to try to decrement the number of
+     readers if there still is an overflow situation.  If so, we return
+     EAGAIN; if not, we are not a thread causing an overflow situation, and so
+     we just continue.  Using a fetch-add instead of the CAS isn't possible
+     because other readers might release the lock concurrently, which could
+     make us the last reader and thus responsible for handing ownership over
+     to writers (which requires a CAS too to make the decrement and ownership
+     transfer indivisible).  */
+  while (__glibc_unlikely (r >= PTHREAD_RWLOCK_READER_OVERFLOW))
+    {
+      /* Relaxed MO is okay because we just want to undo our registration and
+	 cannot have changed the rwlock state substantially if the CAS
+	 succeeds.  */
+      if (atomic_compare_exchange_weak_relaxed (&rwlock->__data.__readers, &r,
+	  r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
+	return EAGAIN;
+    }
+
+  /* We have registered as a reader, so if we are in a read phase, we have
+     acquired a read lock.  This is also the reader--reader fast-path.
+     Even if there is a primary writer, we just return.  If writers are to
+     be preferred and we are the only active reader, we could try to enter a
+     write phase to let the writer proceed.  This would be okay because we
+     cannot have acquired the lock previously as a reader (which could result
+     in deadlock if we would wait for the primary writer to run).  However,
+     this seems to be a corner case and handling it specially would not be worth the
+     complexity.  */
+  if (__glibc_likely ((r & PTHREAD_RWLOCK_WRPHASE) == 0))
+    return 0;
+
+  /* If there is no primary writer but we are in a write phase, we can try
+     to install a read phase ourself.  */
+  while (((r & PTHREAD_RWLOCK_WRPHASE) != 0)
+      && ((r & PTHREAD_RWLOCK_WRLOCKED) == 0))
+    {
+       /* Try to enter a read phase: If the CAS below succeeds, we have
+	 ownership; if it fails, we will simply retry and reassess the
+	 situation.
+	 Acquire MO so we synchronize with prior writers.  */
+      if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers, &r,
+	  r ^ PTHREAD_RWLOCK_WRPHASE))
+	{
+	  /* We started the read phase, so we are also responsible for
+	     updating the write-phase futex.  Relaxed MO is sufficient.
+	     Note that there can be no other reader that we have to wake
+	     because all other readers will see the read phase started by us
+	     (or they will try to start it themselves); if a writer started
+	     the read phase, we cannot have started it.  Furthermore, we
+	     cannot discard a PTHREAD_RWLOCK_FUTEX_USED flag because we will
+	     overwrite the value set by the most recent writer (or the readers
+	     before it in case of explicit hand-over) and we know that there
+	     are no waiting readers.  */
+	  atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 0);
+	  return 0;
+	}
+      else
+	{
+	  /* TODO Back off before retrying.  Also see above.  */
+	}
+    }
+
+  if ((r & PTHREAD_RWLOCK_WRPHASE) != 0)
+    {
+      /* We are in a write phase, and there must be a primary writer because
+	 of the previous loop.  Block until the primary writer gives up the
+	 write phase.  This case requires explicit hand-over using
+	 __wrphase_futex.
+	 However, __wrphase_futex might not have been set to 1 yet (either
+	 because explicit hand-over to the writer is still ongoing, or because
+	 the writer has started the write phase but has not yet updated
+	 __wrphase_futex).  The least recent value of __wrphase_futex we can
+	 read from here is the modification of the last read phase (because
+	 we synchronize with the last reader in this read phase through
+	 __readers; see the use of acquire MO on the fetch_add above).
+	 Therefore, if we observe a value of 0 for __wrphase_futex, we need
+	 to subsequently check that __readers now indicates a read phase; we
+	 need to use acquire MO for this so that if we observe a read phase,
+	 we will also see the modification of __wrphase_futex by the previous
+	 writer.  We then need to load __wrphase_futex again and continue to
+	 wait if it is not 0, so that we do not skip explicit hand-over.
+	 Relaxed MO is sufficient for the load from __wrphase_futex because
+	 we just use it as an indicator for when we can proceed; we use
+	 __readers and the acquire MO accesses to it to eventually read from
+	 the proper stores to __wrphase_futex.  */
+      unsigned int wpf;
+      bool ready = false;
+      for (;;)
+	{
+	  while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))
+	      | PTHREAD_RWLOCK_FUTEX_USED) == (1 | PTHREAD_RWLOCK_FUTEX_USED))
+	    {
+	      int private = __pthread_rwlock_get_private (rwlock);
+	      if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)
+		  && !atomic_compare_exchange_weak_relaxed
+		      (&rwlock->__data.__wrphase_futex,
+		       &wpf, wpf | PTHREAD_RWLOCK_FUTEX_USED))
+		continue;
+	      int err = futex_abstimed_wait_t64 (&rwlock->__data.__wrphase_futex,
+		  1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+	      if (err == ETIMEDOUT)
+		{
+		  /* If we timed out, we need to unregister.  If no read phase
+		     has been installed while we waited, we can just decrement
+		     the number of readers.  Otherwise, we just acquire the
+		     lock, which is allowed because we give no precise timing
+		     guarantees, and because the timeout is only required to
+		     be in effect if we would have had to wait for other
+		     threads (e.g., if futex_wait would time-out immediately
+		     because the given absolute time is in the past).  */
+		  r = atomic_load_relaxed (&rwlock->__data.__readers);
+		  while ((r & PTHREAD_RWLOCK_WRPHASE) != 0)
+		    {
+		      /* We don't need to make anything else visible to
+			 others besides unregistering, so relaxed MO is
+			 sufficient.  */
+		      if (atomic_compare_exchange_weak_relaxed
+			  (&rwlock->__data.__readers, &r,
+			   r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
+			return ETIMEDOUT;
+		      /* TODO Back-off.  */
+		    }
+		  /* Use the acquire MO fence to mirror the steps taken in the
+		     non-timeout case.  Note that the read can happen both
+		     in the atomic_load above as well as in the failure case
+		     of the CAS operation.  */
+		  atomic_thread_fence_acquire ();
+		  /* We still need to wait for explicit hand-over, but we must
+		     not use futex_wait anymore because we would just time out
+		     in this case and thus make the spin-waiting we need
+		     unnecessarily expensive.  */
+		  while ((atomic_load_relaxed (&rwlock->__data.__wrphase_futex)
+		      | PTHREAD_RWLOCK_FUTEX_USED)
+		      == (1 | PTHREAD_RWLOCK_FUTEX_USED))
+		    {
+		      /* TODO Back-off?  */
+		    }
+		  ready = true;
+		  break;
+		}
+	      /* If we got interrupted (EINTR) or the futex word does not have the
+		 expected value (EAGAIN), retry.  */
+	    }
+	  if (ready)
+	    /* See below.  */
+	    break;
+	  /* We need acquire MO here so that we synchronize with the lock
+	     release of the writer, and so that we observe a recent value of
+	     __wrphase_futex (see below).  */
+	  if ((atomic_load_acquire (&rwlock->__data.__readers)
+	      & PTHREAD_RWLOCK_WRPHASE) == 0)
+	    /* We are in a read phase now, so the least recent modification of
+	       __wrphase_futex we can read from is the store by the writer
+	       with value 1.  Thus, only now we can assume that if we observe
+	       a value of 0, explicit hand-over is finished. Retry the loop
+	       above one more time.  */
+	    ready = true;
+	}
+    }
+
+  return 0;
+}
+
 
 static __always_inline void
 __pthread_rwlock_wrunlock (pthread_rwlock_t *rwlock)
@@ -924,3 +1158,360 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,
       THREAD_GETMEM (THREAD_SELF, tid));
   return 0;
 }
+
+/* 64-bit time version */
+
+static __always_inline int
+__pthread_rwlock_wrlock_full_t64 (pthread_rwlock_t *rwlock,
+    const struct __timespec64 *abstime)
+{
+  /* Make sure we are not holding the rwlock as a writer.  This is a deadlock
+     situation we recognize and report.  */
+  if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)
+      == THREAD_GETMEM (THREAD_SELF, tid)))
+    return EDEADLK;
+
+  /* First we try to acquire the role of primary writer by setting WRLOCKED;
+     if it was set before, there already is a primary writer.  Acquire MO so
+     that we synchronize with previous primary writers.
+
+     We do not try to change to a write phase right away using a fetch_or
+     because we would have to reset it again and wake readers if there are
+     readers present (some readers could try to acquire the lock more than
+     once, so setting a write phase in the middle of this could cause
+     deadlock).  Changing to a write phase eagerly would only speed up the
+     transition from a read phase to a write phase in the uncontended case,
+     but it would slow down the contended case if readers are preferred (which
+     is the default).
+     We could try to CAS from a state with no readers to a write phase, but
+     this could be less scalable if readers arrive and leave frequently.  */
+  bool may_share_futex_used_flag = false;
+  unsigned int r = atomic_fetch_or_acquire (&rwlock->__data.__readers,
+      PTHREAD_RWLOCK_WRLOCKED);
+  if (__glibc_unlikely ((r & PTHREAD_RWLOCK_WRLOCKED) != 0))
+    {
+      /* There is another primary writer.  */
+      bool prefer_writer =
+	  (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP);
+      if (prefer_writer)
+	{
+	  /* We register as a waiting writer, so that we can make use of
+	     writer--writer hand-over.  Relaxed MO is fine because we just
+	     want to register.  We assume that the maximum number of threads
+	     is less than the capacity in __writers.  */
+	  atomic_fetch_add_relaxed (&rwlock->__data.__writers, 1);
+	}
+      for (;;)
+	{
+	  /* TODO Spin until WRLOCKED is 0 before trying the CAS below.
+	     But pay attention to not delay trying writer--writer hand-over
+	     for too long (which we must try eventually anyway).  */
+	  if ((r & PTHREAD_RWLOCK_WRLOCKED) == 0)
+	    {
+	      /* Try to become the primary writer or retry.  Acquire MO as in
+		 the fetch_or above.  */
+	      if (atomic_compare_exchange_weak_acquire
+		  (&rwlock->__data.__readers, &r,
+		      r | PTHREAD_RWLOCK_WRLOCKED))
+		{
+		  if (prefer_writer)
+		    {
+		      /* Unregister as a waiting writer.  Note that because we
+			 acquired WRLOCKED, WRHANDOVER will not be set.
+			 Acquire MO on the CAS above ensures that
+			 unregistering happens after the previous writer;
+			 this sorts the accesses to __writers by all
+			 primary writers in a useful way (e.g., any other
+			 primary writer acquiring after us or getting it from
+			 us through WRHANDOVER will see both our changes to
+			 __writers).
+			 ??? Perhaps this is not strictly necessary for
+			 reasons we do not yet know of.  */
+		      atomic_fetch_add_relaxed (&rwlock->__data.__writers,
+			  -1);
+		    }
+		  break;
+		}
+	      /* Retry if the CAS fails (r will have been updated).  */
+	      continue;
+	    }
+	  /* If writer--writer hand-over is available, try to become the
+	     primary writer this way by grabbing the WRHANDOVER token.  If we
+	     succeed, we own WRLOCKED.  */
+	  if (prefer_writer)
+	    {
+	      unsigned int w = atomic_load_relaxed
+		  (&rwlock->__data.__writers);
+	      if ((w & PTHREAD_RWLOCK_WRHANDOVER) != 0)
+		{
+		  /* Acquire MO is required here so that we synchronize with
+		     the writer that handed over WRLOCKED.  We also need this
+		     for the reload of __readers below because our view of
+		     __readers must be at least as recent as the view of the
+		     writer that handed over WRLOCKED; we must avoid an ABA
+		     through WRHANDOVER, which could, for example, lead to us
+		     assuming we are still in a write phase when in fact we
+		     are not.  */
+		  if (atomic_compare_exchange_weak_acquire
+		      (&rwlock->__data.__writers,
+		       &w, (w - PTHREAD_RWLOCK_WRHANDOVER - 1)))
+		    {
+		      /* Reload so our view is consistent with the view of
+			 the previous owner of WRLOCKED.  See above.  */
+		      r = atomic_load_relaxed (&rwlock->__data.__readers);
+		      break;
+		    }
+		  /* We do not need to reload __readers here.  We should try
+		     to perform writer--writer hand-over if possible; if it
+		     is not possible anymore, we will reload __readers
+		     elsewhere in this loop.  */
+		  continue;
+		}
+	    }
+	  /* We did not acquire WRLOCKED nor were able to use writer--writer
+	     hand-over, so we block on __writers_futex.  */
+	  int private = __pthread_rwlock_get_private (rwlock);
+	  unsigned int wf = atomic_load_relaxed
+	      (&rwlock->__data.__writers_futex);
+	  if (((wf & ~(unsigned int) PTHREAD_RWLOCK_FUTEX_USED) != 1)
+	      || ((wf != (1 | PTHREAD_RWLOCK_FUTEX_USED))
+		  && !atomic_compare_exchange_weak_relaxed
+		      (&rwlock->__data.__writers_futex, &wf,
+		       1 | PTHREAD_RWLOCK_FUTEX_USED)))
+	    {
+	      /* If we cannot block on __writers_futex because there is no
+		 primary writer, or we cannot set PTHREAD_RWLOCK_FUTEX_USED,
+		 we retry.  We must reload __readers here in case we cannot
+		 block on __writers_futex so that we can become the primary
+		 writer and are not stuck in a loop that just continuously
+		 fails to block on __writers_futex.  */
+	      r = atomic_load_relaxed (&rwlock->__data.__readers);
+	      continue;
+	    }
+	  /* We set the flag that signals that the futex is used, or we could
+	     have set it if we had been faster than other waiters.  As a
+	     result, we may share the flag with an unknown number of other
+	     writers.  Therefore, we must keep this flag set when we acquire
+	     the lock.  We do not need to do this when we do not reach this
+	     point here because then we are not part of the group that may
+	     share the flag, and another writer will wake one of the writers
+	     in this group.  */
+	  may_share_futex_used_flag = true;
+	  int err = futex_abstimed_wait_t64 (&rwlock->__data.__writers_futex,
+	      1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+	  if (err == ETIMEDOUT)
+	    {
+	      if (prefer_writer)
+		{
+		  /* We need to unregister as a waiting writer.  If we are the
+		     last writer and writer--writer hand-over is available,
+		     we must make use of it because nobody else will reset
+		     WRLOCKED otherwise.  (If we use it, we simply pretend
+		     that this happened before the timeout; see
+		     pthread_rwlock_rdlock_full for the full reasoning.)
+		     Also see the similar code above.  */
+		  unsigned int w = atomic_load_relaxed
+		      (&rwlock->__data.__writers);
+		  while (!atomic_compare_exchange_weak_acquire
+		      (&rwlock->__data.__writers, &w,
+			  (w == PTHREAD_RWLOCK_WRHANDOVER + 1 ? 0 : w - 1)))
+		    {
+		      /* TODO Back-off.  */
+		    }
+		  if (w == PTHREAD_RWLOCK_WRHANDOVER + 1)
+		    {
+		      /* We must continue as primary writer.  See above.  */
+		      r = atomic_load_relaxed (&rwlock->__data.__readers);
+		      break;
+		    }
+		}
+	      /* We cleaned up and cannot have stolen another waiting writer's
+		 futex wake-up, so just return.  */
+	      return ETIMEDOUT;
+	    }
+	  /* If we got interrupted (EINTR) or the futex word does not have the
+	     expected value (EAGAIN), retry after reloading __readers.  */
+	  r = atomic_load_relaxed (&rwlock->__data.__readers);
+	}
+      /* Our snapshot of __readers is up-to-date at this point because we
+	 either set WRLOCKED using a CAS or were handed over WRLOCKED from
+	 another writer whose snapshot of __readers we inherit.  */
+    }
+
+  /* If we are in a read phase and there are no readers, try to start a write
+     phase.  */
+  while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)
+      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) == 0))
+    {
+      /* Acquire MO so that we synchronize with prior writers and do
+	 not interfere with their updates to __writers_futex, as well
+	 as regarding prior readers and their updates to __wrphase_futex,
+	 respectively.  */
+      if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers,
+	  &r, r | PTHREAD_RWLOCK_WRPHASE))
+	{
+	  /* We have started a write phase, so need to enable readers to wait.
+	     See the similar case in __pthread_rwlock_rdlock_full.  */
+	  atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1);
+	  /* Make sure we fall through to the end of the function.  */
+	  r |= PTHREAD_RWLOCK_WRPHASE;
+	  break;
+	}
+      /* TODO Back-off.  */
+    }
+
+  /* We are the primary writer; enable blocking on __writers_futex.  Relaxed
+     MO is sufficient for futex words; acquire MO on the previous
+     modifications of __readers ensures that this store happens after the
+     store of value 0 by the previous primary writer.  */
+  atomic_store_relaxed (&rwlock->__data.__writers_futex,
+      1 | (may_share_futex_used_flag ? PTHREAD_RWLOCK_FUTEX_USED : 0));
+
+  if (__glibc_unlikely ((r & PTHREAD_RWLOCK_WRPHASE) == 0))
+    {
+      /* We are not in a read phase and there are readers (because of the
+	 previous loop).  Thus, we have to wait for explicit hand-over from
+	 one of these readers.
+	 We basically do the same steps as for the similar case in
+	 __pthread_rwlock_rdlock_full, except that we additionally might try
+	 to directly hand over to another writer and need to wake up
+	 other writers or waiting readers (i.e., PTHREAD_RWLOCK_RWAITING).  */
+      unsigned int wpf;
+      bool ready = false;
+      for (;;)
+	{
+	  while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))
+	      | PTHREAD_RWLOCK_FUTEX_USED) == PTHREAD_RWLOCK_FUTEX_USED)
+	    {
+	      int private = __pthread_rwlock_get_private (rwlock);
+	      if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)
+		  && !atomic_compare_exchange_weak_relaxed
+		      (&rwlock->__data.__wrphase_futex, &wpf,
+		       PTHREAD_RWLOCK_FUTEX_USED))
+		continue;
+	      int err = futex_abstimed_wait_t64 (&rwlock->__data.__wrphase_futex,
+		  PTHREAD_RWLOCK_FUTEX_USED, abstime, private);
+	      if (err == ETIMEDOUT)
+		{
+		  if (rwlock->__data.__flags
+		      != PTHREAD_RWLOCK_PREFER_READER_NP)
+		    {
+		      /* We try writer--writer hand-over.  */
+		      unsigned int w = atomic_load_relaxed
+			  (&rwlock->__data.__writers);
+		      if (w != 0)
+			{
+			  /* We are about to hand over WRLOCKED, so we must
+			     release __writers_futex too; otherwise, we'd have
+			     a pending store, which could at least prevent
+			     other threads from waiting using the futex
+			     because it could interleave with the stores
+			     by subsequent writers.  In turn, this means that
+			     we have to clean up when we do not hand over
+			     WRLOCKED.
+			     Release MO so that another writer that gets
+			     WRLOCKED from us can take over our view of
+			     __readers.  */
+			  unsigned int wf = atomic_exchange_relaxed
+			      (&rwlock->__data.__writers_futex, 0);
+			  while (w != 0)
+			    {
+			      if (atomic_compare_exchange_weak_release
+				  (&rwlock->__data.__writers, &w,
+				      w | PTHREAD_RWLOCK_WRHANDOVER))
+				{
+				  /* Wake other writers.  */
+				  if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
+				    futex_wake
+					(&rwlock->__data.__writers_futex, 1,
+					 private);
+				  return ETIMEDOUT;
+				}
+			      /* TODO Back-off.  */
+			    }
+			  /* We still own WRLOCKED and someone else might set
+			     a write phase concurrently, so enable waiting
+			     again.  Make sure we don't lose the flag that
+			     signals whether there are threads waiting on
+			     this futex.  */
+			  atomic_store_relaxed
+			      (&rwlock->__data.__writers_futex, wf);
+			}
+		    }
+		  /* If we timed out and we are not in a write phase, we can
+		     just stop being a primary writer.  Otherwise, we just
+		     acquire the lock.  */
+		  r = atomic_load_relaxed (&rwlock->__data.__readers);
+		  if ((r & PTHREAD_RWLOCK_WRPHASE) == 0)
+		    {
+		      /* We are about to release WRLOCKED, so we must release
+			 __writers_futex too; see the handling of
+			 writer--writer hand-over above.  */
+		      unsigned int wf = atomic_exchange_relaxed
+			  (&rwlock->__data.__writers_futex, 0);
+		      while ((r & PTHREAD_RWLOCK_WRPHASE) == 0)
+			{
+			  /* While we don't need to make anything from a
+			     caller's critical section visible to other
+			     threads, we need to ensure that our changes to
+			     __writers_futex are properly ordered.
+			     Therefore, use release MO to synchronize with
+			     subsequent primary writers.  Also wake up any
+			     waiting readers as they are waiting because of
+			     us.  */
+			  if (atomic_compare_exchange_weak_release
+			      (&rwlock->__data.__readers, &r,
+			       (r ^ PTHREAD_RWLOCK_WRLOCKED)
+			       & ~(unsigned int) PTHREAD_RWLOCK_RWAITING))
+			    {
+			      /* Wake other writers.  */
+			      if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
+				futex_wake (&rwlock->__data.__writers_futex,
+				    1, private);
+			      /* Wake waiting readers.  */
+			      if ((r & PTHREAD_RWLOCK_RWAITING) != 0)
+				futex_wake (&rwlock->__data.__readers,
+				    INT_MAX, private);
+			      return ETIMEDOUT;
+			    }
+			}
+		      /* We still own WRLOCKED and someone else might set a
+			 write phase concurrently, so enable waiting again.
+			 Make sure we don't lose the flag that signals
+			 whether there are threads waiting on this futex.  */
+		      atomic_store_relaxed (&rwlock->__data.__writers_futex,
+			  wf);
+		    }
+		  /* Use the acquire MO fence to mirror the steps taken in the
+		     non-timeout case.  Note that the read can happen both
+		     in the atomic_load above as well as in the failure case
+		     of the CAS operation.  */
+		  atomic_thread_fence_acquire ();
+		  /* We still need to wait for explicit hand-over, but we must
+		     not use futex_wait anymore.  */
+		  while ((atomic_load_relaxed
+		      (&rwlock->__data.__wrphase_futex)
+		       | PTHREAD_RWLOCK_FUTEX_USED)
+		      == PTHREAD_RWLOCK_FUTEX_USED)
+		    {
+		      /* TODO Back-off.  */
+		    }
+		  ready = true;
+		  break;
+		}
+	      /* If we got interrupted (EINTR) or the futex word does not have
+		 the expected value (EAGAIN), retry.  */
+	    }
+	  /* See pthread_rwlock_rdlock_full.  */
+	  if (ready)
+	    break;
+	  if ((atomic_load_acquire (&rwlock->__data.__readers)
+	      & PTHREAD_RWLOCK_WRPHASE) != 0)
+	    ready = true;
+	}
+    }
+
+  atomic_store_relaxed (&rwlock->__data.__cur_writer,
+      THREAD_GETMEM (THREAD_SELF, tid));
+  return 0;
+}
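
A caller-side illustration (not part of this patch) of the self-deadlock
detection at the top of this function; the public pthread_rwlock_timedwrlock
goes through the matching __pthread_rwlock_wrlock_full and therefore shows the
same behaviour.  Build with -pthread.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int
    main (void)
    {
      pthread_rwlock_t lock;
      struct timespec abstime;

      pthread_rwlock_init (&lock, NULL);
      pthread_rwlock_wrlock (&lock);

      clock_gettime (CLOCK_REALTIME, &abstime);
      abstime.tv_sec += 1;

      /* This thread already owns the write lock, so the implementation
         reports the self-deadlock as EDEADLK instead of blocking until the
         deadline.  */
      int err = pthread_rwlock_timedwrlock (&lock, &abstime);
      printf ("timedwrlock: %s\n", err == EDEADLK ? "EDEADLK" : strerror (err));

      pthread_rwlock_unlock (&lock);
      pthread_rwlock_destroy (&lock);
      return 0;
    }
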
diff --git a/nptl/pthread_rwlock_timedrdlock.c b/nptl/pthread_rwlock_timedrdlock.c
index 383e41e..ea37902 100644
--- a/nptl/pthread_rwlock_timedrdlock.c
+++ b/nptl/pthread_rwlock_timedrdlock.c
@@ -35,3 +35,22 @@ pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
 
   return __pthread_rwlock_rdlock_full (rwlock, abstime);
 }
+
+/* 64-bit time version */
+
+int
+pthread_rwlock_timedrdlock_t64 (pthread_rwlock_t *rwlock,
+    const struct __timespec64 *abstime)
+{
+  /* Make sure the passed in timeout value is valid.  Note that the previous
+     implementation assumed that this check *must* not be performed if there
+     would in fact be no blocking; however, POSIX only requires that "the
+     validity of the abstime parameter need not be checked if the lock can be
+     immediately acquired" (i.e., we need not but may check it).  */
+  /* ??? Just move this to __pthread_rwlock_rdlock_full?  */
+  if (__glibc_unlikely (abstime->tv_nsec >= 1000000000
+      || abstime->tv_nsec < 0))
+    return EINVAL;
+
+  return __pthread_rwlock_rdlock_full_t64 (rwlock, abstime);
+}
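
For reference, a sketch (not from this patch) of how a caller keeps the
absolute deadline normalized so that the EINVAL check above passes; the _t64
entry point applies the same 0 <= tv_nsec < 1000000000 test to a
struct __timespec64.

    #include <pthread.h>
    #include <time.h>

    /* Acquire LOCK for reading, giving up TIMEOUT_MS milliseconds from now.  */
    static int
    rdlock_with_timeout (pthread_rwlock_t *lock, long timeout_ms)
    {
      struct timespec abstime;

      clock_gettime (CLOCK_REALTIME, &abstime);
      abstime.tv_sec += timeout_ms / 1000;
      abstime.tv_nsec += (timeout_ms % 1000) * 1000000L;
      if (abstime.tv_nsec >= 1000000000L)
        {
          /* Keep tv_nsec in range; otherwise the call fails with EINVAL.  */
          abstime.tv_nsec -= 1000000000L;
          abstime.tv_sec += 1;
        }

      return pthread_rwlock_timedrdlock (lock, &abstime);
    }
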
diff --git a/nptl/pthread_rwlock_timedwrlock.c b/nptl/pthread_rwlock_timedwrlock.c
index 7a3d2ec..b6a56c3 100644
--- a/nptl/pthread_rwlock_timedwrlock.c
+++ b/nptl/pthread_rwlock_timedwrlock.c
@@ -35,3 +35,22 @@ pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
 
   return __pthread_rwlock_wrlock_full (rwlock, abstime);
 }
+
+/* 64-bit time version */
+
+int
+pthread_rwlock_timedwrlock_t64 (pthread_rwlock_t *rwlock,
+    const struct __timespec64 *abstime)
+{
+  /* Make sure the passed in timeout value is valid.  Note that the previous
+     implementation assumed that this check *must* not be performed if there
+     would in fact be no blocking; however, POSIX only requires that "the
+     validity of the abstime parameter need not be checked if the lock can be
+     immediately acquired" (i.e., we need not but may check it).  */
+  /* ??? Just move this to __pthread_rwlock_wrlock_full?  */
+  if (__glibc_unlikely (abstime->tv_nsec >= 1000000000
+      || abstime->tv_nsec < 0))
+    return EINVAL;
+
+  return __pthread_rwlock_wrlock_full_t64 (rwlock, abstime);
+}
diff --git a/nptl/sem_timedwait.c b/nptl/sem_timedwait.c
index 8886ea2..f7db9c5 100644
--- a/nptl/sem_timedwait.c
+++ b/nptl/sem_timedwait.c
@@ -38,3 +38,21 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
   else
     return __new_sem_wait_slow((struct new_sem *) sem, abstime);
 }
+
+int
+sem_timedwait_t64 (sem_t *sem, const struct __timespec64 *abstime)
+{
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  /* Check sem_wait.c for a more detailed explanation why it is required.  */
+  __pthread_testcancel ();
+
+  if (__new_sem_wait_fast ((struct new_sem *) sem, 0) == 0)
+    return 0;
+  else
+    return __new_sem_wait_slow_t64 ((struct new_sem *) sem, abstime);
+}
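
A usage sketch (not from this patch) of plain sem_timedwait, which
sem_timedwait_t64 mirrors with struct __timespec64; it shows the EINTR retry
and the -1/errno convention used above.

    #include <errno.h>
    #include <semaphore.h>
    #include <time.h>

    /* Wait up to SECONDS for a token on SEM, retrying after signals.  */
    static int
    wait_for_token (sem_t *sem, int seconds)
    {
      struct timespec abstime;

      clock_gettime (CLOCK_REALTIME, &abstime);
      abstime.tv_sec += seconds;

      while (sem_timedwait (sem, &abstime) != 0)
        {
          if (errno == EINTR)
            continue;          /* Interrupted by a signal handler; retry.  */
          return -1;           /* errno is ETIMEDOUT, EINVAL, ...  */
        }
      return 0;
    }
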
diff --git a/nptl/sem_wait.c b/nptl/sem_wait.c
index e7d9106..88eca16 100644
--- a/nptl/sem_wait.c
+++ b/nptl/sem_wait.c
@@ -43,6 +43,30 @@ __new_sem_wait (sem_t *sem)
 }
 versioned_symbol (libpthread, __new_sem_wait, sem_wait, GLIBC_2_1);
 
+/* 64-bit time version */
+
+int
+__new_sem_wait_t64 (sem_t *sem)
+{
+  /* We need to check whether we need to act upon a cancellation request here
+     because POSIX specifies that cancellation points "shall occur" in
+     sem_wait and sem_timedwait, which also means that they need to check
+     this regardless whether they block or not (unlike "may occur"
+     functions).  See the POSIX Rationale for this requirement: Section
+     "Thread Cancellation Overview" [1] and austin group issue #1076 [2]
+     for thoughts on why this may be a suboptimal design.
+
+     [1] http://pubs.opengroup.org/onlinepubs/9699919799/xrat/V4_xsh_chap02.html
+     [2] http://austingroupbugs.net/view.php?id=1076  */
+  __pthread_testcancel ();
+
+  if (__new_sem_wait_fast ((struct new_sem *) sem, 0) == 0)
+    return 0;
+  else
+    return __new_sem_wait_slow_t64 ((struct new_sem *) sem, NULL);
+}
+
 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
 int
 attribute_compat_text_section
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 30984be..d6d1f1d 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -119,6 +119,24 @@ do_futex_wait (struct new_sem *sem, const struct timespec *abstime)
   return err;
 }
 
+static int
+__attribute__ ((noinline))
+do_futex_wait_t64 (struct new_sem *sem, const struct __timespec64 *abstime)
+{
+  int err;
+
+#if __HAVE_64B_ATOMICS
+  err = futex_abstimed_wait_cancelable_t64 (
+      (unsigned int *) &sem->data + SEM_VALUE_OFFSET, 0, abstime,
+      sem->private);
+#else
+  err = futex_abstimed_wait_cancelable_t64 (&sem->value, SEM_NWAITERS_MASK,
+					abstime, sem->private);
+#endif
+
+  return err;
+}
+
 /* Fast path: Try to grab a token without blocking.  */
 static int
 __new_sem_wait_fast (struct new_sem *sem, int definitive_result)
@@ -310,6 +328,160 @@ error:
   return err;
 }
 
+/* 64-bit time version */
+
+static int
+__attribute__ ((noinline))
+__new_sem_wait_slow_t64 (struct new_sem *sem, const struct __timespec64 *abstime)
+{
+  int err = 0;
+
+#if __HAVE_64B_ATOMICS
+  /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
+     ordering provided by the RMW operations we use.  */
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);
+
+  pthread_cleanup_push (__sem_wait_cleanup, sem);
+
+  /* Wait for a token to be available.  Retry until we can grab one.  */
+  for (;;)
+    {
+      /* If there is no token available, sleep until there is.  */
+      if ((d & SEM_VALUE_MASK) == 0)
+	{
+	  err = do_futex_wait_t64 (sem, abstime);
+	  /* A futex return value of 0 or EAGAIN is due to a real or spurious
+	     wake-up, or due to a change in the number of tokens.  We retry in
+	     these cases.
+	     If we timed out, forward this to the caller.
+	     EINTR is returned if we are interrupted by a signal; we
+	     forward this to the caller.  (See futex_wait and related
+	     documentation.  Before Linux 2.6.22, EINTR was also returned on
+	     spurious wake-ups; we only support more recent Linux versions,
+	     so do not need to consider this here.)  */
+	  if (err == ETIMEDOUT || err == EINTR)
+	    {
+	      __set_errno (err);
+	      err = -1;
+	      /* Stop being registered as a waiter.  */
+	      atomic_fetch_add_relaxed (&sem->data,
+		  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
+	      break;
+	    }
+	  /* Relaxed MO is sufficient; see below.  */
+	  d = atomic_load_relaxed (&sem->data);
+	}
+      else
+	{
+	  /* Try to grab both a token and stop being a waiter.  We need
+	     acquire MO so this synchronizes with all token providers (i.e.,
+	     the RMW operation we read from or all those before it in
+	     modification order; also see sem_post).  On the failure path,
+	     relaxed MO is sufficient because we only eventually need the
+	     up-to-date value; the futex_wait or the CAS perform the real
+	     work.  */
+	  if (atomic_compare_exchange_weak_acquire (&sem->data,
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
+	    {
+	      err = 0;
+	      break;
+	    }
+	}
+    }
+
+  pthread_cleanup_pop (0);
+#else
+  /* The main difference to the 64b-atomics implementation is that we need to
+     access value and nwaiters in separate steps, and that the nwaiters bit
+     in the value can temporarily not be set even if nwaiters is nonzero.
+     We work around incorrectly unsetting the nwaiters bit by letting sem_wait
+     set the bit again and waking the number of waiters that could grab a
+     token.  There are two additional properties we need to ensure:
+     (1) We make sure that whenever unsetting the bit, we see the increment of
+     nwaiters by the other thread that set the bit.  IOW, we will notice if
+     we make a mistake.
+     (2) When setting the nwaiters bit, we make sure that we see the unsetting
+     of the bit by another waiter that happened before us.  This avoids having
+     to blindly set the bit whenever we need to block on it.  We set/unset
+     the bit while having incremented nwaiters (i.e., are a registered
+     waiter), and the problematic case only happens when one waiter indeed
+     followed another (i.e., nwaiters was never larger than 1); thus, this
+     works similarly as with a critical section using nwaiters (see the MOs
+     and related comments below).
+
+     An alternative approach would be to unset the bit after decrementing
+     nwaiters; however, that would result in needing Dekker-like
+     synchronization and thus full memory barriers.  We also would not be able
+     to prevent misspeculation, so this alternative scheme does not seem
+     beneficial.  */
+  unsigned int v;
+
+  /* Add a waiter.  We need acquire MO so this synchronizes with the release
+     MO we use when decrementing nwaiters below; it ensures that if another
+     waiter unset the bit before us, we see that and set it again.  Also see
+     property (2) above.  */
+  atomic_fetch_add_acquire (&sem->nwaiters, 1);
+
+  pthread_cleanup_push (__sem_wait_cleanup, sem);
+
+  /* Wait for a token to be available.  Retry until we can grab one.  */
+  /* We do not need any ordering wrt. to this load's reads-from, so relaxed
+     MO is sufficient.  The acquire MO above ensures that in the problematic
+     case, we do see the unsetting of the bit by another waiter.  */
+  v = atomic_load_relaxed (&sem->value);
+  do
+    {
+      do
+	{
+	  /* We are about to block, so make sure that the nwaiters bit is
+	     set.  We need release MO on the CAS to ensure that when another
+	     waiter unsets the nwaiters bit, it will also observe that we
+	     incremented nwaiters in the meantime (also see the unsetting of
+	     the bit below).  Relaxed MO on CAS failure is sufficient (see
+	     above).  */
+	  do
+	    {
+	      if ((v & SEM_NWAITERS_MASK) != 0)
+		break;
+	    }
+	  while (!atomic_compare_exchange_weak_release (&sem->value,
+	      &v, v | SEM_NWAITERS_MASK));
+	  /* If there is no token, wait.  */
+	  if ((v >> SEM_VALUE_SHIFT) == 0)
+	    {
+	      /* See __HAVE_64B_ATOMICS variant.  */
+	      err = do_futex_wait_t64 (sem, abstime);
+	      if (err == ETIMEDOUT || err == EINTR)
+		{
+		  __set_errno (err);
+		  err = -1;
+		  goto error;
+		}
+	      err = 0;
+	      /* We blocked, so there might be a token now.  Relaxed MO is
+		 sufficient (see above).  */
+	      v = atomic_load_relaxed (&sem->value);
+	    }
+	}
+      /* If there is no token, we must not try to grab one.  */
+      while ((v >> SEM_VALUE_SHIFT) == 0);
+    }
+  /* Try to grab a token.  We need acquire MO so this synchronizes with
+     all token providers (i.e., the RMW operation we read from or all those
+     before it in modification order; also see sem_post).  */
+  while (!atomic_compare_exchange_weak_acquire (&sem->value,
+      &v, v - (1 << SEM_VALUE_SHIFT)));
+
+error:
+  pthread_cleanup_pop (0);
+
+  __sem_wait_32_finish (sem);
+#endif
+
+  return err;
+}
+
 /* Stop being a registered waiter (non-64b-atomics code only).  */
 #if !__HAVE_64B_ATOMICS
 static void
diff --git a/rt/Versions b/rt/Versions
index 1eef2e6..a1c98a8 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -48,5 +48,6 @@ librt {
     __timerfd_settime64;
     __mq_timedreceive_t64;
     __mq_timedsend_t64;
+    __aio_suspend_t64;
   }
 }
diff --git a/sysdeps/nptl/aio_misc.h b/sysdeps/nptl/aio_misc.h
index 206d8e1..f5ed3ec 100644
--- a/sysdeps/nptl/aio_misc.h
+++ b/sysdeps/nptl/aio_misc.h
@@ -71,4 +71,43 @@
       }									      \
   } while (0)
 
+#define AIO_MISC_WAIT_T64(result, futex, timeout, cancel)		      \
+  do {									      \
+    volatile unsigned int *futexaddr = &futex;				      \
+    unsigned int oldval = futex;					      \
+									      \
+    if (oldval != 0)							      \
+      {									      \
+	pthread_mutex_unlock (&__aio_requests_mutex);			      \
+									      \
+	int oldtype;							      \
+	if (cancel)							      \
+	  oldtype = LIBC_CANCEL_ASYNC ();				      \
+									      \
+	int status;							      \
+	do								      \
+	  {								      \
+	    status = futex_reltimed_wait_t64 ((unsigned int *) futexaddr,     \
+					      oldval, timeout, FUTEX_PRIVATE);\
+	    if (status != EAGAIN)					      \
+	      break;							      \
+									      \
+	    oldval = *futexaddr;					      \
+	  }								      \
+	while (oldval != 0);						      \
+									      \
+	if (cancel)							      \
+	  LIBC_CANCEL_RESET (oldtype);					      \
+									      \
+	if (status == EINTR)						      \
+	  result = EINTR;						      \
+	else if (status == ETIMEDOUT)					      \
+	  result = EAGAIN;						      \
+	else								      \
+	  assert (status == 0 || status == EAGAIN);			      \
+									      \
+	pthread_mutex_lock (&__aio_requests_mutex);			      \
+      }									      \
+  } while (0)
+
 #include_next <aio_misc.h>
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 8326e28..e72a98e 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -122,6 +122,10 @@ extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
 extern int __lll_timedlock_wait (int *futex, const struct timespec *,
 				 int private) attribute_hidden;
 
+extern int __lll_timedlock_wait_t64 (int *futex,
+                                     const struct __timespec64 *,
+				     int private) attribute_hidden;
+
 
 /* As __lll_lock, but with a timeout.  If the timeout occurs then return
    ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.  */
@@ -138,6 +142,19 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
 #define lll_timedlock(futex, abstime, private)  \
   __lll_timedlock (&(futex), abstime, private)
 
+#define __lll_timedlock_t64(futex, abstime, private)                \
+  ({                                                                \
+    int *__futex = (futex);                                         \
+    int __val = 0;                                                  \
+                                                                    \
+    if (__glibc_unlikely                                            \
+        (atomic_compare_and_exchange_bool_acq (__futex, 1, 0)))     \
+      __val = __lll_timedlock_wait_t64 (__futex, abstime, private); \
+    __val;                                                          \
+  })
+#define lll_timedlock_t64(futex, abstime, private)  \
+  __lll_timedlock_t64 (&(futex), abstime, private)
+
 
 /* This is an expression rather than a statement even though its value is
    void, so that it can be used in a comma expression or as an expression
diff --git a/sysdeps/pthread/aio_suspend.c b/sysdeps/pthread/aio_suspend.c
index 010cbf8..5069323 100644
--- a/sysdeps/pthread/aio_suspend.c
+++ b/sysdeps/pthread/aio_suspend.c
@@ -251,3 +251,167 @@ aio_suspend (const struct aiocb *const list[], int nent,
 }
 
 weak_alias (aio_suspend, aio_suspend64)
+
+#ifdef DONT_NEED_AIO_MISC_COND
+static int
+__attribute__ ((noinline))
+do_aio_misc_wait_t64 (unsigned int *cntr, 
+		      const struct __timespec64 *timeout)
+{
+  int result = 0;
+
+  AIO_MISC_WAIT_T64 (result, *cntr, timeout, 1);
+
+  return result;
+}
+#endif
+
+int
+aio_suspend_t64 (const struct aiocb *const list[], int nent,
+	         const struct __timespec64 *timeout)
+{
+  if (__glibc_unlikely (nent < 0))
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  struct waitlist waitlist[nent];
+  struct requestlist *requestlist[nent];
+#ifndef DONT_NEED_AIO_MISC_COND
+  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+#endif
+  int cnt;
+  bool any = false;
+  int result = 0;
+  unsigned int cntr = 1;
+
+  /* Request the mutex.  */
+  pthread_mutex_lock (&__aio_requests_mutex);
+
+  /* There is not yet a finished request.  Signal the request that
+     we are working for it.  */
+  for (cnt = 0; cnt < nent; ++cnt)
+    if (list[cnt] != NULL)
+      {
+	if (list[cnt]->__error_code == EINPROGRESS)
+	  {
+	    requestlist[cnt] = __aio_find_req ((aiocb_union *) list[cnt]);
+
+	    if (requestlist[cnt] != NULL)
+	      {
+#ifndef DONT_NEED_AIO_MISC_COND
+		waitlist[cnt].cond = &cond;
+#endif
+		waitlist[cnt].result = NULL;
+		waitlist[cnt].next = requestlist[cnt]->waiting;
+		waitlist[cnt].counterp = &cntr;
+		waitlist[cnt].sigevp = NULL;
+#ifdef BROKEN_THREAD_SIGNALS
+		waitlist[cnt].caller_pid = 0;	/* Not needed.  */
+#endif
+		requestlist[cnt]->waiting = &waitlist[cnt];
+		any = true;
+	      }
+	    else
+	      /* We will never suspend.  */
+	      break;
+	  }
+	else
+	  /* We will never suspend.  */
+	  break;
+      }
+
+
+  /* Wait only if none of the entries is NULL or already finished.  */
+  if (cnt == nent && any)
+    {
+      struct clparam clparam =
+	{
+	  .list = list,
+	  .waitlist = waitlist,
+	  .requestlist = requestlist,
+#ifndef DONT_NEED_AIO_MISC_COND
+	  .cond = &cond,
+#endif
+	  .nent = nent
+	};
+
+      pthread_cleanup_push (cleanup, &clparam);
+
+#ifdef DONT_NEED_AIO_MISC_COND
+      result = do_aio_misc_wait_t64 (&cntr, timeout);
+#else
+      if (timeout == NULL)
+	result = pthread_cond_wait (&cond, &__aio_requests_mutex);
+      else
+	{
+	  /* We have to convert the relative timeout value into an
+	     absolute time value which pthread_cond_timedwait expects.  */
+	  struct timeval now;
+	  struct __timespec64 abstime;
+
+	  __gettimeofday (&now, NULL);
+	  abstime.tv_nsec = timeout->tv_nsec + now.tv_usec * 1000;
+	  abstime.tv_sec = timeout->tv_sec + now.tv_sec;
+	  if (abstime.tv_nsec >= 1000000000)
+	    {
+	      abstime.tv_nsec -= 1000000000;
+	      abstime.tv_sec += 1;
+	    }
+
+	  result = __pthread_cond_timedwait_t64 (&cond,
+					       &__aio_requests_mutex,
+					       &abstime);
+	}
+#endif
+
+      pthread_cleanup_pop (0);
+    }
+
+  /* Now remove the entry in the waiting list for all requests
+     which didn't terminate.  */
+  while (cnt-- > 0)
+    if (list[cnt] != NULL && list[cnt]->__error_code == EINPROGRESS)
+      {
+	struct waitlist **listp;
+
+	assert (requestlist[cnt] != NULL);
+
+	/* There is the chance that we cannot find our entry anymore. This
+	   could happen if the request terminated and restarted again.  */
+	listp = &requestlist[cnt]->waiting;
+	while (*listp != NULL && *listp != &waitlist[cnt])
+	  listp = &(*listp)->next;
+
+	if (*listp != NULL)
+	  *listp = (*listp)->next;
+      }
+
+#ifndef DONT_NEED_AIO_MISC_COND
+  /* Release the condition variable.  */
+  if (__glibc_unlikely (pthread_cond_destroy (&cond) != 0))
+    /* This must never happen.  */
+    abort ();
+#endif
+
+  if (result != 0)
+    {
+#ifndef DONT_NEED_AIO_MISC_COND
+      /* An error occurred.  Possibly it's ETIMEDOUT.  We have to translate
+	 the timeout error report of `pthread_cond_timedwait' to the
+	 form expected from `aio_suspend'.  */
+      if (result == ETIMEDOUT)
+	__set_errno (EAGAIN);
+      else
+#endif
+	__set_errno (result);
+
+      result = -1;
+    }
+
+  /* Release the mutex.  */
+  pthread_mutex_unlock (&__aio_requests_mutex);
+
+  return result;
+}
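
A caller-side sketch (not from this patch) of aio_suspend, which
aio_suspend_t64 mirrors with a 64-bit relative timeout; as in the translation
above, a timeout is reported as -1 with errno set to EAGAIN.  Link with -lrt
on older glibc.

    #include <aio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <time.h>

    /* Start an asynchronous read and wait at most two seconds for it.  */
    static ssize_t
    read_with_deadline (int fd, char *buf, size_t len)
    {
      struct aiocb cb;
      memset (&cb, 0, sizeof cb);
      cb.aio_fildes = fd;
      cb.aio_buf = buf;
      cb.aio_nbytes = len;

      if (aio_read (&cb) != 0)
        return -1;

      const struct aiocb *list[1] = { &cb };
      struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };

      if (aio_suspend (list, 1, &timeout) != 0)
        {
          /* Timed out (EAGAIN) or interrupted (EINTR).  A robust caller must
             not let cb go out of scope while the request is still pending.  */
          aio_cancel (fd, &cb);
          return -1;
        }

      return aio_return (&cb);
    }
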
diff --git a/sysdeps/unix/sysv/linux/futex-internal.h b/sysdeps/unix/sysv/linux/futex-internal.h
index 96a07b0..7704f59 100644
--- a/sysdeps/unix/sysv/linux/futex-internal.h
+++ b/sysdeps/unix/sysv/linux/futex-internal.h
@@ -131,6 +131,32 @@ futex_reltimed_wait (unsigned int *futex_word, unsigned int expected,
     }
 }
 
+/* 64-bit time version */
+static __always_inline int
+futex_reltimed_wait_t64 (unsigned int *futex_word, unsigned int expected,
+		         const struct __timespec64 *reltime, int private)
+{
+  int err = lll_futex_timed_wait_t64 (futex_word, expected, reltime,
+                                      private);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+      return -err;
+
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
+		     being normalized.  Must have been caused by a glibc or
+		     application bug.  */
+    case -ENOSYS: /* Must have been caused by a glibc bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
 /* See sysdeps/nptl/futex-internal.h for details.  */
 static __always_inline int
 futex_reltimed_wait_cancelable (unsigned int *futex_word,
@@ -160,6 +186,37 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
     }
 }
 
+/* 64-bit time version */
+
+static __always_inline int
+futex_reltimed_wait_cancelable_t64 (unsigned int *futex_word,
+				    unsigned int expected,
+			            const struct __timespec64 *reltime,
+                                    int private)
+{
+  int oldtype;
+  oldtype = __pthread_enable_asynccancel ();
+  int err = lll_futex_timed_wait_t64 (futex_word, expected, reltime, private);
+  __pthread_disable_asynccancel (oldtype);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+      return -err;
+
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
+		     being normalized.  Must have been caused by a glibc or
+		     application bug.  */
+    case -ENOSYS: /* Must have been caused by a glibc bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
 /* See sysdeps/nptl/futex-internal.h for details.  */
 static __always_inline int
 futex_abstimed_wait (unsigned int *futex_word, unsigned int expected,
@@ -190,6 +247,36 @@ futex_abstimed_wait (unsigned int *futex_word, unsigned int expected,
     }
 }
 
+/* 64-bit time version */
+static __always_inline int
+futex_abstimed_wait_t64 (unsigned int *futex_word, unsigned int expected,
+		         const struct __timespec64 *abstime, int private)
+{
+  /* Work around the fact that the kernel rejects negative timeout values
+     despite them being valid.  */
+  if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))
+    return ETIMEDOUT;
+  int err = lll_futex_timed_wait_bitset_t64 (futex_word, expected, abstime,
+					     FUTEX_CLOCK_REALTIME, private);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+      return -err;
+
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
+		     being normalized.  Must have been caused by a glibc or
+		     application bug.  */
+    case -ENOSYS: /* Must have been caused by a glibc bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
 /* See sysdeps/nptl/futex-internal.h for details.  */
 static __always_inline int
 futex_abstimed_wait_cancelable (unsigned int *futex_word,
@@ -224,6 +311,42 @@ futex_abstimed_wait_cancelable (unsigned int *futex_word,
     }
 }
 
+/* 64-bit time version */
+
+static __always_inline int
+futex_abstimed_wait_cancelable_t64 (unsigned int *futex_word,
+				    unsigned int expected,
+			            const struct __timespec64 *abstime,
+                                    int private)
+{
+  /* Work around the fact that the kernel rejects negative timeout values
+     despite them being valid.  */
+  if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))
+    return ETIMEDOUT;
+  int oldtype;
+  oldtype = __pthread_enable_asynccancel ();
+  int err = lll_futex_timed_wait_bitset_t64 (futex_word, expected, abstime,
+					     FUTEX_CLOCK_REALTIME, private);
+  __pthread_disable_asynccancel (oldtype);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+      return -err;
+
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
+		     being normalized.  Must have been caused by a glibc or
+		     application bug.  */
+    case -ENOSYS: /* Must have been caused by a glibc bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
 /* See sysdeps/nptl/futex-internal.h for details.  */
 static __always_inline void
 futex_wake (unsigned int *futex_word, int processes_to_wake, int private)
diff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
index 4eddadb..453d24e 100644
--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h
+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
@@ -97,6 +97,16 @@
 		     __lll_private_flag (FUTEX_WAIT, private),  \
 		     val, timeout)
 
+#define lll_futex_timed_wait_t64(futexp, val, timeout, private)     \
+  ({                                                                       \
+    struct timespec ts;                                                    \
+    ts.tv_sec = timeout->tv_sec;                                           \
+    ts.tv_nsec = timeout->tv_nsec;                                         \
+    lll_futex_syscall (4, futexp,                                 	   \
+		       __lll_private_flag (FUTEX_WAIT, private),  	   \
+		       val, &ts);					   \
+  })
+
 #define lll_futex_timed_wait_bitset(futexp, val, timeout, clockbit, private) \
   lll_futex_syscall (6, futexp,                                         \
 		     __lll_private_flag (FUTEX_WAIT_BITSET | (clockbit), \
@@ -104,6 +114,18 @@
 		     val, timeout, NULL /* Unused.  */,                 \
 		     FUTEX_BITSET_MATCH_ANY)
 
+#define lll_futex_timed_wait_bitset_t64(futexp, val, timeout, clockbit, private) \
+  ({                                                                       \
+    struct timespec ts;                                                    \
+    ts.tv_sec = timeout->tv_sec;                                           \
+    ts.tv_nsec = timeout->tv_nsec;                                         \
+    lll_futex_syscall (6, futexp,                                          \
+		       __lll_private_flag (FUTEX_WAIT_BITSET | (clockbit), \
+		                           private),                       \
+		       val, &ts, NULL /* Unused.  */,                      \
+		       FUTEX_BITSET_MATCH_ANY);                            \
+  })
+
 #define lll_futex_wake(futexp, nr, private)                             \
   lll_futex_syscall (4, futexp,                                         \
 		     __lll_private_flag (FUTEX_WAKE, private), nr, 0)
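
The two *_t64 wait macros above accept glibc's internal struct __timespec64
and copy its fields into a kernel struct timespec before issuing the existing
futex syscall.  A standalone sketch of that narrowing (illustration only; the
stand-in type below is hypothetical, the real __timespec64 is internal):

    #include <time.h>

    /* Stand-in for the internal 64-bit type.  */
    struct timespec64_example
    {
      long long tv_sec;
      long tv_nsec;
    };

    /* Field-by-field copy as performed by the macros; on targets with a
       32-bit time_t the tv_sec value is truncated.  */
    static struct timespec
    to_kernel_timespec (const struct timespec64_example *ts64)
    {
      struct timespec ts;

      ts.tv_sec = ts64->tv_sec;
      ts.tv_nsec = ts64->tv_nsec;
      return ts;
    }
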

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=fc7f66a29a69036d9d47b3d89b37b8493fcbe17b

commit fc7f66a29a69036d9d47b3d89b37b8493fcbe17b
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:09 2017 +0200

    Y2038: add function __setitimer_t64

diff --git a/time/Makefile b/time/Makefile
index 15b7d92..1be377b 100644
--- a/time/Makefile
+++ b/time/Makefile
@@ -38,7 +38,7 @@ routines := offtime asctime clock ctime ctime_r difftime \
 	    strftime wcsftime strftime_l wcsftime_l	 \
 	    timespec_get                                 \
 	    settimeofday64 nanosleep64                   \
-	    getitimer64
+	    getitimer64 setitimer64
 aux :=	    era alt_digit lc-time-cleanup
 
 tests	:= test_time clocktest tst-posixtz tst-strptime tst_wcsftime \
diff --git a/time/Versions b/time/Versions
index 9663204..797161d 100644
--- a/time/Versions
+++ b/time/Versions
@@ -92,5 +92,6 @@ libc {
     __nanosleep_t64;
     __adjtime_t64;
     __getitimer_t64;
+    __setitimer_t64;
   }
 }
diff --git a/time/setitimer64.c b/time/setitimer64.c
new file mode 100644
index 0000000..31480bd
--- /dev/null
+++ b/time/setitimer64.c
@@ -0,0 +1,71 @@
+/* Set an interval timer
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <stddef.h>
+#include <errno.h>
+#include <sys/time.h>
+
+extern int __y2038_linux_support;
+
+/* Set the timer WHICH to *NEW.  If OLD is not NULL,
+   set *OLD to the old value of timer WHICH.
+   Returns 0 on success, -1 on errors.  */
+int
+__setitimer_t64 (enum __itimer_which which,
+                 const struct __itimerval_t64 *new,
+                 struct __itimerval_t64 *old)
+{
+  struct itimerval new32, *new32p = NULL;
+  struct itimerval old32, *old32p = NULL;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+
+  if (new != NULL)
+    {
+      if (new->it_interval.tv_sec > INT_MAX ||
+          new->it_value.tv_sec > INT_MAX)
+        {
+          __set_errno (EOVERFLOW);
+          return -1;
+        }
+      new32.it_interval.tv_sec = new->it_interval.tv_sec;
+      new32.it_interval.tv_usec = new->it_interval.tv_usec;
+      new32.it_value.tv_sec = new->it_value.tv_sec;
+      new32.it_value.tv_usec = new->it_value.tv_usec;
+      new32p = &new32;
+    }
+
+  if (old != NULL)
+    old32p = &old32;
+
+  int result = setitimer (which, new32p, old32p);
+
+  if (old)
+    {
+      old->it_interval.tv_sec = old32.it_interval.tv_sec;
+      old->it_interval.tv_usec = old32.it_interval.tv_usec;
+      old->it_value.tv_sec = old32.it_value.tv_sec;
+      old->it_value.tv_usec = old32.it_value.tv_usec;
+    }
+
+  return result;
+}
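
For reference, the public setitimer call that __setitimer_t64 narrows its
arguments down to (illustration only, not part of the patch); 64-bit values
whose tv_sec exceeds INT_MAX are rejected with EOVERFLOW before this point.

    #include <sys/time.h>

    /* Arm ITIMER_REAL to fire after 250 ms and then every 250 ms.  */
    static int
    arm_periodic_timer (void)
    {
      struct itimerval val;

      val.it_value.tv_sec = 0;
      val.it_value.tv_usec = 250000;
      val.it_interval = val.it_value;

      return setitimer (ITIMER_REAL, &val, NULL);
    }
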

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=ba924f9792b3db2278b7a6670beb51d2b2b3e719

commit ba924f9792b3db2278b7a6670beb51d2b2b3e719
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:08 2017 +0200

    Y2038: add function __getitimer_t64

diff --git a/time/Makefile b/time/Makefile
index e0a766b..15b7d92 100644
--- a/time/Makefile
+++ b/time/Makefile
@@ -37,7 +37,8 @@ routines := offtime asctime clock ctime ctime_r difftime \
 	    getdate strptime strptime_l			 \
 	    strftime wcsftime strftime_l wcsftime_l	 \
 	    timespec_get                                 \
-	    settimeofday64 nanosleep64
+	    settimeofday64 nanosleep64                   \
+	    getitimer64
 aux :=	    era alt_digit lc-time-cleanup
 
 tests	:= test_time clocktest tst-posixtz tst-strptime tst_wcsftime \
diff --git a/time/Versions b/time/Versions
index a08c1e5..9663204 100644
--- a/time/Versions
+++ b/time/Versions
@@ -91,5 +91,6 @@ libc {
     __settimeofday_t64;
     __nanosleep_t64;
     __adjtime_t64;
+    __getitimer_t64;
   }
 }
diff --git a/time/getitimer64.c b/time/getitimer64.c
new file mode 100644
index 0000000..82d7b3b
--- /dev/null
+++ b/time/getitimer64.c
@@ -0,0 +1,53 @@
+/* Get the current value of an interval timer
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <stddef.h>
+#include <errno.h>
+#include <sys/time.h>
+
+extern int __y2038_linux_support;
+
+/* Set *VALUE to the current setting of timer WHICH.
+   Return 0 on success, -1 on errors.  */
+int
+__getitimer_t64 (enum __itimer_which which,
+                 struct __itimerval_t64 *value)
+{
+  struct itimerval value32, *value32p = NULL;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+ 
+  if (value != NULL)
+    value32p = &value32;
+
+  int result = getitimer (which, value32p);
+
+  if (result == 0 && value != NULL)
+    {
+      value->it_interval.tv_sec = value32.it_interval.tv_sec;
+      value->it_interval.tv_usec = value32.it_interval.tv_usec;
+      value->it_value.tv_sec = value32.it_value.tv_sec;
+      value->it_value.tv_usec = value32.it_value.tv_usec;
+    }
+
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=fefd49469ff9fa77cdd071b6db139a98ddfa1537

commit fefd49469ff9fa77cdd071b6db139a98ddfa1537
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:07 2017 +0200

    Y2038: add struct __itimerval_t64

diff --git a/include/time.h b/include/time.h
index 1048b2d..dbabb81 100644
--- a/include/time.h
+++ b/include/time.h
@@ -55,6 +55,12 @@ struct __itimerspec64
   struct __timespec64 it_value;
 };
 
+struct __itimerval_t64
+{
+  struct __timeval64 it_interval;
+  struct __timeval64 it_value;
+};
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=77605e807a31412ae20bae6ae8f2f25809e8bc9a

commit 77605e807a31412ae20bae6ae8f2f25809e8bc9a
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:06 2017 +0200

    Y2038: add function __utime_t64

diff --git a/include/utime.h b/include/utime.h
index 5049251..eb907f7 100644
--- a/include/utime.h
+++ b/include/utime.h
@@ -6,4 +6,11 @@
 libc_hidden_proto (utime)
 #endif
 
+/* Structure describing file times, 64-bit time version.  */
+struct __utimbuf64
+  {
+    __time64_t actime;		/* Access time.  */
+    __time64_t modtime;		/* Modification time.  */
+  };
+
 #endif /* utime.h */
diff --git a/io/Versions b/io/Versions
index d4a3b40..555d57d 100644
--- a/io/Versions
+++ b/io/Versions
@@ -135,5 +135,6 @@ libc {
     __xstat64_t64;
     __lxstat64_t64;
     __fxstatat64_t64;
+    __utime_t64;
   }
 }
diff --git a/io/utime.c b/io/utime.c
index 1b385a2..17e6598 100644
--- a/io/utime.c
+++ b/io/utime.c
@@ -37,3 +37,19 @@ utime (const char *file, const struct utimbuf *times)
 libc_hidden_def (utime)
 
 stub_warning (utime)
+
+/* 64-bit time version */
+
+int
+__utime_t64 (const char *file, const struct __utimbuf64 *times)
+{
+  if (file == NULL)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__utime_t64)
diff --git a/sysdeps/posix/utime.c b/sysdeps/posix/utime.c
index 76f3cba..999cfd5 100644
--- a/sysdeps/posix/utime.c
+++ b/sysdeps/posix/utime.c
@@ -45,3 +45,25 @@ utime (const char *file, const struct utimbuf *times)
   return __utimes (file, tvp);
 }
 libc_hidden_def (utime)
+
+/* 64-bit time version */
+
+int
+__utime_t64 (const char *file, const struct __utimbuf64 *times)
+{
+  struct __timeval64 timevals[2];
+  struct __timeval64 *tvp;
+
+  if (times != NULL)
+    {
+      timevals[0].tv_sec = times->actime;
+      timevals[0].tv_usec = 0L;
+      timevals[1].tv_sec = times->modtime;
+      timevals[1].tv_usec = 0L;
+      tvp = timevals;
+    }
+  else
+    tvp = NULL;
+
+  return __utimes_t64 (file, tvp);
+}
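
A usage sketch (not from this patch) of the public utime interface that
__utime_t64 mirrors: actime and modtime become the two timevals handed to
__utimes_t64, with tv_usec forced to 0.

    #include <time.h>
    #include <utime.h>

    /* Set PATH's access and modification times.  */
    static int
    set_file_times (const char *path, time_t accessed, time_t modified)
    {
      struct utimbuf times;

      times.actime = accessed;
      times.modtime = modified;
      return utime (path, &times);
    }
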

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=659a1e82c0506d8289ced2a3e57a8066adae1c2d

commit 659a1e82c0506d8289ced2a3e57a8066adae1c2d
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:05 2017 +0200

    Y2038: add function __adjtime_t64

diff --git a/sysdeps/unix/sysv/linux/adjtime.c b/sysdeps/unix/sysv/linux/adjtime.c
index 6edecb7..829fa0f 100644
--- a/sysdeps/unix/sysv/linux/adjtime.c
+++ b/sysdeps/unix/sysv/linux/adjtime.c
@@ -90,3 +90,46 @@ ADJTIME (const struct TIMEVAL *itv, struct TIMEVAL *otv)
 #ifdef NO_LOCAL_ADJTIME
 weak_alias (__adjtime, adjtime)
 #endif
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int __adjtime_t64 (const struct __timeval64 *itv,
+                 struct __timeval64 *otv)
+{
+  struct TIMEX tntx;
+
+  if (itv)
+    {
+      struct TIMEVAL tmp;
+
+      /* We will do some check here. */
+      tmp.tv_sec = itv->tv_sec + itv->tv_usec / 1000000L;
+      tmp.tv_usec = itv->tv_usec % 1000000L;
+      if (tmp.tv_sec > MAX_SEC || tmp.tv_sec < MIN_SEC)
+	return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+      tntx.offset = tmp.tv_usec + tmp.tv_sec * 1000000L;
+      tntx.modes = ADJ_OFFSET_SINGLESHOT;
+    }
+  else
+    tntx.modes = ADJ_OFFSET_SS_READ;
+
+  if (__glibc_unlikely (ADJTIMEX (&tntx) < 0))
+    return -1;
+
+  if (otv)
+    {
+      if (tntx.offset < 0)
+	{
+	  otv->tv_usec = -(-tntx.offset % 1000000);
+	  otv->tv_sec  = -(-tntx.offset / 1000000);
+	}
+      else
+	{
+	  otv->tv_usec = tntx.offset % 1000000;
+	  otv->tv_sec  = tntx.offset / 1000000;
+	}
+    }
+  return 0;
+}
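
The conversion of tntx.offset back into a timeval above keeps tv_sec and
tv_usec with the same sign.  A standalone sketch of that split (illustration
only): an offset of -2500000 us yields tv_sec = -2 and tv_usec = -500000.

    /* Split a signed microsecond offset into seconds and microseconds that
       share its sign.  */
    static void
    split_offset (long long offset_us, long long *sec, long long *usec)
    {
      if (offset_us < 0)
        {
          *usec = -(-offset_us % 1000000);
          *sec = -(-offset_us / 1000000);
        }
      else
        {
          *usec = offset_us % 1000000;
          *sec = offset_us / 1000000;
        }
    }
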
diff --git a/time/Versions b/time/Versions
index e4e088f..a08c1e5 100644
--- a/time/Versions
+++ b/time/Versions
@@ -90,5 +90,6 @@ libc {
     __gettimeofday_t64;
     __settimeofday_t64;
     __nanosleep_t64;
+    __adjtime_t64;
   }
 }
diff --git a/time/adjtime.c b/time/adjtime.c
index 4a972d6..2ad1138 100644
--- a/time/adjtime.c
+++ b/time/adjtime.c
@@ -31,3 +31,13 @@ __adjtime (const struct timeval *delta, struct timeval *olddelta)
 stub_warning (adjtime)
 
 weak_alias (__adjtime, adjtime)
+
+/* 64-bit time version */
+
+int
+__adjtime_t64 (const struct __timeval64 *delta, struct __timeval64 *olddelta)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__adjtime_t64)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=5741ab6ccb7a6fbe313ef63d0c4f76b3ba0a6779

commit 5741ab6ccb7a6fbe313ef63d0c4f76b3ba0a6779
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:04 2017 +0200

    Y2038: add function __nanosleep64_t64
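
A hedged usage sketch (not part of the patch) showing how a caller would
restart the 64-bit wrapper after a signal; the struct layout and prototype are
assumptions taken from this series:

/* Illustrative only; links only against a libc built from this branch.  */
#include <errno.h>

struct __timespec64 { long long tv_sec; long tv_nsec; int tv_pad; }; /* assumed */
extern int __nanosleep_t64 (const struct __timespec64 *req,
                            struct __timespec64 *rem);

int
main (void)
{
  struct __timespec64 req = { 1, 500000000L, 0 };   /* sleep 1.5 seconds */
  struct __timespec64 rem;

  while (__nanosleep_t64 (&req, &rem) == -1 && errno == EINTR)
    req = rem;                    /* continue with the remaining time */

  return 0;
}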

diff --git a/time/Makefile b/time/Makefile
index 12f3b35..e0a766b 100644
--- a/time/Makefile
+++ b/time/Makefile
@@ -37,7 +37,7 @@ routines := offtime asctime clock ctime ctime_r difftime \
 	    getdate strptime strptime_l			 \
 	    strftime wcsftime strftime_l wcsftime_l	 \
 	    timespec_get                                 \
-	    settimeofday64
+	    settimeofday64 nanosleep64
 aux :=	    era alt_digit lc-time-cleanup
 
 tests	:= test_time clocktest tst-posixtz tst-strptime tst_wcsftime \
diff --git a/time/Versions b/time/Versions
index 72d6511..e4e088f 100644
--- a/time/Versions
+++ b/time/Versions
@@ -89,5 +89,6 @@ libc {
     __utimes_t64;
     __gettimeofday_t64;
     __settimeofday_t64;
+    __nanosleep_t64;
   }
 }
diff --git a/time/nanosleep64.c b/time/nanosleep64.c
new file mode 100644
index 0000000..c42687c
--- /dev/null
+++ b/time/nanosleep64.c
@@ -0,0 +1,63 @@
+/* Pause execution for a number of nanoseconds
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <limits.h>
+#include <time.h>
+
+extern int __y2038_linux_support;
+
+/* Pause execution for a number of nanoseconds.  */
+int
+__nanosleep_t64 (const struct __timespec64 *requested_time,
+	         struct __timespec64 *remaining)
+{
+  struct timespec treq32, *treqp32 = NULL;
+  struct timespec trem32, *tremp32 = NULL;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit time syscalls */
+    }
+
+  if (requested_time)
+    {
+      if (requested_time->tv_sec > INT_MAX)
+        {
+          __set_errno(EOVERFLOW);
+          return -1;
+        }
+      treq32.tv_sec = requested_time->tv_sec;
+      treq32.tv_nsec = requested_time->tv_nsec;
+      treqp32 = & treq32;
+    }
+
+  if (remaining)
+    tremp32 = &trem32;
+
+  int result = nanosleep(treqp32, tremp32);
+
+  if (result == -1 && errno == EINTR && remaining)
+    {
+      remaining->tv_sec = trem32.tv_sec;
+      remaining->tv_nsec = trem32.tv_nsec;
+      remaining->tv_pad = 0;
+    }
+
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=21dc050da13eca28164e03b11aeb6cd4208c107b

commit 21dc050da13eca28164e03b11aeb6cd4208c107b
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:03 2017 +0200

    Y2038: add function __sched_rr_get_interval_t64

diff --git a/posix/Makefile b/posix/Makefile
index 83b3d74..4c309fb 100644
--- a/posix/Makefile
+++ b/posix/Makefile
@@ -62,7 +62,8 @@ routines :=								      \
 	spawnattr_getsigmask spawnattr_getschedpolicy spawnattr_getschedparam \
 	spawnattr_setsigmask spawnattr_setschedpolicy spawnattr_setschedparam \
 	posix_madvise							      \
-	get_child_max sched_cpucount sched_cpualloc sched_cpufree
+	get_child_max sched_cpucount sched_cpualloc sched_cpufree	      \
+	sched_rr_gi64
 
 aux		:= init-posix environ
 tests		:= test-errno tstgetopt testfnm runtests runptests \
diff --git a/posix/Versions b/posix/Versions
index 65e9687..e5ec374 100644
--- a/posix/Versions
+++ b/posix/Versions
@@ -140,4 +140,11 @@ libc {
   GLIBC_PRIVATE {
     __libc_fork; __libc_pread; __libc_pwrite;
   }
+
+  # Y2038 symbols are given their own version until they can be put in
+  # the right place
+
+  GLIBC_Y2038 {
+    __sched_rr_get_interval_t64;
+  }
 }
diff --git a/posix/sched_rr_gi64.c b/posix/sched_rr_gi64.c
new file mode 100644
index 0000000..caae6da
--- /dev/null
+++ b/posix/sched_rr_gi64.c
@@ -0,0 +1,51 @@
+/* Get the SCHED_RR interval for the named process.
+  
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <sched.h>
+#include <sys/types.h>
+
+extern int __y2038_linux_support;
+
+int
+__sched_rr_get_interval_t64 (pid_t pid, struct __timespec64 *t)
+{
+  struct timespec ts32;
+  int result;
+
+  if (t == NULL)
+    {
+      __set_errno(EINVAL);
+      return -1;
+    }
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+
+  result = sched_rr_get_interval(pid, &ts32);
+  if (result == 0)
+    {
+      t->tv_sec = ts32.tv_sec;
+      t->tv_nsec = ts32.tv_nsec;
+      t->tv_pad = 0;
+    }
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=00e5ed51f99f5d15b08aa3d67c6460c85b0f6959

commit 00e5ed51f99f5d15b08aa3d67c6460c85b0f6959
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:02 2017 +0200

    Y2038: add function __msgctl_t64

diff --git a/sysdeps/unix/sysv/linux/bits/msq.h b/sysdeps/unix/sysv/linux/bits/msq.h
index 7047490..28dbdd7 100644
--- a/sysdeps/unix/sysv/linux/bits/msq.h
+++ b/sysdeps/unix/sysv/linux/bits/msq.h
@@ -35,6 +35,25 @@ typedef unsigned long int msglen_t;
 
 /* Structure of record for one message inside the kernel.
    The type `struct msg' is opaque.  */
+#ifdef __USE_TIME_BITS64
+struct msqid_ds
+{
+  struct ipc_perm msg_perm;	/* structure describing operation permission */
+  __time64_t msg_stime;		/* time of last msgsnd command */
+  unsigned long int __glibc_reserved1;
+  __time64_t msg_rtime;		/* time of last msgrcv command */
+  unsigned long int __glibc_reserved2;
+  __time64_t msg_ctime;		/* time of last change */
+  unsigned long int __glibc_reserved3;
+  unsigned long int __msg_cbytes; /* current number of bytes on queue */
+  msgqnum_t msg_qnum;		/* number of messages currently on queue */
+  msglen_t msg_qbytes;		/* max number of bytes allowed on queue */
+  __pid_t msg_lspid;		/* pid of last msgsnd() */
+  __pid_t msg_lrpid;		/* pid of last msgrcv() */
+  unsigned long int __glibc_reserved4;
+  unsigned long int __glibc_reserved5;
+};
+#else
 struct msqid_ds
 {
   struct ipc_perm msg_perm;	/* structure describing operation permission */
@@ -52,6 +71,7 @@ struct msqid_ds
   unsigned long int __glibc_reserved4;
   unsigned long int __glibc_reserved5;
 };
+#endif
 
 #ifdef __USE_MISC
 
diff --git a/sysdeps/unix/sysv/linux/msgctl.c b/sysdeps/unix/sysv/linux/msgctl.c
index 7280cba..409c539 100644
--- a/sysdeps/unix/sysv/linux/msgctl.c
+++ b/sysdeps/unix/sysv/linux/msgctl.c
@@ -69,3 +69,75 @@ __old_msgctl (int msqid, int cmd, struct __old_msqid_ds *buf)
 }
 compat_symbol (libc, __old_msgctl, msgctl, GLIBC_2_0);
 #endif
+
+/* 64-bit time version */
+
+struct __msqid_ds_t64
+{
+  struct ipc_perm msg_perm;	/* structure describing operation permission */
+  __time64_t msg_stime;		/* time of last msgsnd command */
+  unsigned long int __glibc_reserved1;
+  __time64_t msg_rtime;		/* time of last msgrcv command */
+  unsigned long int __glibc_reserved2;
+  __time64_t msg_ctime;		/* time of last change */
+  unsigned long int __glibc_reserved3;
+  unsigned long int __msg_cbytes; /* current number of bytes on queue */
+  msgqnum_t msg_qnum;		/* number of messages currently on queue */
+  msglen_t msg_qbytes;		/* max number of bytes allowed on queue */
+  __pid_t msg_lspid;		/* pid of last msgsnd() */
+  __pid_t msg_lrpid;		/* pid of last msgrcv() */
+  unsigned long int __glibc_reserved4;
+  unsigned long int __glibc_reserved5;
+};
+
+extern int __y2038_linux_support;
+
+int
+__msgctl_t64 (int msqid, int cmd, struct __msqid_ds_t64 *buf)
+{
+  int result;
+  struct msqid_ds buf32, *pbuf32 = NULL;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscalls */
+    }
+
+  if (cmd == IPC_SET && buf != NULL)
+    {
+      buf32.msg_qbytes = buf->msg_qbytes;
+      buf32.msg_perm.uid = buf->msg_perm.uid;
+      buf32.msg_perm.gid = buf->msg_perm.gid;
+      buf32.msg_perm.mode = buf->msg_perm.mode;
+    }
+
+  if (cmd == IPC_SET || cmd == IPC_STAT)
+    pbuf32 = &buf32;
+
+#ifdef __ASSUME_DIRECT_SYSVIPC_SYSCALLS
+  result = INLINE_SYSCALL_CALL (msgctl, msqid, cmd | __IPC_64, pbuf32);
+#else
+  result = INLINE_SYSCALL_CALL (ipc, IPCOP_msgctl, msqid, cmd | __IPC_64,
+                                0, pbuf32);
+#endif
+
+  if (cmd == IPC_STAT && result == 0 && buf != NULL)
+    {
+      buf->msg_perm = buf32.msg_perm;
+      buf->msg_stime = buf32.msg_stime;
+      buf->__glibc_reserved1 = buf32.__glibc_reserved1;
+      buf->msg_rtime = buf32.msg_rtime;
+      buf->__glibc_reserved2 = buf32.__glibc_reserved2;
+      buf->msg_ctime = buf32.msg_ctime;
+      buf->__glibc_reserved3 = buf32.__glibc_reserved3;
+      buf->__msg_cbytes = buf32.__msg_cbytes;
+      buf->msg_qnum = buf32.msg_qnum;
+      buf->msg_qbytes = buf32.msg_qbytes;
+      buf->msg_lspid = buf32.msg_lspid;
+      buf->msg_lrpid = buf32.msg_lrpid;
+      buf->__glibc_reserved4 = buf32.__glibc_reserved4;
+      buf->__glibc_reserved5 = buf32.__glibc_reserved5;
+    }
+
+  return result;
+}
diff --git a/sysvipc/Versions b/sysvipc/Versions
index 4c797e2..db023d7 100644
--- a/sysvipc/Versions
+++ b/sysvipc/Versions
@@ -13,6 +13,9 @@ libc {
     # Non-standard function.
     semtimedop;
   }
+  GLIBC_2.27 {
+    __msgctl_t64;
+  }
   GLIBC_PRIVATE {
     # Cancellation point entries.
     __libc_msgrcv; __libc_msgsnd;

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=f8d9f822bff1183568b49cd437ef66c03fbb432b

commit f8d9f822bff1183568b49cd437ef66c03fbb432b
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:01 2017 +0200

    Y2038: add function __mq_timedsend_t64

diff --git a/rt/Makefile b/rt/Makefile
index 75dc650..be06b91 100644
--- a/rt/Makefile
+++ b/rt/Makefile
@@ -38,7 +38,7 @@ shm-routines   := shm_open shm_unlink
 mq-routines    := mq_open mq_close mq_unlink mq_getattr mq_setattr	\
 		  mq_notify mq_send mq_receive mq_timedsend		\
 		  mq_timedreceive					\
-		  mq_timedreceive_t64
+		  mq_timedreceive_t64 mq_timedsend_t64
 
 routines = $(clock-routines)
 
diff --git a/rt/Versions b/rt/Versions
index cb88f71..1eef2e6 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -47,5 +47,6 @@ librt {
     __timerfd_gettime64;
     __timerfd_settime64;
     __mq_timedreceive_t64;
+    __mq_timedsend_t64;
   }
 }
diff --git a/rt/mq_timedsend_t64.c b/rt/mq_timedsend_t64.c
new file mode 100644
index 0000000..d0133ec
--- /dev/null
+++ b/rt/mq_timedsend_t64.c
@@ -0,0 +1,45 @@
+/* Add message pointed by MSG_PTR to message queue MQDES, stop blocking
+   on full message queue if ABS_TIMEOUT expires.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <mqueue.h>
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__mq_timedsend_t64 (mqd_t mqdes, const char *msg_ptr, size_t msg_len,
+	      unsigned int msg_prio, const struct __timespec64 *abs_timeout)
+{
+  struct timespec ts32, *tsp32 = NULL;
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+
+  if (abs_timeout)
+    {
+      ts32.tv_sec = abs_timeout->tv_sec;
+      ts32.tv_nsec = abs_timeout->tv_nsec;
+      tsp32 = &ts32;
+    }
+  return mq_timedsend(mqdes, msg_ptr, msg_len, msg_prio, tsp32);
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=64a13a778995de6653fdb44cbf77238fc6e13092

commit 64a13a778995de6653fdb44cbf77238fc6e13092
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:42:00 2017 +0200

    Y2038: add function __mq_timedreceive_t64

diff --git a/rt/Makefile b/rt/Makefile
index 994891e..75dc650 100644
--- a/rt/Makefile
+++ b/rt/Makefile
@@ -37,7 +37,8 @@ timer-routines := timer_create timer_delete timer_getoverr		\
 shm-routines   := shm_open shm_unlink
 mq-routines    := mq_open mq_close mq_unlink mq_getattr mq_setattr	\
 		  mq_notify mq_send mq_receive mq_timedsend		\
-		  mq_timedreceive
+		  mq_timedreceive					\
+		  mq_timedreceive_t64
 
 routines = $(clock-routines)
 
diff --git a/rt/Versions b/rt/Versions
index 761fbb8..cb88f71 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -46,5 +46,6 @@ librt {
     __timer_settime64;
     __timerfd_gettime64;
     __timerfd_settime64;
+    __mq_timedreceive_t64;
   }
 }
diff --git a/rt/mq_timedreceive_t64.c b/rt/mq_timedreceive_t64.c
new file mode 100644
index 0000000..343cd62
--- /dev/null
+++ b/rt/mq_timedreceive_t64.c
@@ -0,0 +1,46 @@
+/* Receive the oldest from highest priority messages in message queue
+   MQDES, stop waiting if ABS_TIMEOUT expires.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <mqueue.h>
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+ssize_t
+__mq_timedreceive_t64 (mqd_t mqdes, char *__restrict msg_ptr, size_t msg_len,
+		 unsigned int *__restrict msg_prio,
+		 const struct __timespec64 *__restrict abs_timeout)
+{
+  struct timespec ts32, *tsp32 = NULL;
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+
+  if (abs_timeout)
+    {
+      ts32.tv_sec = abs_timeout->tv_sec;
+      ts32.tv_nsec = abs_timeout->tv_nsec;
+      tsp32 = &ts32;
+    }
+  return mq_timedreceive(mqdes, msg_ptr, msg_len, msg_prio, tsp32);
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=54107e8c41868f1fa3d9d960dfdad98be3369585

commit 54107e8c41868f1fa3d9d960dfdad98be3369585
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:59 2017 +0200

    Y2038: add function __settimeofday_t64
    
    Implementing a 64-bit settimeofday requires adding a new
    file to build under time/ and we cannot name that new file
    'settimeofday.c' or it will break the 32-bit settimeofday
    symbol, so we call it 'settimeofday64.c'.

diff --git a/time/settimeofday.c b/sysdeps/unix/sysv/linux/settimeofday64.c
similarity index 62%
copy from time/settimeofday.c
copy to sysdeps/unix/sysv/linux/settimeofday64.c
index 01bf4b0..f290657 100644
--- a/time/settimeofday.c
+++ b/sysdeps/unix/sysv/linux/settimeofday64.c
@@ -1,4 +1,6 @@
-/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
+/* Set the current time of day and timezone information.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -15,17 +17,30 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#include <sysdep.h>
+#include <limits.h>
 #include <errno.h>
 #include <sys/time.h>
 
-/* Set the current time of day and timezone information.
-   This call is restricted to the super-user.  */
-int
-__settimeofday (const struct timeval *tv, const struct timezone *tz)
+extern int __y2038_linux_support;
+
+int
+__settimeofday_t64 (const struct __timeval64 *tv,
+                    const struct timezone *tz)
 {
-  __set_errno (ENOSYS);
-  return -1;
-}
-stub_warning (settimeofday)
+  struct timeval tv32;
 
-weak_alias (__settimeofday, settimeofday)
+  if (__y2038_linux_support)
+    {
+      /* TODO: use 64-bit syscall */
+    }
+
+  if (tv == NULL)
+    return settimeofday (NULL, tz);
+
+  if (tv->tv_sec > INT_MAX)
+    {
+      __set_errno (EOVERFLOW);
+      return -1;
+    }
+
+  tv32.tv_sec = tv->tv_sec;
+  tv32.tv_usec = tv->tv_usec;
+
+  return settimeofday (&tv32, tz);
+}
diff --git a/time/Makefile b/time/Makefile
index 0db1206..12f3b35 100644
--- a/time/Makefile
+++ b/time/Makefile
@@ -36,7 +36,8 @@ routines := offtime asctime clock ctime ctime_r difftime \
 	    stime dysize timegm ftime			 \
 	    getdate strptime strptime_l			 \
 	    strftime wcsftime strftime_l wcsftime_l	 \
-	    timespec_get
+	    timespec_get                                 \
+	    settimeofday64
 aux :=	    era alt_digit lc-time-cleanup
 
 tests	:= test_time clocktest tst-posixtz tst-strptime tst_wcsftime \
diff --git a/time/Versions b/time/Versions
index 5d5f736..72d6511 100644
--- a/time/Versions
+++ b/time/Versions
@@ -88,5 +88,6 @@ libc {
     __stime_t64;
     __utimes_t64;
     __gettimeofday_t64;
+    __settimeofday_t64;
   }
 }
diff --git a/time/settimeofday.c b/time/settimeofday.c
index 01bf4b0..027489b 100644
--- a/time/settimeofday.c
+++ b/time/settimeofday.c
@@ -29,3 +29,11 @@ __settimeofday (const struct timeval *tv, const struct timezone *tz)
 stub_warning (settimeofday)
 
 weak_alias (__settimeofday, settimeofday)
+
+int
+__settimeofday_t64 (const struct __timeval64 *tv, const struct timezone *tz)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__settimeofday_t64)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=ff68dd9ccc6d76bb6cdb021b019ae2250dbd7761

commit ff68dd9ccc6d76bb6cdb021b019ae2250dbd7761
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:58 2017 +0200

    Y2038: add function __gettimeofday_t64
    
    Unlike settimeofday, implementing a 64-bit gettimeofday does not
    require a new source file: the 64-bit implementation is added
    alongside the existing 32-bit one in the gettimeofday.c files.
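
A hedged usage sketch (not part of the patch); struct __timeval64 and the
prototype below are assumptions mirroring the code added in the diff:

/* Illustrative only; links only against a libc built from this branch.  */
#include <stdio.h>
#include <sys/time.h>

struct __timeval64 { long long tv_sec; long long tv_usec; }; /* assumed layout */
extern int __gettimeofday_t64 (struct __timeval64 *tv, struct timezone *tz);

int
main (void)
{
  struct __timeval64 now;
  if (__gettimeofday_t64 (&now, NULL) == 0)
    printf ("seconds since the Epoch: %lld\n", now.tv_sec);
  return 0;
}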

diff --git a/sysdeps/unix/sysv/linux/gettimeofday.c b/sysdeps/unix/sysv/linux/gettimeofday.c
index cd971dd..3001713 100644
--- a/sysdeps/unix/sysv/linux/gettimeofday.c
+++ b/sysdeps/unix/sysv/linux/gettimeofday.c
@@ -37,3 +37,29 @@ __gettimeofday (struct timeval *tv, struct timezone *tz)
 libc_hidden_def (__gettimeofday)
 weak_alias (__gettimeofday, gettimeofday)
 libc_hidden_weak (gettimeofday)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__gettimeofday_t64 (struct __timeval64 *tv, struct timezone *tz)
+{
+  struct timeval tv32;
+  int result;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: implement using 64-bit time syscall */
+    }
+
+  result = INLINE_VSYSCALL (gettimeofday, 2, &tv32, tz);
+
+  if (result == 0)
+    {
+      tv->tv_sec = tv32.tv_sec;
+      tv->tv_usec = tv32.tv_usec;
+    }
+
+  return result;
+}
diff --git a/time/Versions b/time/Versions
index f83bb93..5d5f736 100644
--- a/time/Versions
+++ b/time/Versions
@@ -87,5 +87,6 @@ libc {
     __time_t64;
     __stime_t64;
     __utimes_t64;
+    __gettimeofday_t64;
   }
 }
diff --git a/time/gettimeofday.c b/time/gettimeofday.c
index d1ab9ac..bcd075a 100644
--- a/time/gettimeofday.c
+++ b/time/gettimeofday.c
@@ -32,3 +32,13 @@ weak_alias (__gettimeofday, gettimeofday)
 libc_hidden_weak (gettimeofday)
 
 stub_warning (gettimeofday)
+
+/* 64-bit time version */
+
+int
+__gettimeofday_t64 (struct __timeval64 *tv, struct timezone *tz)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__gettimeofday_t64)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=8449704d0fa0007fd3e26b7edef6c28f3d898224

commit 8449704d0fa0007fd3e26b7edef6c28f3d898224
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:57 2017 +0200

    Y2038: add function __utimes_t64

diff --git a/include/sys/time.h b/include/sys/time.h
index 98f6b6b..b5018c0 100644
--- a/include/sys/time.h
+++ b/include/sys/time.h
@@ -20,6 +20,9 @@
 # include <time/sys/time.h>
 
 # ifndef _ISOMAC
+
+#  include <include/time.h>
+
 extern int __gettimeofday (struct timeval *__tv,
 			   struct timezone *__tz);
 libc_hidden_proto (__gettimeofday)
@@ -39,5 +42,8 @@ extern int __utimes (const char *__file, const struct timeval __tvp[2])
 	attribute_hidden;
 extern int __futimes (int fd, const struct timeval tvp[2]) attribute_hidden;
 
+extern int __utimes_t64 (const char *file, const struct __timeval64 tvp[2])
+	attribute_hidden;
+
 # endif
 #endif
diff --git a/misc/utimes.c b/misc/utimes.c
index 0f37ad7..e48ff97 100644
--- a/misc/utimes.c
+++ b/misc/utimes.c
@@ -37,3 +37,18 @@ __utimes (const char *file, const struct timeval tvp[2])
 weak_alias (__utimes, utimes)
 
 stub_warning (utimes)
+
+int
+__utimes_t64 (const char *file, const struct __timeval64 tvp[2])
+{
+  if (file == NULL)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  __set_errno (ENOSYS);
+  return -1;
+}
+
+stub_warning (__utimes_t64)
diff --git a/sysdeps/unix/sysv/linux/utimes.c b/sysdeps/unix/sysv/linux/utimes.c
index 09790b2..2a2df11 100644
--- a/sysdeps/unix/sysv/linux/utimes.c
+++ b/sysdeps/unix/sysv/linux/utimes.c
@@ -34,3 +34,34 @@ __utimes (const char *file, const struct timeval tvp[2])
 }
 
 weak_alias (__utimes, utimes)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__utimes_t64 (const char *file, const struct __timeval64 tvp[2])
+{
+  struct timeval tv32[2], *tvp32 = NULL;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: implement using 64-bit time syscall */
+    }
+
+  if (tvp != NULL)
+    {
+      if (tvp[0].tv_sec > INT_MAX || tvp[1].tv_sec > INT_MAX)
+        {
+          __set_errno(EOVERFLOW);
+          return -1;
+        }
+      tv32[0].tv_sec = tvp[0].tv_sec;
+      tv32[0].tv_usec = tvp[0].tv_usec;
+      tv32[1].tv_sec = tvp[1].tv_sec;
+      tv32[1].tv_usec = tvp[1].tv_usec;
+      tvp32 = tv32;
+    }
+
+  return INLINE_SYSCALL (utimes, 2, file, tvp32);
+}
diff --git a/time/Versions b/time/Versions
index 510a52d..f83bb93 100644
--- a/time/Versions
+++ b/time/Versions
@@ -86,5 +86,6 @@ libc {
     __lutimes64;
     __time_t64;
     __stime_t64;
+    __utimes_t64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=4d9bde6de4536fdbc7e42e18581868dacc1eecf4

commit 4d9bde6de4536fdbc7e42e18581868dacc1eecf4
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:56 2017 +0200

    Y2038: add function __stime_t64
    
    These implementations use only 32-bit time kernel syscalls.
    
    Therefore, __stime_t64() will always set errno to EOVERFLOW and return -1 for dates beyond Y2038.
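
A hedged caller-side sketch (not part of the patch) of the failure mode
described above; the prototype assumes __time64_t is a 64-bit integer:

/* Illustrative only; links only against a libc built from this branch.  */
#include <errno.h>
#include <stdio.h>

extern int __stime_t64 (const long long *when);   /* __time64_t assumed */

int
main (void)
{
  long long when = 3000000000LL;                   /* a date in 2065 */
  if (__stime_t64 (&when) == -1 && errno == EOVERFLOW)
    puts ("beyond the 32-bit syscall range: EOVERFLOW, as documented above");
  return 0;
}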

diff --git a/sysdeps/unix/sysv/linux/stime.c b/sysdeps/unix/sysv/linux/stime.c
new file mode 100644
index 0000000..43f9a93
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/stime.c
@@ -0,0 +1,72 @@
+/* Set the system clock on a Linux kernel 
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>		/* For NULL.  */
+#include <sys/time.h>
+#include <time.h>
+
+/* Set the system clock to *WHEN.  */
+
+int
+stime (const time_t *when)
+{
+  struct timeval tv;
+
+  if (when == NULL)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  tv.tv_sec = *when;
+  tv.tv_usec = 0;
+  return __settimeofday (&tv, (struct timezone *) 0);
+}
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__stime_t64 (const __time64_t *when)
+{
+  struct timeval tv32;
+
+  if (when == NULL)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  if (__y2038_linux_support)
+  {
+    /* TODO: implement 64-bit-time syscall case */
+  }
+
+  if (*when > INT_MAX)
+    {
+      __set_errno (EOVERFLOW);
+      return -1;
+    }
+
+  tv32.tv_sec = *when;
+  tv32.tv_usec = 0;
+  return __settimeofday (&tv32, (struct timezone *) 0);
+}
diff --git a/time/Versions b/time/Versions
index e09465b..510a52d 100644
--- a/time/Versions
+++ b/time/Versions
@@ -85,5 +85,6 @@ libc {
     __futimes64;
     __lutimes64;
     __time_t64;
+    __stime_t64;
   }
 }
diff --git a/time/stime.c b/time/stime.c
index 0378e23..8ada926 100644
--- a/time/stime.c
+++ b/time/stime.c
@@ -35,3 +35,20 @@ stime (const time_t *when)
 }
 
 stub_warning (stime)
+
+/* 64-bit time version */
+
+int
+__stime_t64 (const __time64_t *when)
+{
+  if (when == NULL)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  __set_errno (ENOSYS);
+  return -1;
+}
+
+stub_warning (__stime_t64)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=6ae7326e2e6ad4e5ba0651824cea9bcbd372493f

commit 6ae7326e2e6ad4e5ba0651824cea9bcbd372493f
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:55 2017 +0200

    Y2038: add function __time_t64
    
    These implementations use only 32-bit time kernel syscalls.
    
    Therefore, __time_t64() is limited to the 32-bit time range and cannot report dates beyond Y2038.

diff --git a/sysdeps/posix/time.c b/sysdeps/posix/time.c
index 6d0bb4b..1a3fd4e 100644
--- a/sysdeps/posix/time.c
+++ b/sysdeps/posix/time.c
@@ -38,3 +38,29 @@ time (time_t *t)
   return result;
 }
 libc_hidden_def (time)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+__time64_t
+__time_t64 (__time64_t *t)
+{
+  struct timeval tv32;
+  __time64_t result;
+
+  if (__y2038_linux_support)
+  {
+    /* TODO: implement using 64-bit time syscall */
+  }
+
+  if (__gettimeofday (&tv32, (struct timezone *) NULL))
+    result = (__time64_t) -1;
+  else
+    result = (__time64_t) tv32.tv_sec;
+
+  if (t != NULL)
+    *t = result;
+
+  return result;
+}
diff --git a/sysdeps/unix/sysv/linux/time.c b/sysdeps/unix/sysv/linux/time.c
index 69116af..b68c0b7 100644
--- a/sysdeps/unix/sysv/linux/time.c
+++ b/sysdeps/unix/sysv/linux/time.c
@@ -34,6 +34,28 @@ time (time_t *t)
 }
 libc_hidden_def (time)
 
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+__time64_t
+__time_t64 (__time64_t *t)
+{
+  INTERNAL_SYSCALL_DECL (err);
+  __time64_t res;
+
+  if (__y2038_linux_support)
+    {
+      /* TODO: implement using 64-bit time syscall */
+    }
+
+  res = INTERNAL_SYSCALL (time, err, 1, NULL);
+  /* There cannot be any error.  */
+  if (t != NULL)
+    *t = res;
+  return res;
+}
+
 #else
 
 # include <sysdeps/posix/time.c>
diff --git a/time/Versions b/time/Versions
index e626dff..e09465b 100644
--- a/time/Versions
+++ b/time/Versions
@@ -84,5 +84,6 @@ libc {
     __sigtimedwait64;
     __futimes64;
     __lutimes64;
+    __time_t64;
   }
 }
diff --git a/time/time.c b/time/time.c
index 4996d26..2b296b7 100644
--- a/time/time.c
+++ b/time/time.c
@@ -31,3 +31,16 @@ time (time_t *timer)
 libc_hidden_def (time)
 
 stub_warning (time)
+
+/* 64-bit time version */
+
+__time64_t
+__time_t64 (__time64_t *timer)
+{
+  __set_errno (ENOSYS);
+
+  if (timer != NULL)
+    *timer = (__time64_t) -1;
+  return (__time64_t) -1;
+}
+stub_warning (__time_t64)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=746c0110edacc570e1d345de6736d3825db04dfe

commit 746c0110edacc570e1d345de6736d3825db04dfe
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:54 2017 +0200

    Y2038: add function __fstatat64_t64 (and __fxstatat_t64)
    
    There is no Y2038-proof linux struct stat for now, so these
    implementations just use the existing syscalls and convert from kernel
    32-bit-time struct stat64 to GLIBC Y2038-ready struct __stat64_t64.
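
The essential step is widening each kernel-provided 32-bit timestamp into a
64-bit field. A minimal sketch of that idea (struct __timespec64 here is an
assumed stand-in for the internal type used by this series):

#include <time.h>

struct __timespec64 { long long tv_sec; long tv_nsec; int tv_pad; }; /* assumed */

/* Copy a kernel 32-bit timestamp into a Y2038-ready one; 32-bit seconds
   always fit in a 64-bit field, so the copy is lossless.  */
static void
widen_timespec (struct __timespec64 *dst, const struct timespec *src)
{
  dst->tv_sec = src->tv_sec;
  dst->tv_nsec = src->tv_nsec;
  dst->tv_pad = 0;
}

int
main (void)
{
  struct timespec now = { 0 };
  struct __timespec64 wide;
  widen_timespec (&wide, &now);
  return 0;
}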

diff --git a/include/sys/stat.h b/include/sys/stat.h
index 97b2692..164c6fd 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -45,6 +45,10 @@ extern int __xstat64_t64 (int __ver, const char *__filename,
                           struct __stat64_t64 *__stat_buf);
 extern int __lxstat64_t64 (int __ver, const char *__filename,
 		           struct __stat64_t64 *__stat_buf);
+extern int __fxstatat64_t64 (int __ver, int __fildes,
+                             const char *__filename,
+			     struct __stat64_t64 *__stat_buf,
+                             int __flag);
 #if IS_IN (libc) || (IS_IN (rtld) && !defined NO_RTLD_HIDDEN)
 hidden_proto (__fxstat)
 hidden_proto (__fxstat64)
diff --git a/io/Versions b/io/Versions
index c35e80a..d4a3b40 100644
--- a/io/Versions
+++ b/io/Versions
@@ -134,5 +134,6 @@ libc {
     __fxstat64_t64;
     __xstat64_t64;
     __lxstat64_t64;
+    __fxstatat64_t64;
   }
 }
diff --git a/io/fstatat64.c b/io/fstatat64.c
index f4f46a9..9a427d5 100644
--- a/io/fstatat64.c
+++ b/io/fstatat64.c
@@ -50,3 +50,10 @@ fstatat64 (int fd, const char *file, struct stat64 *buf, int flag)
 {
   return __fxstatat64 (_STAT_VER, fd, file, buf, flag);
 }
+
+int
+attribute_hidden
+__fstatat64_t64 (int fd, const char *file, struct __stat64_t64 *buf, int flag)
+{
+  return __fxstatat64_t64 (_STAT_VER, fd, file, buf, flag);
+}
diff --git a/sysdeps/unix/sysv/linux/fxstatat64.c b/sysdeps/unix/sysv/linux/fxstatat64.c
index baa9a60..6723a1a 100644
--- a/sysdeps/unix/sysv/linux/fxstatat64.c
+++ b/sysdeps/unix/sysv/linux/fxstatat64.c
@@ -26,6 +26,8 @@
 #include <sysdep.h>
 #include <sys/syscall.h>
 
+#include <xstatconv.h>
+
 /* Get information about the file NAME in BUF.  */
 
 int
@@ -45,3 +47,55 @@ __fxstatat64 (int vers, int fd, const char *file, struct stat64 *st, int flag)
 								      err));
 }
 libc_hidden_def (__fxstatat64)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__fxstatat64_t64 (int vers, int fd, const char *file, struct __stat64_t64 *buf, int flag)
+{
+  if (__glibc_unlikely (vers != _STAT_VER_LINUX))
+    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+
+  int result;
+  struct stat64 st64;
+  INTERNAL_SYSCALL_DECL (err);
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscalls when they become available
+    }
+
+  result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
+  if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+    {
+      buf->st_dev          = st64.st_dev;
+      buf->__pad1		     = st64.__pad1;
+    
+      buf->__st_ino        = st64.__st_ino;
+      buf->st_mode         = st64.st_mode;
+      buf->st_nlink        = st64.st_nlink;
+      buf->st_uid          = st64.st_uid;		 
+      buf->st_gid          = st64.st_gid;		 
+      buf->st_rdev         = st64.st_rdev;		 
+      buf->__pad2          = st64.__pad2;
+      buf->st_size         = st64.st_size;		 
+      buf->st_blksize      = st64.st_blksize;
+    
+      buf->st_blocks       = st64.st_blocks;		
+      buf->st_atim.tv_sec  = st64.st_atim.tv_sec;	
+      buf->st_atim.tv_nsec = st64.st_atim.tv_nsec;	
+      buf->st_mtim.tv_sec  = st64.st_mtim.tv_sec;	
+      buf->st_mtim.tv_nsec = st64.st_mtim.tv_nsec;	
+      buf->st_ctim.tv_sec  = st64.st_ctim.tv_sec;	
+      buf->st_ctim.tv_nsec = st64.st_ctim.tv_nsec;	
+    
+      buf->st_ino          = st64.st_ino;
+    
+      return 0;
+    }
+  else
+    return INLINE_SYSCALL_ERROR_RETURN_VALUE (INTERNAL_SYSCALL_ERRNO (result,
+								      err));
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=995e1c8baceee585c27a64187ccf599e8b03ddeb

commit 995e1c8baceee585c27a64187ccf599e8b03ddeb
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:53 2017 +0200

    Y2038: add function __lstat64_t64 (and __lxstat64_t64)
    
    There is no Y2038-proof linux struct stat for now, so these
    implementations just use the existing syscalls and convert from kernel
    32-bit-time struct stat64 to GLIBC Y2038-ready struct __stat64_t64.

diff --git a/include/sys/stat.h b/include/sys/stat.h
index 48851b0..97b2692 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -43,6 +43,8 @@ extern int __fxstat64_t64 (int __ver, int __fildes,
                            struct __stat64_t64 *__stat_buf);
 extern int __xstat64_t64 (int __ver, const char *__filename,
                           struct __stat64_t64 *__stat_buf);
+extern int __lxstat64_t64 (int __ver, const char *__filename,
+		           struct __stat64_t64 *__stat_buf);
 #if IS_IN (libc) || (IS_IN (rtld) && !defined NO_RTLD_HIDDEN)
 hidden_proto (__fxstat)
 hidden_proto (__fxstat64)
diff --git a/io/Versions b/io/Versions
index a9f2b49..c35e80a 100644
--- a/io/Versions
+++ b/io/Versions
@@ -133,5 +133,6 @@ libc {
   GLIBC_Y2038 {
     __fxstat64_t64;
     __xstat64_t64;
+    __lxstat64_t64;
   }
 }
diff --git a/io/lstat64.c b/io/lstat64.c
index c3eb7a7..549208c 100644
--- a/io/lstat64.c
+++ b/io/lstat64.c
@@ -50,3 +50,10 @@ lstat64 (const char *file, struct stat64 *buf)
 {
   return __lxstat64 (_STAT_VER, file, buf);
 }
+
+int
+attribute_hidden
+__lstat64_t64 (const char *file, struct __stat64_t64 *buf)
+{
+  return __lxstat64_t64 (_STAT_VER, file, buf);
+}
diff --git a/sysdeps/unix/sysv/linux/lxstat64.c b/sysdeps/unix/sysv/linux/lxstat64.c
index d05fa14..b7dbc10 100644
--- a/sysdeps/unix/sysv/linux/lxstat64.c
+++ b/sysdeps/unix/sysv/linux/lxstat64.c
@@ -24,6 +24,8 @@
 #include <sysdep.h>
 #include <sys/syscall.h>
 
+#include <xstatconv.h>
+
 #include <kernel-features.h>
 
 /* Get information about the file NAME in BUF.  */
@@ -50,3 +52,51 @@ hidden_ver (___lxstat64, __lxstat64)
 strong_alias (___lxstat64, __lxstat64);
 hidden_def (__lxstat64)
 #endif
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__lxstat64_t64 (int vers, const char *name, struct __stat64_t64 *buf)
+{
+  int result;
+  struct stat64 st64;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscalls when they become available
+    }
+
+  result = INLINE_SYSCALL (lstat64, 2, name, &st64);
+#if defined _HAVE_STAT64___ST_INO && !__ASSUME_ST_INO_64_BIT
+  if (__builtin_expect (!result, 1) && st64.__st_ino != (__ino_t) st64.st_ino)
+    st64.st_ino = st64.__st_ino;
+#endif
+  if (!result)
+    {
+      buf->st_dev          = st64.st_dev;
+      buf->__pad1		     = st64.__pad1;
+    
+      buf->__st_ino        = st64.__st_ino;
+      buf->st_mode         = st64.st_mode;
+      buf->st_nlink        = st64.st_nlink;
+      buf->st_uid          = st64.st_uid;		 
+      buf->st_gid          = st64.st_gid;		 
+      buf->st_rdev         = st64.st_rdev;		 
+      buf->__pad2          = st64.__pad2;
+      buf->st_size         = st64.st_size;		 
+      buf->st_blksize      = st64.st_blksize;
+    
+      buf->st_blocks       = st64.st_blocks;		
+      buf->st_atim.tv_sec  = st64.st_atim.tv_sec;	
+      buf->st_atim.tv_nsec = st64.st_atim.tv_nsec;	
+      buf->st_mtim.tv_sec  = st64.st_mtim.tv_sec;	
+      buf->st_mtim.tv_nsec = st64.st_mtim.tv_nsec;	
+      buf->st_ctim.tv_sec  = st64.st_ctim.tv_sec;	
+      buf->st_ctim.tv_nsec = st64.st_ctim.tv_nsec;	
+    
+      buf->st_ino          = st64.st_ino;
+    }
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=964d2be383ea3267501f95a734c403e17ecdf73c

commit 964d2be383ea3267501f95a734c403e17ecdf73c
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:52 2017 +0200

    Y2038: add function __stat64_t64 (and __xstat64_t64)
    
    There is no Y2038-proof linux struct stat for now, so these
    implementations just use the existing syscalls and convert from kernel
    32-bit-time struct stat64 to GLIBC Y2038-ready struct __stat64_t64.

diff --git a/include/sys/stat.h b/include/sys/stat.h
index fe7055f..48851b0 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -41,6 +41,8 @@ extern int __mknod (const char *__path,
 		    __mode_t __mode, __dev_t __dev);
 extern int __fxstat64_t64 (int __ver, int __fildes,
                            struct __stat64_t64 *__stat_buf);
+extern int __xstat64_t64 (int __ver, const char *__filename,
+                          struct __stat64_t64 *__stat_buf);
 #if IS_IN (libc) || (IS_IN (rtld) && !defined NO_RTLD_HIDDEN)
 hidden_proto (__fxstat)
 hidden_proto (__fxstat64)
diff --git a/io/Versions b/io/Versions
index 6e781f0..a9f2b49 100644
--- a/io/Versions
+++ b/io/Versions
@@ -132,5 +132,6 @@ libc {
   # the right place
   GLIBC_Y2038 {
     __fxstat64_t64;
+    __xstat64_t64;
   }
 }
diff --git a/io/stat64.c b/io/stat64.c
index 5020551..bca8b73 100644
--- a/io/stat64.c
+++ b/io/stat64.c
@@ -50,3 +50,10 @@ stat64 (const char *file, struct stat64 *buf)
 {
   return __xstat64 (_STAT_VER, file, buf);
 }
+
+int
+attribute_hidden
+__stat64_t64 (const char *file, struct __stat64_t64 *buf)
+{
+  return __xstat64_t64 (_STAT_VER, file, buf);
+}
diff --git a/sysdeps/unix/sysv/linux/xstat64.c b/sysdeps/unix/sysv/linux/xstat64.c
index afc9ba2..166da13 100644
--- a/sysdeps/unix/sysv/linux/xstat64.c
+++ b/sysdeps/unix/sysv/linux/xstat64.c
@@ -24,6 +24,8 @@
 #include <sysdep.h>
 #include <sys/syscall.h>
 
+#include <xstatconv.h>
+
 #include <kernel-features.h>
 
 /* Get information about the file NAME in BUF.  */
@@ -52,3 +54,51 @@ hidden_ver (___xstat64, __xstat64)
 strong_alias (___xstat64, __xstat64)
 hidden_def (__xstat64)
 #endif
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__xstat64_t64 (int vers, const char *name, struct __stat64_t64 *buf)
+{
+  int result;
+  struct stat64 st64;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscalls when they become available
+    }
+
+  result = INLINE_SYSCALL (stat64, 2, name, &st64);
+#if defined _HAVE_STAT64___ST_INO && !__ASSUME_ST_INO_64_BIT
+  if (__builtin_expect (!result, 1) && st64.__st_ino != (__ino_t) st64.st_ino)
+    st64.st_ino = st64.__st_ino;
+#endif
+  if (!result)
+    {
+      buf->st_dev          = st64.st_dev;
+      buf->__pad1		     = st64.__pad1;
+    
+      buf->__st_ino        = st64.__st_ino;
+      buf->st_mode         = st64.st_mode;
+      buf->st_nlink        = st64.st_nlink;
+      buf->st_uid          = st64.st_uid;		 
+      buf->st_gid          = st64.st_gid;		 
+      buf->st_rdev         = st64.st_rdev;		 
+      buf->__pad2          = st64.__pad2;
+      buf->st_size         = st64.st_size;		 
+      buf->st_blksize      = st64.st_blksize;
+    
+      buf->st_blocks       = st64.st_blocks;		
+      buf->st_atim.tv_sec  = st64.st_atim.tv_sec;	
+      buf->st_atim.tv_nsec = st64.st_atim.tv_nsec;	
+      buf->st_mtim.tv_sec  = st64.st_mtim.tv_sec;	
+      buf->st_mtim.tv_nsec = st64.st_mtim.tv_nsec;	
+      buf->st_ctim.tv_sec  = st64.st_ctim.tv_sec;	
+      buf->st_ctim.tv_nsec = st64.st_ctim.tv_nsec;	
+    
+      buf->st_ino          = st64.st_ino;
+    }
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=470885d17c58348b7bcfafc29df406ece3210ea5

commit 470885d17c58348b7bcfafc29df406ece3210ea5
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:51 2017 +0200

    Y2038: add function __fstat64_t64 (and __fxstat64_t64)
    
    There is no Y2038-proof linux struct stat for now, so these
    implementations just use the existing syscalls and convert from kernel
    32-bit-time struct stat64 to GLIBC Y2038-ready struct __stat64_t64.

diff --git a/include/sys/stat.h b/include/sys/stat.h
index 7344112..fe7055f 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -39,6 +39,8 @@ extern int __mkdir (const char *__path, __mode_t __mode);
 libc_hidden_proto (__mkdir)
 extern int __mknod (const char *__path,
 		    __mode_t __mode, __dev_t __dev);
+extern int __fxstat64_t64 (int __ver, int __fildes,
+                           struct __stat64_t64 *__stat_buf);
 #if IS_IN (libc) || (IS_IN (rtld) && !defined NO_RTLD_HIDDEN)
 hidden_proto (__fxstat)
 hidden_proto (__fxstat64)
diff --git a/io/Versions b/io/Versions
index 98898cb..6e781f0 100644
--- a/io/Versions
+++ b/io/Versions
@@ -128,4 +128,9 @@ libc {
   GLIBC_2.27 {
     copy_file_range;
   }
+  # Y2038 symbols are given their own version until they can be put in
+  # the right place
+  GLIBC_Y2038 {
+    __fxstat64_t64;
+  }
 }
diff --git a/io/fstat64.c b/io/fstat64.c
index 0f4de02..60f8a74 100644
--- a/io/fstat64.c
+++ b/io/fstat64.c
@@ -50,3 +50,10 @@ fstat64 (int fd, struct stat64 *buf)
 {
   return __fxstat64 (_STAT_VER, fd, buf);
 }
+
+int
+attribute_hidden
+__fstat64_t64 (int fd, struct __stat64_t64 *buf)
+{
+  return __fxstat64_t64 (_STAT_VER, fd, buf);
+}
diff --git a/sysdeps/unix/sysv/linux/fxstat64.c b/sysdeps/unix/sysv/linux/fxstat64.c
index 0d05389..8a8ca9b 100644
--- a/sysdeps/unix/sysv/linux/fxstat64.c
+++ b/sysdeps/unix/sysv/linux/fxstat64.c
@@ -24,6 +24,8 @@
 #include <sysdep.h>
 #include <sys/syscall.h>
 
+#include <xstatconv.h>
+
 #include <kernel-features.h>
 
 /* Get information about the file FD in BUF.  */
@@ -51,3 +53,51 @@ hidden_ver (___fxstat64, __fxstat64)
 strong_alias (___fxstat64, __fxstat64)
 hidden_def (__fxstat64)
 #endif
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__fxstat64_t64 (int vers, int fd, struct __stat64_t64 *buf)
+{
+  int result;
+  struct stat64 st64;
+
+  if (__y2038_linux_support)
+    {
+      // TODO: use 64-bit syscalls when they become available
+    }
+
+  result = INLINE_SYSCALL (fstat64, 2, fd, &st64);
+#if defined _HAVE_STAT64___ST_INO && !__ASSUME_ST_INO_64_BIT
+  if (__builtin_expect (!result, 1) && st64.__st_ino != (__ino_t) st64.st_ino)
+    st64.st_ino = st64.__st_ino;
+#endif
+  if (!result)
+    {
+      buf->st_dev          = st64.st_dev;
+      buf->__pad1		     = st64.__pad1;
+    
+      buf->__st_ino        = st64.__st_ino;
+      buf->st_mode         = st64.st_mode;
+      buf->st_nlink        = st64.st_nlink;
+      buf->st_uid          = st64.st_uid;		 
+      buf->st_gid          = st64.st_gid;		 
+      buf->st_rdev         = st64.st_rdev;		 
+      buf->__pad2          = st64.__pad2;
+      buf->st_size         = st64.st_size;		 
+      buf->st_blksize      = st64.st_blksize;
+    
+      buf->st_blocks       = st64.st_blocks;		
+      buf->st_atim.tv_sec  = st64.st_atim.tv_sec;	
+      buf->st_atim.tv_nsec = st64.st_atim.tv_nsec;	
+      buf->st_mtim.tv_sec  = st64.st_mtim.tv_sec;	
+      buf->st_mtim.tv_nsec = st64.st_mtim.tv_nsec;	
+      buf->st_ctim.tv_sec  = st64.st_ctim.tv_sec;	
+      buf->st_ctim.tv_nsec = st64.st_ctim.tv_nsec;	
+    
+      buf->st_ino          = st64.st_ino;
+    }
+  return result;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=0b055026de0083d3379a4e7bad84e242595bfb79

commit 0b055026de0083d3379a4e7bad84e242595bfb79
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:50 2017 +0200

    Y2038: add struct __stat64_t64

diff --git a/include/sys/stat.h b/include/sys/stat.h
index b82d452..7344112 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -1,7 +1,32 @@
 #ifndef _SYS_STAT_H
 #include <io/sys/stat.h>
+#include <include/time.h>
 
 #ifndef _ISOMAC
+
+/* Used for 64-bit time implementations */
+struct __stat64_t64
+  {
+    __dev_t st_dev;			/* Device.  */
+    unsigned int __pad1;
+
+    __ino_t __st_ino;			/* 32bit file serial number.	*/
+    __mode_t st_mode;			/* File mode.  */
+    __nlink_t st_nlink;			/* Link count.  */
+    __uid_t st_uid;			/* User ID of the file's owner.	*/
+    __gid_t st_gid;			/* Group ID of the file's group.*/
+    __dev_t st_rdev;			/* Device number, if device.  */
+    unsigned int __pad2;
+    __off64_t st_size;			/* Size of file, in bytes.  */
+    __blksize_t st_blksize;		/* Optimal block size for I/O.  */
+
+    __blkcnt64_t st_blocks;		/* Number 512-byte blocks allocated. */
+    struct __timespec64 st_atim;		/* Time of last access.  */
+    struct __timespec64 st_mtim;		/* Time of last modification.  */
+    struct __timespec64 st_ctim;		/* Time of last status change.  */
+    __ino64_t st_ino;			/* File serial number.		*/
+  };
+
 /* Now define the internal interfaces. */
 extern int __stat (const char *__file, struct stat *__buf);
 extern int __fstat (int __fd, struct stat *__buf);

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=bd7f2fdec0cde8e634ab2a112bd4e0348b4dc80e

commit bd7f2fdec0cde8e634ab2a112bd4e0348b4dc80e
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:49 2017 +0200

    Y2038: add function __timerfd_settime64
    
    Note: this tests the return value of function __y2038_kernel_support()
    rather than the value of the variable __y2038_linux_support, because the
    latter cannot be accessed from librt while the former can.
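
The wrappers below all follow the same shape: when the kernel advertises the
new 64-bit time syscall, try it first and fall back to the 32-bit path only on
ENOSYS. A self-contained sketch of that control flow (the helper names are
stand-ins, not symbols from the patch):

#include <errno.h>

static int
try_64bit_syscall (void)
{
  errno = ENOSYS;               /* pretend this kernel lacks the syscall */
  return -1;
}

static int
do_32bit_fallback (void)
{
  return 0;                     /* pretend the legacy syscall succeeded */
}

static int
kernel_has_time64 (void)
{
  return 1;                     /* stand-in for __y2038_kernel_support () */
}

static int
settime_with_fallback (void)
{
  if (kernel_has_time64 ())
    {
      int res = try_64bit_syscall ();
      /* Fall through only when the running kernel lacks the new syscall.  */
      if (res == 0 || errno != ENOSYS)
        return res;
    }
  return do_32bit_fallback ();
}

int
main (void)
{
  return settime_with_fallback ();
}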

diff --git a/rt/Makefile b/rt/Makefile
index c0aad5d..994891e 100644
--- a/rt/Makefile
+++ b/rt/Makefile
@@ -33,7 +33,7 @@ clock-routines := get_clockfreq clock_getcpuclockid			\
 		  clock_nanosleep
 timer-routines := timer_create timer_delete timer_getoverr		\
 		  timer_gettime timer_settime                           \
-		  timerfd_gettime64
+		  timerfd_gettime64 timerfd_settime64
 shm-routines   := shm_open shm_unlink
 mq-routines    := mq_open mq_close mq_unlink mq_getattr mq_setattr	\
 		  mq_notify mq_send mq_receive mq_timedsend		\
diff --git a/rt/Versions b/rt/Versions
index 254e36d..761fbb8 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -45,5 +45,6 @@ librt {
     __timer_gettime64;
     __timer_settime64;
     __timerfd_gettime64;
+    __timerfd_settime64;
   }
 }
diff --git a/rt/timerfd_settime64.c b/rt/timerfd_settime64.c
new file mode 100644
index 0000000..b253f62
--- /dev/null
+++ b/rt/timerfd_settime64.c
@@ -0,0 +1,30 @@
+/* Set timer TIMERID to VALUE, returning old value in OVALUE.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <time.h>
+
+int
+__timerfd_settime64 (int fd, int flags, const struct __itimerspec64 *value,
+	             struct __itimerspec64 *ovalue)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__timerfd_settime64)
diff --git a/sysdeps/unix/sysv/linux/timerfd_settime64.c b/sysdeps/unix/sysv/linux/timerfd_settime64.c
new file mode 100644
index 0000000..c124f03
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/timerfd_settime64.c
@@ -0,0 +1,83 @@
+/* Set timer TIMERID to VALUE, returning old value in OVALUE.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sysdep.h>
+#include "kernel-posix-timers.h"
+
+int
+__timerfd_settime64 (int fd, int flags, const struct __itimerspec64 *value,
+	             struct __itimerspec64 *ovalue)
+{
+  int res;
+  struct itimerspec value32;
+  struct itimerspec ovalue32;
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timerfd_settime64
+  struct __itimerspec64 value64;
+#endif
+
+  if (value == NULL)
+    {
+      __set_errno (EFAULT);
+      return -1;
+    }
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timerfd_settime64
+  if (__y2038_kernel_support())
+    {
+      value64.it_value.tv_sec = value->it_value.tv_sec;
+      value64.it_value.tv_nsec = value->it_value.tv_nsec;
+      value64.it_value.tv_pad = 0;
+      value64.it_interval.tv_sec = value->it_interval.tv_sec;
+      value64.it_interval.tv_nsec = value->it_interval.tv_nsec;
+      value64.it_interval.tv_pad = 0;
+      
+      res = INLINE_SYSCALL (timerfd_settime64, 4, fd, flags,
+                             &value64, ovalue);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  if (value->it_value.tv_sec > INT_MAX
+      || value->it_interval.tv_sec > INT_MAX)
+    {
+      __set_errno(EOVERFLOW);
+      return -1;
+    }
+
+  value32.it_value.tv_sec = value->it_value.tv_sec;
+  value32.it_value.tv_nsec = value->it_value.tv_nsec;
+  value32.it_interval.tv_sec = value->it_interval.tv_sec;
+  value32.it_interval.tv_nsec = value->it_interval.tv_nsec;
+
+  res = INLINE_SYSCALL (timerfd_settime, 4, fd, flags,
+                        &value32, &ovalue32);
+
+  if (res == 0 && ovalue != NULL)
+    {
+      ovalue->it_value.tv_sec = ovalue32.it_value.tv_sec;
+      ovalue->it_value.tv_nsec = ovalue32.it_value.tv_nsec;
+      ovalue->it_interval.tv_sec = ovalue32.it_interval.tv_sec;
+      ovalue->it_interval.tv_nsec = ovalue32.it_interval.tv_nsec;
+    }
+
+  return res;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=57fb15c9e48bcb46659d83cff2754235c81b8bc9

commit 57fb15c9e48bcb46659d83cff2754235c81b8bc9
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:48 2017 +0200

    Y2038: add function __timerfd_gettime64
    
    Note: this tests the return value of function __y2038_kernel_support()
    rather than the value of the variable __y2038_linux_support, because the
    latter cannot be accessed from librt while the former can.

diff --git a/rt/Makefile b/rt/Makefile
index 6d6b896..c0aad5d 100644
--- a/rt/Makefile
+++ b/rt/Makefile
@@ -32,7 +32,8 @@ clock-routines := get_clockfreq clock_getcpuclockid			\
 		  clock_getres clock_gettime clock_settime		\
 		  clock_nanosleep
 timer-routines := timer_create timer_delete timer_getoverr		\
-		  timer_gettime timer_settime
+		  timer_gettime timer_settime                           \
+		  timerfd_gettime64
 shm-routines   := shm_open shm_unlink
 mq-routines    := mq_open mq_close mq_unlink mq_getattr mq_setattr	\
 		  mq_notify mq_send mq_receive mq_timedsend		\
diff --git a/rt/Versions b/rt/Versions
index 4933197..254e36d 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -44,5 +44,6 @@ librt {
   GLIBC_Y2038 {
     __timer_gettime64;
     __timer_settime64;
+    __timerfd_gettime64;
   }
 }
diff --git a/rt/timerfd_gettime64.c b/rt/timerfd_gettime64.c
new file mode 100644
index 0000000..9209d7c
--- /dev/null
+++ b/rt/timerfd_gettime64.c
@@ -0,0 +1,29 @@
+/* Get current value of timer TIMERID and store it in VALUE.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <time.h>
+
+int
+__timerfd_gettime64 (int fd, struct __itimerspec64 *value)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__timerfd_gettime64)
diff --git a/sysdeps/unix/sysv/linux/timerfd_gettime64.c b/sysdeps/unix/sysv/linux/timerfd_gettime64.c
new file mode 100644
index 0000000..ee81443
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/timerfd_gettime64.c
@@ -0,0 +1,53 @@
+/* Get current value of timer TIMERID and store it in VALUE.
+
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sysdep.h>
+#include "kernel-posix-timers.h"
+
+int
+__timerfd_gettime64 (int fd, struct __itimerspec64 *value)
+{
+  int res;
+  struct itimerspec value32;
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timerfd_gettime64
+  if (__y2038_kernel_support())
+    {
+      res = INLINE_SYSCALL (timerfd_gettime64, 2, fd, value);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  res = INLINE_SYSCALL (timerfd_gettime, 2, fd, &value32);
+
+  if (res == 0)
+    {
+      value->it_value.tv_sec = value32.it_value.tv_sec;
+      value->it_value.tv_nsec = value32.it_value.tv_nsec;
+      value->it_interval.tv_sec = value32.it_interval.tv_sec;
+      value->it_interval.tv_nsec = value32.it_interval.tv_nsec;
+    }
+
+  return res;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d8feceb1bbbe6760b6b73aa879a16d29d8e91d16

commit d8feceb1bbbe6760b6b73aa879a16d29d8e91d16
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:47 2017 +0200

    Y2038: add function __timer_settime64

diff --git a/rt/Versions b/rt/Versions
index f07c8e2..4933197 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -43,5 +43,6 @@ librt {
 
   GLIBC_Y2038 {
     __timer_gettime64;
+    __timer_settime64;
   }
 }
diff --git a/sysdeps/unix/sysv/linux/timer_settime.c b/sysdeps/unix/sysv/linux/timer_settime.c
index 7c938bd..7014973 100644
--- a/sysdeps/unix/sysv/linux/timer_settime.c
+++ b/sysdeps/unix/sysv/linux/timer_settime.c
@@ -41,3 +41,64 @@ timer_settime (timer_t timerid, int flags, const struct itimerspec *value,
 
   return res;
 }
+
+/* 64-bit time version */
+
+int
+__timer_settime64 (timer_t timerid, int flags, const struct __itimerspec64 *value,
+                   struct __itimerspec64 *ovalue)
+{
+  int res;
+  struct timer *kt = (struct timer *) timerid;
+  struct itimerspec value32, ovalue32;
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timer_settime64
+  struct __itimerspec64 value64;
+#endif
+
+  if (value == NULL)
+  {
+    __set_errno(EFAULT);
+    return -1;
+  }
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timer_settime64
+  if (__y2038_kernel_support())
+    {
+      value64.it_value.tv_sec = value->it_value.tv_sec;
+      value64.it_value.tv_nsec = value->it_value.tv_nsec;
+      value64.it_value.tv_pad = 0;
+      value64.it_interval.tv_sec = value->it_interval.tv_sec;
+      value64.it_interval.tv_nsec = value->it_interval.tv_nsec;
+      value64.it_interval.tv_pad = 0;
+    
+      res = INLINE_SYSCALL (timer_settime64, 4, kt->ktimerid, flags,
+                            &value64, ovalue);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  if (value->it_value.tv_sec > INT_MAX
+      || value->it_interval.tv_sec > INT_MAX)
+    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EOVERFLOW);
+
+  value32.it_value.tv_sec = value->it_value.tv_sec;
+  value32.it_value.tv_nsec = value->it_value.tv_nsec;
+  value32.it_interval.tv_sec = value->it_interval.tv_sec;
+  value32.it_interval.tv_nsec = value->it_interval.tv_nsec;
+
+  res = INLINE_SYSCALL (timer_settime, 4, kt->ktimerid, flags,
+        &value32, &ovalue32);
+
+  if (res == 0 && ovalue != NULL)
+    {
+      ovalue->it_value.tv_sec = ovalue32.it_value.tv_sec;
+      ovalue->it_value.tv_nsec = ovalue32.it_value.tv_nsec;
+      ovalue->it_interval.tv_sec = ovalue32.it_interval.tv_sec;
+      ovalue->it_interval.tv_nsec = ovalue32.it_interval.tv_nsec;
+    }
+
+  return res;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=97af2a7373468e3a343c547d27d6c99f06064c86

commit 97af2a7373468e3a343c547d27d6c99f06064c86
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:46 2017 +0200

    Y2038: add function __timer_gettime64

diff --git a/rt/Versions b/rt/Versions
index 91e3fd2..f07c8e2 100644
--- a/rt/Versions
+++ b/rt/Versions
@@ -37,4 +37,11 @@ librt {
   GLIBC_2.7 {
    __mq_open_2;
   }
+
+  # Y2038 symbols are given their own version until they can be put in
+  # the right place
+
+  GLIBC_Y2038 {
+    __timer_gettime64;
+  }
 }
diff --git a/sysdeps/unix/sysv/linux/timer_gettime.c b/sysdeps/unix/sysv/linux/timer_gettime.c
index 10a19d9..a5639e8 100644
--- a/sysdeps/unix/sysv/linux/timer_gettime.c
+++ b/sysdeps/unix/sysv/linux/timer_gettime.c
@@ -39,3 +39,35 @@ timer_gettime (timer_t timerid, struct itimerspec *value)
 
   return res;
 }
+
+/* 64-bit time version */
+
+int
+__timer_gettime64 (timer_t timerid, struct __itimerspec64 *value)
+{
+  struct itimerspec value32;
+  struct timer *kt = (struct timer *) timerid;
+  int res;
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_timer_gettime64
+  if (__y2038_kernel_support())
+    {
+      res = INLINE_SYSCALL (timer_gettime64, 2, kt->ktimerid, value);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  res = INLINE_SYSCALL (timer_gettime, 2, kt->ktimerid, &value32);
+
+  if (res == 0)
+    {
+      value->it_value.tv_sec = value32.it_value.tv_sec;
+      value->it_value.tv_nsec = value32.it_value.tv_nsec;
+      value->it_interval.tv_sec = value32.it_interval.tv_sec;
+      value->it_interval.tv_nsec = value32.it_interval.tv_nsec;
+    }
+
+  return res;
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=4f387e26e3f3159e6c29e72a1a3bd4992bbf95be

commit 4f387e26e3f3159e6c29e72a1a3bd4992bbf95be
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:45 2017 +0200

    Y2038: add struct __itimerspec64
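
The new type just pairs two struct __timespec64 values, so widening a 32-bit
struct itimerspec is a plain field-by-field copy, exactly what __timer_gettime64
above does inline. A minimal sketch (the helper name is hypothetical; the branch
open-codes these copies, and struct __itimerspec64 exists only in glibc's
internal include/time.h):

    static void
    itimerspec_to_itimerspec64 (const struct itimerspec *its32,
                                struct __itimerspec64 *its64)
    {
      its64->it_interval.tv_sec  = its32->it_interval.tv_sec;
      its64->it_interval.tv_nsec = its32->it_interval.tv_nsec;
      its64->it_value.tv_sec     = its32->it_value.tv_sec;
      its64->it_value.tv_nsec    = its32->it_value.tv_nsec;
    }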

diff --git a/include/time.h b/include/time.h
index 138f91c..1048b2d 100644
--- a/include/time.h
+++ b/include/time.h
@@ -49,6 +49,12 @@ struct __timeval64
   __int64_t tv_usec;		/* Microseconds */
 };
 
+struct __itimerspec64
+{
+  struct __timespec64 it_interval;
+  struct __timespec64 it_value;
+};
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=6e2ad628d5c9fe62e34bf135058468ea628bd832

commit 6e2ad628d5c9fe62e34bf135058468ea628bd832
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:44 2017 +0200

    Y2038: add function __lutimes64

diff --git a/misc/lutimes.c b/misc/lutimes.c
index b241c14..e436899 100644
--- a/misc/lutimes.c
+++ b/misc/lutimes.c
@@ -31,3 +31,11 @@ __lutimes (const char *file, const struct timeval tvp[2])
 weak_alias (__lutimes, lutimes)
 
 stub_warning (lutimes)
+
+int
+__lutimes64 (const char *file, const struct __timeval64 tvp[2])
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__lutimes64)
diff --git a/sysdeps/unix/sysv/linux/lutimes.c b/sysdeps/unix/sysv/linux/lutimes.c
index 4f2f9ec..175563e 100644
--- a/sysdeps/unix/sysv/linux/lutimes.c
+++ b/sysdeps/unix/sysv/linux/lutimes.c
@@ -42,3 +42,61 @@ lutimes (const char *file, const struct timeval tvp[2])
   return INLINE_SYSCALL (utimensat, 4, AT_FDCWD, file, tvp ? ts : NULL,
 			 AT_SYMLINK_NOFOLLOW);
 }
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__lutimes64 (const char *file, const struct __timeval64 tvp[2])
+{
+  struct timespec ts32[2], *ts32p = NULL;
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  /* The system call expects timespec, not timeval.  */
+  struct __timespec64 ts64[2], *ts64p = NULL;
+  int result;
+#endif
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  if (__y2038_linux_support)
+    {
+      if (tvp != NULL)
+        {
+          if (tvp[0].tv_usec < 0 || tvp[0].tv_usec >= 1000000
+              || tvp[1].tv_usec < 0 || tvp[1].tv_usec >= 1000000)
+  	    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+        
+          ts64[0].tv_sec = tvp[0].tv_sec;
+          ts64[0].tv_nsec = tvp[0].tv_usec * 1000;
+          ts64[0].tv_pad = 0;
+          ts64[1].tv_sec = tvp[1].tv_sec;
+          ts64[1].tv_nsec = tvp[1].tv_usec * 1000;
+          ts64[1].tv_pad = 0;
+          ts64p = ts64;
+        }
+    
+      result = INLINE_SYSCALL (utimensat64, 4, AT_FDCWD, file, ts64p,
+  	  		     AT_SYMLINK_NOFOLLOW);
+      if (result == 0 || errno != ENOSYS)
+        return result;
+    }
+#endif
+
+  if (tvp != NULL)
+    {
+      if (tvp[0].tv_usec < 0 || tvp[0].tv_usec >= 1000000
+          || tvp[1].tv_usec < 0 || tvp[1].tv_usec >= 1000000)
+        return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+    
+      ts32[0].tv_sec = tvp[0].tv_sec;
+      ts32[0].tv_nsec = tvp[0].tv_usec * 1000;
+      ts32[1].tv_sec = tvp[1].tv_sec;
+      ts32[1].tv_nsec = tvp[1].tv_usec * 1000;
+      ts32p = ts32;
+    }
+
+  return INLINE_SYSCALL (utimensat, 4, AT_FDCWD, file, ts32p,
+                             AT_SYMLINK_NOFOLLOW);
+}
diff --git a/time/Versions b/time/Versions
index 850d431..e626dff 100644
--- a/time/Versions
+++ b/time/Versions
@@ -83,5 +83,6 @@ libc {
     __utimensat64;
     __sigtimedwait64;
     __futimes64;
+    __lutimes64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=eec10a539e18259b6414f490362bb95e880f6b2e

commit eec10a539e18259b6414f490362bb95e880f6b2e
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:43 2017 +0200

    Y2038: add function __futimes64

diff --git a/misc/futimes.c b/misc/futimes.c
index 3ffa40b..c8756a0 100644
--- a/misc/futimes.c
+++ b/misc/futimes.c
@@ -30,3 +30,12 @@ __futimes (int fd, const struct timeval tvp[2])
 weak_alias (__futimes, futimes)
 
 stub_warning (futimes)
+
+int
+__futimes64 (int fd, const struct __timeval64 tvp[2])
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+
+stub_warning (__futimes64)
diff --git a/sysdeps/unix/sysv/linux/futimes.c b/sysdeps/unix/sysv/linux/futimes.c
index 9e6267c..fb644d7 100644
--- a/sysdeps/unix/sysv/linux/futimes.c
+++ b/sysdeps/unix/sysv/linux/futimes.c
@@ -49,3 +49,62 @@ __futimes (int fd, const struct timeval tvp[2])
   return INLINE_SYSCALL (utimensat, 4, fd, NULL, tvp ? &ts : NULL, 0);
 }
 weak_alias (__futimes, futimes)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__futimes64 (int fd, const struct __timeval64 tvp[2])
+{
+  struct timespec ts32[2], *ts32p = NULL;
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  /* The utimensat system call expects timespec, not timeval.  */
+  struct __timespec64 ts64[2], *ts64p = NULL;
+  int result;
+#endif
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  if (__y2038_linux_support)
+    {
+      if (tvp != NULL)
+        {
+          if (tvp[0].tv_usec < 0 || tvp[0].tv_usec >= 1000000
+              || tvp[1].tv_usec < 0 || tvp[1].tv_usec >= 1000000)
+  	    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+        
+          ts64[0].tv_sec = tvp[0].tv_sec;
+          ts64[0].tv_nsec = tvp[0].tv_usec * 1000;
+          ts64[0].tv_pad = 0;
+          ts64[1].tv_sec = tvp[1].tv_sec;
+          ts64[1].tv_nsec = tvp[1].tv_usec * 1000;
+          ts64[1].tv_pad = 0;
+          ts64p = ts64;
+        }
+    
+      result = INLINE_SYSCALL (utimensat64, 4, fd, NULL, ts64p, 0);
+      if (result == 0 || errno != ENOSYS)
+        return result;
+    }
+#endif
+
+  if (tvp != NULL)
+    {
+      if (tvp[0].tv_usec < 0 || tvp[0].tv_usec >= 1000000
+          || tvp[1].tv_usec < 0 || tvp[1].tv_usec >= 1000000)
+        return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+    
+      if (tvp[0].tv_sec > INT_MAX || tvp[1].tv_sec > INT_MAX)
+        return INLINE_SYSCALL_ERROR_RETURN_VALUE (EOVERFLOW);
+    
+      ts32[0].tv_sec = tvp[0].tv_sec;
+      ts32[0].tv_nsec = tvp[0].tv_usec * 1000;
+      ts32[1].tv_sec = tvp[1].tv_sec;
+      ts32[1].tv_nsec = tvp[1].tv_usec * 1000;
+      ts32p = ts32;
+    }
+
+  return INLINE_SYSCALL (utimensat, 4, fd, NULL, ts32p, 0);
+}
diff --git a/time/Versions b/time/Versions
index 9ddf6ad..850d431 100644
--- a/time/Versions
+++ b/time/Versions
@@ -82,5 +82,6 @@ libc {
     __futimens64;
     __utimensat64;
     __sigtimedwait64;
+    __futimes64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=e4b324fa7bf6ed88e20ddddbce7efb7164c0308e

commit e4b324fa7bf6ed88e20ddddbce7efb7164c0308e
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:42 2017 +0200

    Y2038: add struct __timeval64
    
    Also, provide static inline functions and macros for checking
    and converting between 32-bit and 64-bit timevals.
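
A minimal usage sketch of the helpers added below (it only builds inside the
glibc tree, since struct __timeval64 and the conversion functions live in the
internal include/time.h; the function name here is hypothetical):

    static int
    widen_then_narrow (const struct timeval *in, struct timeval *out)
    {
      struct __timeval64 tv64;

      /* Rejects a tv_usec outside 0..999999.  */
      if (! timeval_to_timeval64 (in, &tv64))
        return -1;
      /* ... 64-bit processing would go here ...  */
      /* Rejects a tv_sec that does not fit in a 32-bit time_t.  */
      if (! timeval64_to_timeval (&tv64, out))
        return -1;
      return 0;
    }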

diff --git a/include/time.h b/include/time.h
index 72ee48e..138f91c 100644
--- a/include/time.h
+++ b/include/time.h
@@ -43,6 +43,12 @@ struct __timespec64
 };
 #endif
 
+struct __timeval64
+{
+  __time64_t tv_sec;		/* Seconds */
+  __int64_t tv_usec;		/* Microseconds */
+};
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)
@@ -218,5 +224,54 @@ static inline bool timespec64_to_timespec(const struct __timespec64 *ts64,
   return true;
 }
 
+/* convert a known valid struct timeval into a struct __timeval64 */
+static inline void valid_timeval_to_timeval64(const struct timeval *tv32,
+                                              struct __timeval64 *tv64)
+{
+  tv64->tv_sec = tv32->tv_sec;
+  tv64->tv_usec = tv32->tv_usec;
+}
+
+/* convert a known valid struct __timeval64 into a struct timeval */
+static inline void valid_timeval64_to_timeval(const struct __timeval64 *tv64,
+					      struct timeval *tv32)
+{
+  tv32->tv_sec = (time_t) tv64->tv_sec;
+  tv32->tv_usec = tv64->tv_usec;
+}
+
+/* check if a struct timeval/__timeval64 is valid */
+#define IS_VALID_TIMEVAL(ts) \
+  ((ts).tv_usec >= 0 && (ts).tv_usec <= 999999)
+
+/* check if a struct timeval/__timeval64 is a valid 32-bit timeval */
+#define IS_VALID_TIMEVAL32(ts) \
+  (fits_in_time_t((ts).tv_sec) && (ts).tv_usec >= 0 && (ts).tv_usec <= 999999)
+
+/* check and convert a struct timeval into a struct __timeval64 */
+static inline bool timeval_to_timeval64(const struct timeval *tv32,
+                                        struct __timeval64 *tv64)
+{
+  /* check that tv_usec holds a valid count of microseconds */
+  if (! IS_VALID_TIMEVAL(*tv32))
+    return false;
+  /* all tv32 fields can fit in tv64, so copy them */
+  valid_timeval_to_timeval64(tv32, tv64);
+  return true;
+}
+
+/* check and convert a struct __timeval64 into a struct timeval */
+static inline bool timeval64_to_timeval(const struct __timeval64 *tv64,
+                                        struct timeval *tv32)
+{
+  /* check that tv_sec fits in time_t and tv_usec is a valid count of microseconds */
+  if (! IS_VALID_TIMEVAL32(*tv64))
+    return false;
+  /* all tv64 fields can fit in tv32, so copy them */
+  valid_timeval64_to_timeval(tv64, tv32);
+  return true;
+}
+
 #endif
 #endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=82914b1806297fb7109431d59647eefb0676417c

commit 82914b1806297fb7109431d59647eefb0676417c
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:41 2017 +0200

    Y2038: add function __sigtimedwait64

diff --git a/signal/sigtimedwait.c b/signal/sigtimedwait.c
index 308b9b9..d8d7165 100644
--- a/signal/sigtimedwait.c
+++ b/signal/sigtimedwait.c
@@ -30,3 +30,13 @@ libc_hidden_def (__sigtimedwait)
 weak_alias (__sigtimedwait, sigtimedwait)
 
 stub_warning (sigtimedwait)
+
+int
+__sigtimedwait64 (const sigset_t *set, siginfo_t *info,
+		const struct __timespec64 *timeout)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+
+stub_warning (__sigtimedwait64)
diff --git a/sysdeps/unix/sysv/linux/sigtimedwait.c b/sysdeps/unix/sysv/linux/sigtimedwait.c
index 051a285..9e5982d 100644
--- a/sysdeps/unix/sysv/linux/sigtimedwait.c
+++ b/sysdeps/unix/sysv/linux/sigtimedwait.c
@@ -18,6 +18,9 @@
 #include <errno.h>
 #include <signal.h>
 #include <string.h>
+#include <stdint.h>
+
+#include <nptl/pthreadP.h>
 #include <sysdep-cancel.h>
 
 int
@@ -52,3 +55,78 @@ __sigtimedwait (const sigset_t *set, siginfo_t *info,
 }
 libc_hidden_def (__sigtimedwait)
 weak_alias (__sigtimedwait, sigtimedwait)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__sigtimedwait64 (const sigset_t *set, siginfo_t *info,
+		const struct __timespec64 *timeout)
+{
+  int result;
+  struct __timespec64 ts64;
+  struct timespec ts32;
+
+#ifdef SIGCANCEL
+  sigset_t tmpset;
+  if (set != NULL
+      && (__builtin_expect (__sigismember (set, SIGCANCEL), 0)
+# ifdef SIGSETXID
+	  || __builtin_expect (__sigismember (set, SIGSETXID), 0)
+# endif
+	  ))
+    {
+      /* Create a temporary mask without the bit for SIGCANCEL set.  */
+      /* We are not copying more than we have to.  */
+      memcpy (&tmpset, set, _NSIG / 8);
+      __sigdelset (&tmpset, SIGCANCEL);
+# ifdef SIGSETXID
+      __sigdelset (&tmpset, SIGSETXID);
+# endif
+      set = &tmpset;
+    }
+#endif
+
+    /* XXX The size argument hopefully will have to be changed to the
+       real size of the user-level sigset_t.  */
+
+  if (__y2038_linux_support)
+    {
+      if (timeout)
+        {
+          ts64.tv_sec = timeout->tv_sec;
+          ts64.tv_nsec = timeout->tv_nsec;
+          ts64.tv_pad = 0;
+          result = SYSCALL_CANCEL (rt_sigtimedwait, set, info, &ts64, _NSIG / 8);
+        }
+      else
+        result = SYSCALL_CANCEL (rt_sigtimedwait, set, info, NULL, _NSIG / 8);
+    }
+  else
+    {
+      if (timeout)
+        {
+          if (! timespec64_to_timespec(timeout, &ts32))
+            {
+              errno = EOVERFLOW;
+              return -1;
+            }
+          result = SYSCALL_CANCEL (rt_sigtimedwait, set, info, &ts32, _NSIG / 8);
+        }
+      else
+        result = SYSCALL_CANCEL (rt_sigtimedwait, set, info, NULL, _NSIG / 8);
+    }
+
+  /* The kernel generates a SI_TKILL code in si_code in case tkill is
+     used.  tkill is transparently used in raise().  Since having
+     SI_TKILL as a code is useful in general we fold the results
+     here.  */
+  if (result != -1 && info != NULL && info->si_code == SI_TKILL)
+    info->si_code = SI_USER;
+
+  return result;
+}
+#else
+# include <signal/sigtimedwait.c>
+#endif
diff --git a/time/Versions b/time/Versions
index b4d192c..9ddf6ad 100644
--- a/time/Versions
+++ b/time/Versions
@@ -81,5 +81,6 @@ libc {
     __timespec_get64;
     __futimens64;
     __utimensat64;
+    __sigtimedwait64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=29cf675b04c325d5c911a694fb571300dad74556

commit 29cf675b04c325d5c911a694fb571300dad74556
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:40 2017 +0200

    Y2038: add function __utimensat64

diff --git a/io/utimensat.c b/io/utimensat.c
index 6b67a52..2116b3d 100644
--- a/io/utimensat.c
+++ b/io/utimensat.c
@@ -30,3 +30,12 @@ utimensat (int fd, const char *file, const struct timespec tsp[2],
   return -1;
 }
 stub_warning (utimensat)
+
+int
+__utimensat64 (int fd, const char *file, const struct __timespec64 tsp[2],
+	   int flags)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__utimensat64)
diff --git a/sysdeps/unix/sysv/linux/utimensat.c b/sysdeps/unix/sysv/linux/utimensat.c
index 108d3bc..83411ed 100644
--- a/sysdeps/unix/sysv/linux/utimensat.c
+++ b/sysdeps/unix/sysv/linux/utimensat.c
@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <sys/stat.h>
 #include <sysdep.h>
+#include <stdio.h>
 
 
 /* Change the access time of FILE to TSP[0] and
@@ -34,3 +35,57 @@ utimensat (int fd, const char *file, const struct timespec tsp[2],
   /* Avoid implicit array coercion in syscall macros.  */
   return INLINE_SYSCALL (utimensat, 4, fd, file, &tsp[0], flags);
 }
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__utimensat64 (int fd, const char *file, const struct __timespec64 tsp[2],
+	   int flags)
+{
+  struct timespec ts32[2];
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  struct __timespec64 ts64[2], *ts64p = NULL;
+  int res;
+#endif
+
+  if (file == NULL)
+    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  if (__y2038_linux_support)
+    {
+      if (tsp)
+        {
+          ts64[0].tv_sec = tsp[0].tv_sec;
+          ts64[0].tv_nsec = tsp[0].tv_nsec;
+          ts64[0].tv_pad = 0;
+          ts64[1].tv_sec = tsp[1].tv_sec;
+          ts64[1].tv_nsec = tsp[1].tv_nsec;
+          ts64[1].tv_pad = 0;
+          ts64p = ts64;
+        }
+
+      res = INLINE_SYSCALL (utimensat64, 4, fd, file, ts64p, flags);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  if (tsp != NULL)
+    {
+      if (! timespec64_to_timespec(&tsp[0], &ts32[0])
+          || ! timespec64_to_timespec(&tsp[1], &ts32[1]))
+        {
+          __set_errno(EOVERFLOW);
+          return -1;
+        }
+    }
+
+  return INLINE_SYSCALL (utimensat, 4, fd, file, tsp != NULL ? &ts32[0] : NULL, flags);
+}
diff --git a/time/Versions b/time/Versions
index 305b47f..b4d192c 100644
--- a/time/Versions
+++ b/time/Versions
@@ -80,5 +80,6 @@ libc {
     __clock_nanosleep64;
     __timespec_get64;
     __futimens64;
+    __utimensat64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=45ca5f118f22df15953d915b7d23659777d156d1

commit 45ca5f118f22df15953d915b7d23659777d156d1
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:39 2017 +0200

    Y2038: add function __futimens64

diff --git a/io/futimens.c b/io/futimens.c
index fa93096..dd82f79 100644
--- a/io/futimens.c
+++ b/io/futimens.c
@@ -21,6 +21,7 @@
 #include <string.h>
 #include <time.h>
 #include <sysdep.h>
+#include <kernel_timespec.h>
 
 
 /* Change the access time of the file associated with FD to TSP[0] and
@@ -32,3 +33,11 @@ futimens (int fd, const struct timespec tsp[2])
   return -1;
 }
 stub_warning (futimens)
+
+int
+__futimens64 (int fd, const struct __timespec64 tsp[2])
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (__futimens64)
diff --git a/sysdeps/unix/sysv/linux/futimens.c b/sysdeps/unix/sysv/linux/futimens.c
index bc7fe54..fb338b9 100644
--- a/sysdeps/unix/sysv/linux/futimens.c
+++ b/sysdeps/unix/sysv/linux/futimens.c
@@ -36,3 +36,51 @@ futimens (int fd, const struct timespec tsp[2])
   /* Avoid implicit array coercion in syscall macros.  */
   return INLINE_SYSCALL (utimensat, 4, fd, NULL, &tsp[0], 0);
 }
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__futimens64 (int fd, const struct __timespec64 tsp[2])
+{
+  struct timespec ts32[2];
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  struct __timespec64 ts64[2];
+  int res;
+#endif
+
+  if (fd < 0)
+    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EBADF);
+
+/* Only try and use this syscall if defined by kernel */
+#ifdef __NR_utimensat64
+  if (__y2038_linux_support)
+    {
+      if (tsp != NULL)
+        {
+          ts64[0].tv_sec = tsp[0].tv_sec;
+          ts64[0].tv_nsec = tsp[0].tv_nsec;
+          ts64[0].tv_pad = 0;
+          ts64[1].tv_sec = tsp[1].tv_sec;
+          ts64[1].tv_nsec = tsp[1].tv_nsec;
+          ts64[1].tv_pad = 0;
+        }
+      res = INLINE_SYSCALL (utimensat64, 4, fd, NULL,
+                            tsp != NULL ? &ts64[0] : NULL, 0);
+      if (res == 0 || errno != ENOSYS)
+        return res;
+    }
+#endif
+
+  if (tsp != NULL)
+    {
+      if (! timespec64_to_timespec(&tsp[0], &ts32[0])
+          || ! timespec64_to_timespec(&tsp[1], &ts32[1]))
+        {
+          __set_errno(EOVERFLOW);
+          return -1;
+        }
+    }
+
+  return INLINE_SYSCALL (utimensat, 4, fd, NULL, tsp != NULL ? &ts32[0] : NULL, 0);
+}
diff --git a/time/Versions b/time/Versions
index d29226e..305b47f 100644
--- a/time/Versions
+++ b/time/Versions
@@ -79,5 +79,6 @@ libc {
     __clock_getres64;
     __clock_nanosleep64;
     __timespec_get64;
+    __futimens64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=36d22c1b4413b51ab4e58cbc97e9a275a0418b38

commit 36d22c1b4413b51ab4e58cbc97e9a275a0418b38
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:38 2017 +0200

    Y2038: add function __timespec_get64

diff --git a/sysdeps/unix/sysv/linux/timespec_get.c b/sysdeps/unix/sysv/linux/timespec_get.c
index b14a302..b40bded 100644
--- a/sysdeps/unix/sysv/linux/timespec_get.c
+++ b/sysdeps/unix/sysv/linux/timespec_get.c
@@ -44,3 +44,44 @@ timespec_get (struct timespec *ts, int base)
 
   return base;
 }
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__timespec_get64 (struct __timespec64 *ts, int base)
+{
+  switch (base)
+    {
+      int res;
+      INTERNAL_SYSCALL_DECL (err);
+    case TIME_UTC:
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgettime64
+      if (__y2038_linux_support)
+        {
+          res = INTERNAL_VSYSCALL (clock_gettime64, err, 2, CLOCK_REALTIME, ts);
+        }
+      else
+#endif
+        {
+          res = -1;
+          __set_errno(ENOSYS);
+        }
+      if (res == -1 && errno == ENOSYS)
+        {
+          struct timespec ts32;
+          res = INTERNAL_VSYSCALL (clock_gettime, err, 2, CLOCK_REALTIME, &ts32);
+          if (INTERNAL_SYSCALL_ERROR_P (res, err))
+	    return 0;
+          timespec_to_timespec64(&ts32, ts);
+        }
+      break;
+
+    default:
+      return 0;
+    }
+
+  return base;
+}
diff --git a/time/Versions b/time/Versions
index 3a3ad31..d29226e 100644
--- a/time/Versions
+++ b/time/Versions
@@ -78,5 +78,6 @@ libc {
     __clock_settime64;
     __clock_getres64;
     __clock_nanosleep64;
+    __timespec_get64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=a410965a860f200b77d2801d51ec7a2bec9c81fc

commit a410965a860f200b77d2801d51ec7a2bec9c81fc
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:37 2017 +0200

    Y2038: add function __clock_nanosleep64

diff --git a/include/time.h b/include/time.h
index 4da5acf..72ee48e 100644
--- a/include/time.h
+++ b/include/time.h
@@ -56,6 +56,9 @@ extern int __clock_settime64 (clockid_t __clock_id,
 			       const struct __timespec64 *__tp) __THROW;
 extern int __clock_getres64 (clockid_t __clock_id,
 			      struct __timespec64 *__res) __THROW;
+extern int __clock_nanosleep64 (clockid_t __clock_id, int __flags,
+				const struct __timespec64 *__req,
+				struct __timespec64 *__rem);
 
 /* Now define the internal interfaces.  */
 struct tm;
diff --git a/sysdeps/unix/sysv/linux/clock_nanosleep.c b/sysdeps/unix/sysv/linux/clock_nanosleep.c
index 93d5d6e..e45df44 100644
--- a/sysdeps/unix/sysv/linux/clock_nanosleep.c
+++ b/sysdeps/unix/sysv/linux/clock_nanosleep.c
@@ -21,7 +21,6 @@
 #include <sysdep-cancel.h>
 #include "kernel-posix-cpu-timers.h"
 
-
 /* We can simply use the syscall.  The CPU clocks are not supported
    with this function.  */
 int
@@ -52,3 +51,108 @@ __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
 	  ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
 }
 weak_alias (__clock_nanosleep, clock_nanosleep)
+
+/* 64-bit time version */
+
+extern int __y2038_linux_support;
+
+int
+__clock_nanosleep64 (clockid_t clock_id, int flags,
+		   const struct __timespec64 *req,
+                   struct __timespec64 *rem)
+{
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+  struct timespec req32, rem32;
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clock_nanosleep64
+  struct __timespec64 req64;
+#endif
+
+  if (clock_id == CLOCK_THREAD_CPUTIME_ID)
+    return EINVAL;
+  if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
+    clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
+
+  if (SINGLE_THREAD_P)
+    {
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clock_nanosleep64
+      if (__y2038_linux_support)
+        {
+          req64.tv_sec = req->tv_sec;
+          req64.tv_nsec = req->tv_nsec;
+          req64.tv_pad = 0;
+          r = INTERNAL_SYSCALL (clock_nanosleep64, err, 4, clock_id, flags,
+                                &req64, rem);
+        }
+      else
+        {
+          __set_errno(ENOSYS);
+          r = -1;
+        }
+      if (r == -1 && errno == ENOSYS)
+#endif
+        {
+          if (! timespec64_to_timespec(req, &req32))
+            {
+              __set_errno(ENOSYS);
+              r = -1;
+            }
+          else
+            {
+              r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags,
+                                    &req32, &rem32);
+              if (r == -1 && errno == EINTR && rem != NULL && flags != TIMER_ABSTIME)
+                timespec_to_timespec64(&rem32, rem);
+            }
+        }
+    }
+  else
+    {
+      int oldstate = LIBC_CANCEL_ASYNC ();
+
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clock_nanosleep64
+      if (__y2038_linux_support)
+        {
+          req64.tv_sec = req->tv_sec;
+          req64.tv_nsec = req->tv_nsec;
+          req64.tv_pad = 0;
+          r = INTERNAL_SYSCALL (clock_nanosleep64, err, 4, clock_id, flags,
+                                &req64, rem);
+        }
+      else
+        {
+          __set_errno(ENOSYS);
+          r = -1;
+        }
+      if (r == -1 && errno == ENOSYS)
+#endif
+        {
+          if (! timespec64_to_timespec(req, &req32))
+            {
+              __set_errno(ENOSYS);
+              r = -1;
+            }
+          else
+            {
+              r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags,
+                                    &req32, &rem32);
+              if (r == -1 && errno == EINTR && rem != NULL && flags != TIMER_ABSTIME)
+                timespec_to_timespec64(&rem32, rem);
+            }
+        }
+      
+      LIBC_CANCEL_RESET (oldstate);
+    }
+
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    {
+      return INTERNAL_SYSCALL_ERRNO (r, err);
+    }
+  else
+    {
+      return 0;
+    }
+}
diff --git a/time/Versions b/time/Versions
index f00b123..3a3ad31 100644
--- a/time/Versions
+++ b/time/Versions
@@ -77,5 +77,6 @@ libc {
     __y2038_kernel_support;
     __clock_settime64;
     __clock_getres64;
+    __clock_nanosleep64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=6d237d571603ff9435942b906dd713e2409330c7

commit 6d237d571603ff9435942b906dd713e2409330c7
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:36 2017 +0200

    Y2038: add function __clock_getres64

diff --git a/include/time.h b/include/time.h
index 092935d..4da5acf 100644
--- a/include/time.h
+++ b/include/time.h
@@ -54,6 +54,8 @@ extern int __clock_gettime64 (clockid_t __clock_id,
 			      struct __timespec64 *__tp) __THROW;
 extern int __clock_settime64 (clockid_t __clock_id,
 			       const struct __timespec64 *__tp) __THROW;
+extern int __clock_getres64 (clockid_t __clock_id,
+			      struct __timespec64 *__res) __THROW;
 
 /* Now define the internal interfaces.  */
 struct tm;
diff --git a/sysdeps/posix/clock_getres.c b/sysdeps/posix/clock_getres.c
index 8bc7e81..3cb2a93 100644
--- a/sysdeps/posix/clock_getres.c
+++ b/sysdeps/posix/clock_getres.c
@@ -23,7 +23,6 @@
 #include <sys/param.h>
 #include <libc-internal.h>
 
-
 #if HP_TIMING_AVAIL
 static long int nsec;		/* Clock frequency of the processor.  */
 
@@ -53,6 +52,33 @@ hp_timing_getres (struct timespec *res)
 
   return 0;
 }
+
+static int
+hp_timing_getres64 (struct __timespec64 *res)
+{
+  if (__glibc_unlikely (nsec == 0))
+    {
+      hp_timing_t freq;
+
+      /* This can only happen if we haven't initialized the `nsec'
+	 variable yet.  Do this now.  We don't have to protect this
+	 code against multiple execution since all of them should
+	 lead to the same result.  */
+      freq = __get_clockfreq ();
+      if (__glibc_unlikely (freq == 0))
+	/* Something went wrong.  */
+	return -1;
+
+      nsec = MAX (UINT64_C (1000000000) / freq, 1);
+    }
+
+  /* Fill in the values.
+     The seconds are always zero (unless we have a 1Hz machine).  */
+  res->tv_sec = 0;
+  res->tv_nsec = nsec;
+
+  return 0;
+}
 #endif
 
 static inline int
@@ -73,6 +99,28 @@ realtime_getres (struct timespec *res)
   return -1;
 }
 
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgetres64
+
+static inline int
+realtime_getres64 (struct __timespec64 *res)
+{
+  long int clk_tck = sysconf (_SC_CLK_TCK);
+
+  if (__glibc_likely (clk_tck != -1))
+    {
+      /* This implementation assumes that the realtime clock has a
+	 resolution higher than 1 second.  This is the case for any
+	 reasonable implementation.  */
+      res->tv_sec = 0;
+      res->tv_nsec = 1000000000 / clk_tck;
+      return 0;
+    }
+
+  return -1;
+}
+
+#endif
 
 /* Get resolution of clock.  */
 int
@@ -116,3 +164,49 @@ __clock_getres (clockid_t clock_id, struct timespec *res)
   return retval;
 }
 weak_alias (__clock_getres, clock_getres)
+
+int
+__clock_getres64 (clockid_t clock_id, struct __timespec64 *res)
+{
+  int retval = -1;
+
+  switch (clock_id)
+    {
+#ifdef SYSDEP_GETRES64
+      SYSDEP_GETRES64;
+#endif
+
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgetres64
+
+# ifndef HANDLED_REALTIME64
+    case CLOCK_REALTIME64:
+      retval = realtime_getres64 (res);
+      break;
+# endif	/* handled REALTIME */
+
+#endif
+
+    default:
+#ifdef SYSDEP_GETRES_CPU64
+      SYSDEP_GETRES_CPU64;
+#endif
+#if HP_TIMING_AVAIL
+      if ((clock_id & ((1 << CLOCK_IDFIELD_SIZE) - 1))
+	  == CLOCK_THREAD_CPUTIME_ID)
+	retval = hp_timing_getres64 (res);
+      else
+#endif
+	__set_errno (EINVAL);
+      break;
+
+#if HP_TIMING_AVAIL && !defined HANDLED_CPUTIME
+    case CLOCK_PROCESS_CPUTIME_ID:
+    case CLOCK_THREAD_CPUTIME_ID:
+      retval = hp_timing_getres64 (res);
+      break;
+#endif
+    }
+
+  return retval;
+}
diff --git a/sysdeps/unix/sysv/linux/clock_getres.c b/sysdeps/unix/sysv/linux/clock_getres.c
index 5d94f59..de7e9e0 100644
--- a/sysdeps/unix/sysv/linux/clock_getres.c
+++ b/sysdeps/unix/sysv/linux/clock_getres.c
@@ -48,4 +48,53 @@
 #define SYSDEP_GETRES_CPU SYSCALL_GETRES
 #define SYSDEP_GETRES_CPUTIME	/* Default catches them too.  */
 
+/* The 64-bit version */
+
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgetres64
+
+extern int __y2038_linux_support;
+
+#define SYSCALL_GETRES64 \
+  if (__y2038_linux_support)						      \
+    {									      \
+      retval = INLINE_VSYSCALL (clock_getres64, 2, clock_id, res);  	      \
+    }									      \
+  else									      \
+    {									      \
+      retval = -1;                                                 	      \
+      errno = ENOSYS;                                                 	      \
+    }									      \
+  if (retval == -1 && errno == ENOSYS)					      \
+    {									      \
+      retval = INLINE_VSYSCALL (clock_getres, 2, clock_id, &ts32);	      \
+        if (retval==0)							      \
+        {								      \
+          timespec_to_timespec64(&ts32, res);	                	      \
+          res->tv_pad = 0;				               	      \
+        }								      \
+    }									      \
+  break
+
+/* The REALTIME and MONOTONIC clock are definitely supported in the
+   kernel.  */
+#define SYSDEP_GETRES64							      \
+  SYSDEP_GETRES_CPUTIME64						      \
+  case CLOCK_REALTIME:							      \
+  case CLOCK_MONOTONIC:							      \
+  case CLOCK_MONOTONIC_RAW:						      \
+  case CLOCK_REALTIME_COARSE:						      \
+  case CLOCK_MONOTONIC_COARSE:						      \
+    SYSCALL_GETRES64
+
+/* We handled the REALTIME clock here.  */
+#define HANDLED_REALTIME64	1
+#define HANDLED_CPUTIME64	1
+
+#define SYSDEP_GETRES_CPU64 SYSCALL_GETRES64
+#define SYSDEP_GETRES_CPUTIME64 \
+  struct timespec ts32;
+
+#endif
+
 #include <sysdeps/posix/clock_getres.c>
diff --git a/time/Versions b/time/Versions
index 9511739..f00b123 100644
--- a/time/Versions
+++ b/time/Versions
@@ -76,5 +76,6 @@ libc {
     __vdso_clock_gettime64;
     __y2038_kernel_support;
     __clock_settime64;
+    __clock_getres64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=717d0f93eb66cf7e4581d250cca5c8539fc4b11b

commit 717d0f93eb66cf7e4581d250cca5c8539fc4b11b
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:35 2017 +0200

    Y2038: add function __clock_settime64

diff --git a/include/time.h b/include/time.h
index 60e7744..092935d 100644
--- a/include/time.h
+++ b/include/time.h
@@ -52,6 +52,8 @@ extern __typeof (clock_getcpuclockid) __clock_getcpuclockid;
 
 extern int __clock_gettime64 (clockid_t __clock_id,
 			      struct __timespec64 *__tp) __THROW;
+extern int __clock_settime64 (clockid_t __clock_id,
+			       const struct __timespec64 *__tp) __THROW;
 
 /* Now define the internal interfaces.  */
 struct tm;
diff --git a/sysdeps/unix/clock_settime.c b/sysdeps/unix/clock_settime.c
index 38813ed..9eed04b 100644
--- a/sysdeps/unix/clock_settime.c
+++ b/sysdeps/unix/clock_settime.c
@@ -68,8 +68,62 @@ hp_timing_settime (clockid_t clock_id, const struct timespec *tp)
 }
 #endif
 
+/* Set CLOCK to value TP, 64-bit Y2038-safe version.  */
+int
+__clock_settime64 (clockid_t clock_id, const struct __timespec64 *tp)
+{
+  int retval = -1;
+
+  /* Make sure the time value is OK.  */
+  if (! IS_VALID_NANOSECONDS(tp->tv_nsec))
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  switch (clock_id)
+    {
+#define HANDLE_REALTIME \
+      do {								      \
+	struct timeval tv;						      \
+	TIMESPEC_TO_TIMEVAL (&tv, tp);					      \
+									      \
+	retval = settimeofday (&tv, NULL);				      \
+      } while (0)
+
+#ifdef SYSDEP_SETTIME64
+      SYSDEP_SETTIME64;
+#endif
+
+#ifndef HANDLED_REALTIME
+    case CLOCK_REALTIME:
+      HANDLE_REALTIME;
+      break;
+#endif
+
+    default:
+#ifdef SYSDEP_SETTIME64_CPU
+      SYSDEP_SETTIME64_CPU;
+#endif
+#ifndef HANDLED_CPUTIME
+# if HP_TIMING_AVAIL
+      if (CPUCLOCK_WHICH (clock_id) == CLOCK_PROCESS_CPUTIME_ID
+	  || CPUCLOCK_WHICH (clock_id) == CLOCK_THREAD_CPUTIME_ID)
+	retval = hp_timing_settime (clock_id, tp);
+      else
+# endif
+	{
+	  __set_errno (EINVAL);
+	  retval = -1;
+	}
+#endif
+      break;
+    }
+
+  return retval;
+}
 
-/* Set CLOCK to value TP.  */
+/* Set CLOCK to value TP, 32-bit Y2038-unsafe version.  */
 int
 __clock_settime (clockid_t clock_id, const struct timespec *tp)
 {
diff --git a/sysdeps/unix/sysv/linux/clock_settime.c b/sysdeps/unix/sysv/linux/clock_settime.c
index 5f3f22f..d7c24aa 100644
--- a/sysdeps/unix/sysv/linux/clock_settime.c
+++ b/sysdeps/unix/sysv/linux/clock_settime.c
@@ -35,4 +35,43 @@
 #define SYSDEP_SETTIME_CPU \
   retval = INLINE_SYSCALL (clock_settime, 2, clock_id, tp)
 
+/* 64-bit time version */
+
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgettime64
+
+extern int __y2038_linux_support;
+
+#define SYSDEP_SETTIME64 \
+  case CLOCK_REALTIME:							      \
+    if (__y2038_linux_support)						      \
+      {									      \
+        struct __timespec64 ts64;					      \
+        ts64.tv_sec = tp->tv_sec;					      \
+        ts64.tv_nsec = tp->tv_nsec;					      \
+        ts64.tv_pad = 0;						      \
+        retval = INLINE_SYSCALL (clock_settime64, 2, clock_id, &ts64);	      \
+      }									      \
+    else                         					      \
+      {									      \
+        retval = -1;                                                          \
+        __set_errno (ENOSYS);                                                 \
+      }									      \
+    if (retval == -1 && errno == ENOSYS)				      \
+      {									      \
+        struct timespec ts32;						      \
+        if (! fits_in_time_t(tp->tv_sec))                                     \
+         {                                                                    \
+           __set_errno (EOVERFLOW);                                           \
+         }                                                                    \
+        else                                                                  \
+          {                                                                   \
+            valid_timespec64_to_timespec(tp, &ts32);  			      \
+            retval = INLINE_SYSCALL (clock_settime, 2, clock_id, &ts32);      \
+          }                                                                   \
+      }                                                                       \
+    break
+
+#endif
+
 #include <sysdeps/unix/clock_settime.c>
diff --git a/time/Versions b/time/Versions
index 6b02cff..9511739 100644
--- a/time/Versions
+++ b/time/Versions
@@ -75,5 +75,6 @@ libc {
     __clock_gettime64;
     __vdso_clock_gettime64;
     __y2038_kernel_support;
+    __clock_settime64;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=257be52f5815cc0f2790e13345f23c11c28576fc

commit 257be52f5815cc0f2790e13345f23c11c28576fc
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:34 2017 +0200

    Y2038: add function __clock_gettime64
    
    Note: __clock_gettime64 is implemented via the vDSO entry point
    __vdso_clock_gettime64 when the kernel provides one.
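
In other words, init-first.c looks up __vdso_clock_gettime64 and records whether
it was found in __y2038_linux_support; __clock_gettime64 then takes the 64-bit
vDSO/syscall path when that flag is set and falls back to the 32-bit
clock_gettime path otherwise, widening the result. A hedged sketch of internal
use (both declarations come from glibc's private include/time.h as modified
below):

    static int
    get_now64 (struct __timespec64 *now)
    {
      /* Callers need not test __y2038_kernel_support () first:
         __clock_gettime64 degrades to the 32-bit syscall on older
         kernels and widens the result itself.  */
      return __clock_gettime64 (CLOCK_REALTIME, now);
    }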

diff --git a/include/time.h b/include/time.h
index 4f95a2d..60e7744 100644
--- a/include/time.h
+++ b/include/time.h
@@ -22,6 +22,11 @@ libc_hidden_proto (localtime)
 libc_hidden_proto (strftime)
 libc_hidden_proto (strptime)
 
+/* Indicates whether the underlying kernel has 64-bit time support.
+   This is required for e.g. librt, which cannot directly check the
+   flag variable that init-first.c sets when detecting support. */
+extern int __y2038_kernel_support (void);
+
 #if BYTE_ORDER == BIG_ENDIAN
 struct __timespec64
 {
@@ -45,6 +50,9 @@ extern __typeof (clock_settime) __clock_settime;
 extern __typeof (clock_nanosleep) __clock_nanosleep;
 extern __typeof (clock_getcpuclockid) __clock_getcpuclockid;
 
+extern int __clock_gettime64 (clockid_t __clock_id,
+			      struct __timespec64 *__tp) __THROW;
+
 /* Now define the internal interfaces.  */
 struct tm;
 
diff --git a/sysdeps/unix/clock_gettime.c b/sysdeps/unix/clock_gettime.c
index 96df78a..991f04e 100644
--- a/sysdeps/unix/clock_gettime.c
+++ b/sysdeps/unix/clock_gettime.c
@@ -134,3 +134,49 @@ __clock_gettime (clockid_t clock_id, struct timespec *tp)
 }
 weak_alias (__clock_gettime, clock_gettime)
 libc_hidden_def (__clock_gettime)
+
+/* Get current value of CLOCK and store it in TP, 64-bit version.  */
+int
+__clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
+{
+  int retval = -1;
+
+  switch (clock_id)
+    {
+#ifdef SYSDEP_GETTIME64
+      SYSDEP_GETTIME64;
+#endif
+
+#ifndef HANDLED_REALTIME
+    case CLOCK_REALTIME:
+      {
+	struct timeval tv;
+	retval = gettimeofday (&tv, NULL);
+	if (retval == 0)
+	  TIMEVAL_TO_TIMESPEC (&tv, tp);
+      }
+      break;
+#endif
+
+    default:
+#ifdef SYSDEP_GETTIME64_CPU
+      SYSDEP_GETTIME64_CPU (clock_id, tp);
+#endif
+#if HP_TIMING_AVAIL
+      if ((clock_id & ((1 << CLOCK_IDFIELD_SIZE) - 1))
+	  == CLOCK_THREAD_CPUTIME_ID)
+	retval = hp_timing_gettime (clock_id, tp);
+      else
+#endif
+	__set_errno (EINVAL);
+      break;
+
+#if HP_TIMING_AVAIL && !defined HANDLED_CPUTIME
+    case CLOCK_PROCESS_CPUTIME_ID:
+      retval = hp_timing_gettime (clock_id, tp);
+      break;
+#endif
+    }
+
+  return retval;
+}
diff --git a/sysdeps/unix/sysv/linux/arm/init-first.c b/sysdeps/unix/sysv/linux/arm/init-first.c
index f4293b1..76528bf 100644
--- a/sysdeps/unix/sysv/linux/arm/init-first.c
+++ b/sysdeps/unix/sysv/linux/arm/init-first.c
@@ -23,6 +23,14 @@
 
 int (*VDSO_SYMBOL(gettimeofday)) (struct timeval *, void *) attribute_hidden;
 int (*VDSO_SYMBOL(clock_gettime)) (clockid_t, struct timespec *);
+long (*VDSO_SYMBOL(clock_gettime64)) (clockid_t, struct __timespec64 *);
+
+int __y2038_linux_support;
+
+int __y2038_kernel_support (void)
+{
+  return __y2038_linux_support;
+}
 
 static inline void
 _libc_vdso_platform_setup (void)
@@ -36,6 +44,13 @@ _libc_vdso_platform_setup (void)
   p = _dl_vdso_vsym ("__vdso_clock_gettime", &linux26);
   PTR_MANGLE (p);
   VDSO_SYMBOL (clock_gettime) = p;
+
+  /* (aaribaud) TODO: map to version where clock_gettime64 officially appears */
+  p = _dl_vdso_vsym ("__vdso_clock_gettime64", NULL);
+  PTR_MANGLE (p);
+  VDSO_SYMBOL (clock_gettime64) = p;
+
+  __y2038_linux_support = (p != NULL) ? 1 : 0;
 }
 
 # define VDSO_SETUP _libc_vdso_platform_setup
diff --git a/sysdeps/unix/sysv/linux/arm/libc-vdso.h b/sysdeps/unix/sysv/linux/arm/libc-vdso.h
index 52dd355..dc61624 100644
--- a/sysdeps/unix/sysv/linux/arm/libc-vdso.h
+++ b/sysdeps/unix/sysv/linux/arm/libc-vdso.h
@@ -27,6 +27,7 @@
 extern int (*VDSO_SYMBOL(gettimeofday)) (struct timeval *, void *)
    attribute_hidden;
 extern int (*VDSO_SYMBOL(clock_gettime)) (clockid_t, struct timespec *);
+extern long (*VDSO_SYMBOL(clock_gettime64)) (clockid_t, struct __timespec64 *);
 
 #endif
 
diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
index d837fa3..c6fa15c 100644
--- a/sysdeps/unix/sysv/linux/clock_gettime.c
+++ b/sysdeps/unix/sysv/linux/clock_gettime.c
@@ -44,4 +44,59 @@
   break
 #define SYSDEP_GETTIME_CPUTIME	/* Default catches them too.  */
 
+/* 64-bit versions */
+
+/* Check that we are built with a 64-bit-time kernel */
+#ifdef __NR_clockgettime64
+
+/* The REALTIME and MONOTONIC clock are definitely supported in the
+   kernel.  */
+#define SYSDEP_GETTIME64 \
+  SYSDEP_GETTIME64_CPUTIME;						      \
+  case CLOCK_REALTIME:							      \
+  case CLOCK_MONOTONIC:							      \
+    if (__y2038_linux_support)						      \
+      {									      \
+        retval = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);	      \
+      }									      \
+    else								      \
+      {									      \
+        retval = -1;                                                          \
+        __set_errno(ENOSYS);                                                  \
+      }									      \
+    if (retval == -1 && errno == ENOSYS)				      \
+      {									      \
+        retval = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &ts32);	      \
+        if (retval==0)							      \
+          {								      \
+            valid_timespec_to_timespec64(&ts32, tp);			      \
+          }								      \
+      }									      \
+    break
+
+#define SYSDEP_GETTIME64_CPU(clock_id, tp) \
+  if (__y2038_linux_support)						      \
+    {									      \
+      retval = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);	      \
+    }									      \
+  else								              \
+    {									      \
+      retval = -1;                                                            \
+      __set_errno(ENOSYS);                                                    \
+    }									      \
+  if (retval == -1 && errno == ENOSYS)  			              \
+    {									      \
+      retval = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &ts32);	      \
+      if (retval==0)							      \
+        {								      \
+          valid_timespec_to_timespec64(&ts32, tp);			      \
+        }								      \
+    }									      \
+  break
+#define SYSDEP_GETTIME64_CPUTIME \
+  struct timespec ts32;							      \
+  extern int __y2038_linux_support;
+
+#endif
+
 #include <sysdeps/unix/clock_gettime.c>
diff --git a/time/Versions b/time/Versions
index c9f44f2..6b02cff 100644
--- a/time/Versions
+++ b/time/Versions
@@ -72,5 +72,8 @@ libc {
     __localtime64; __localtime64_r;
     __mktime64; __timelocal64_r;
     __timegm64;
+    __clock_gettime64;
+    __vdso_clock_gettime64;
+    __y2038_kernel_support;
   }
 }

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d6a6eff8a403ca34249d0d6103e3b603d1b82e1b

commit d6a6eff8a403ca34249d0d6103e3b603d1b82e1b
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:33 2017 +0200

    Y2038: add struct __timespec64
    
    To be Y2038-proof, struct __timespec64 needs its tv_sec field to
    be a __time64_t rather than a __time_t. However, the question is
    which type the tv_nsec field should be.
    
    Keeping tv_nsec a long (32-bit) would be compatible with Posix
    requirements but would result in the GLIBC struct timespec being
    binary-incompatible with the Linux 64-bit struct timespec, which
    contains a 64-bit, not 32-bit, signed tv_nsec field.
    
    In order to maintain Posix compatibility yet simplify conversion
    between Posix and Linux struct timespec values, the Y2038-proof
    struct __timespec64 stores its tv_nsec field as a 32-bit signed integer
    plus a padding which can serve as a 64-bit sign extension. This
    both meets Posix requirements and makes the GLIBC and Linux
    struct timespec binary compatible.
    
    Note that in the API (which is not modified here, and will be
    later alongside all Y2038-sensitive APIs), this padding is made
    'invisible' by defining it as an anonymous bitfield, whereas
    the struct __timespec64 introduced here has a named field for
    the padding, allowing implementations to read and write it.
    
    Also, provide static inline functions and macros for checking
    and converting between 32-bit and 64-bit timespecs.
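
Concretely, when a struct __timespec64 is handed to (or received from) the
kernel, tv_pad occupies the bytes of the kernel's 64-bit tv_nsec beyond the
low 32 bits, which is why the implementations in this series clear it
explicitly before every syscall while the conversion helpers leave it
untouched for purely user-space copies. A small sketch (internal type from
include/time.h below):

    struct __timespec64 ts;

    ts.tv_sec  = (__time64_t) 1 << 32;   /* a post-2038 second count */
    ts.tv_nsec = 500000000;              /* 0.5 s */
    ts.tv_pad  = 0;                      /* required before passing to the kernel */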

diff --git a/include/time.h b/include/time.h
index ad114f3..4f95a2d 100644
--- a/include/time.h
+++ b/include/time.h
@@ -5,6 +5,9 @@
 # include <bits/types/locale_t.h>
 # include <stdbool.h>
 
+#include <endian.h>
+#include <stdbool.h>
+
 extern __typeof (strftime_l) __strftime_l;
 libc_hidden_proto (__strftime_l)
 extern __typeof (strptime_l) __strptime_l;
@@ -19,6 +22,22 @@ libc_hidden_proto (localtime)
 libc_hidden_proto (strftime)
 libc_hidden_proto (strptime)
 
+#if BYTE_ORDER == BIG_ENDIAN
+struct __timespec64
+{
+  __time64_t tv_sec;		/* Seconds */
+  int tv_pad: 32;		/* Padding named for checking/setting */
+  __syscall_slong_t tv_nsec;	/* Nanoseconds */
+};
+#else
+struct __timespec64
+{
+  __time64_t tv_sec;		/* Seconds */
+  __syscall_slong_t tv_nsec;	/* Nanoseconds */
+  int tv_pad: 32;		/* Padding named for checking/setting */
+};
+#endif
+
 extern __typeof (clock_getres) __clock_getres;
 extern __typeof (clock_gettime) __clock_gettime;
 libc_hidden_proto (__clock_gettime)
@@ -134,5 +153,55 @@ fits_in_time_t (__time64_t t)
   return t == (time_t) t;
 }
 
+/* convert a known valid struct timespec into a struct __timespec64 */
+static inline void
+valid_timespec_to_timespec64(const struct timespec *ts32,
+			     struct __timespec64 *ts64)
+{
+  ts64->tv_sec = ts32->tv_sec;
+  ts64->tv_nsec = ts32->tv_nsec;
+  /* we only need to zero ts64->tv_pad if we pass it to the kernel */
+}
+
+/* convert a known valid struct __timespec64 into a struct timespec */
+static inline void
+valid_timespec64_to_timespec(const struct __timespec64 *ts64,
+			     struct timespec *ts32)
+{
+  ts32->tv_sec = (time_t) ts64->tv_sec;
+  ts32->tv_nsec = ts64->tv_nsec;
+}
+
+/* check if a value lies within the valid nanoseconds range */
+#define IS_VALID_NANOSECONDS(ns) (ns >= 0 && ns <= 999999999)
+
+/* check and convert a struct timespec into a struct __timespec64 */
+static inline bool timespec_to_timespec64(const struct timespec *ts32,
+					  struct __timespec64 *ts64)
+{
+  /* check that ts32 holds a valid count of nanoseconds */
+  if (! IS_VALID_NANOSECONDS(ts32->tv_nsec))
+    return false;
+  /* all ts32 fields can fit in ts64, so copy them */
+  valid_timespec_to_timespec64(ts32, ts64);
+  /* we only need to zero ts64->tv_pad if we pass it to the kernel */
+  return true;
+}
+
+/* check and convert a struct __timespec64 into a struct timespec */
+static inline bool timespec64_to_timespec(const struct __timespec64 *ts64,
+					  struct timespec *ts32)
+{
+  /* check that tv_nsec holds a valid count of nanoseconds */
+  if (! IS_VALID_NANOSECONDS(ts64->tv_nsec))
+    return false;
+  /* check that tv_sec can fit in a __time_t */
+  if (! fits_in_time_t(ts64->tv_sec))
+    return false;
+  /* all ts64 fields can fit in ts32, so copy them */
+  valid_timespec64_to_timespec(ts64, ts32);
+  return true;
+}
+
 #endif
 #endif

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=b80bf1a36635d8e7a238e89f4137d6d973a4f85a

commit b80bf1a36635d8e7a238e89f4137d6d973a4f85a
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Wed Apr 5 22:51:34 2017 +0200

    Y2038: add function __timegm64
    
    Implementation is based on the same __mktime64_internal function
    which was introduced in the '__mktime64' implementation change.
    
    Again, the implementation does not require a Y2038-proof kernel.
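
A hedged example of the intended use (the symbol is internal, exported only
under the temporary GLIBC_Y2038 version, and the exact prototype is assumed
from the diff below):

    struct tm tm = { 0 };
    __time64_t t;

    tm.tm_year = 2039 - 1900;     /* years since 1900 */
    tm.tm_mon = 0;                /* January */
    tm.tm_mday = 20;

    /* Interprets TM as UTC; unlike __mktime64 it does not consult the
       local timezone, and the result needs the full __time64_t range.  */
    t = __timegm64 (&tm);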

diff --git a/time/Versions b/time/Versions
index 271f989..c9f44f2 100644
--- a/time/Versions
+++ b/time/Versions
@@ -71,5 +71,6 @@ libc {
     __gmtime64; __gmtime64_r;
     __localtime64; __localtime64_r;
     __mktime64; __timelocal64_r;
+    __timegm64;
   }
 }
diff --git a/time/timegm.c b/time/timegm.c
index fb720e2..8ac9b0b 100644
--- a/time/timegm.c
+++ b/time/timegm.c
@@ -36,6 +36,9 @@
 time_t __mktime_internal (struct tm *,
 			  struct tm * (*) (time_t const *, struct tm *),
 			  time_t *);
+__time64_t __mktime64_internal (struct tm *,
+				struct tm * (*) (__time64_t const *, struct tm *),
+				__time64_t *);
 #endif
 
 time_t
@@ -45,3 +48,11 @@ timegm (struct tm *tmp)
   tmp->tm_isdst = 0;
   return __mktime_internal (tmp, __gmtime_r, &gmtime_offset);
 }
+
+__time64_t
+__timegm64 (struct tm *tmp)
+{
+  static __time64_t gmtime64_offset;
+  tmp->tm_isdst = 0;
+  return __mktime64_internal (tmp, __gmtime64_r, &gmtime64_offset);
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=365224ea71bf1566661f9d592bc9b713bb8f838f

commit 365224ea71bf1566661f9d592bc9b713bb8f838f
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:31 2017 +0200

    Y2038: add function __mktime64 (and timelocal)
    
    __mktime64 is designed similarly to mktime, including checks on (64-bit)
    integer limits, and respects the same Posix requirements as mktime does,
    i.e. it calls tzset().
    
    timelocal is a macro which evaluates to mktime, so when APIs are enabled,
    both mktime and timelocal will become Y2038-proof.
    
    Also, the implementation does not require a Y2038-proof kernel.
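
A matching sketch for the local-time variant (again internal-only at this
stage; the prototype is assumed to mirror mktime with a __time64_t result):

    struct tm tm = { 0 };
    __time64_t t;

    tm.tm_year = 2040 - 1900;
    tm.tm_mon = 5;                /* June */
    tm.tm_mday = 1;
    tm.tm_isdst = -1;             /* let the implementation determine DST */

    /* Like mktime, this calls tzset() and normalizes TM.  */
    t = __mktime64 (&tm);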

diff --git a/include/time.h b/include/time.h
index 81db483..ad114f3 100644
--- a/include/time.h
+++ b/include/time.h
@@ -59,6 +59,15 @@ extern time_t __mktime_internal (struct tm *__tp,
 				 struct tm *(*__func) (const time_t *,
 						       struct tm *),
 				 time_t *__offset) attribute_hidden;
+
+/* Subroutine of `__mktime64'.  Return the `__time64_t' representation of TP and
+   normalize TP, given that a `struct tm *' maps to a `__time64_t' as performed
+   by FUNC.  Keep track of next guess for __time64_t offset in *OFFSET.  */
+extern __time64_t __mktime64_internal (struct tm *__tp,
+				 struct tm *(*__func) (const __time64_t *,
+						       struct tm *),
+				 __time64_t *__offset) attribute_hidden;
+
 extern struct tm *__localtime_r (const time_t *__timer,
 				 struct tm *__tp) attribute_hidden;
 
diff --git a/time/Versions b/time/Versions
index 78afba0..271f989 100644
--- a/time/Versions
+++ b/time/Versions
@@ -70,5 +70,6 @@ libc {
     __ctime64; __ctime64_r;
     __gmtime64; __gmtime64_r;
     __localtime64; __localtime64_r;
+    __mktime64; __timelocal64_r;
   }
 }
diff --git a/time/mktime.c b/time/mktime.c
index 5f038a2..d85a2a3 100644
--- a/time/mktime.c
+++ b/time/mktime.c
@@ -599,6 +599,409 @@ weak_alias (mktime, timelocal)
 libc_hidden_def (mktime)
 libc_hidden_weak (timelocal)
 #endif
+
+/* Return an integer value measuring (YEAR1-YDAY1 HOUR1:MIN1:SEC1) -
+   (YEAR0-YDAY0 HOUR0:MIN0:SEC0) in seconds, assuming that the clocks
+   were not adjusted between the time stamps.
+
+   The YEAR values use the same numbering as TP->tm_year.  Values
+   need not be in the usual range.  However, YEAR1 must not be less
+   than 2 * INT_MIN or greater than 2 * INT_MAX.
+
+   The result may overflow.  It is the caller's responsibility to
+   detect overflow.  */
+
+static __time64_t
+ydhms64_diff (long_int year1, long_int yday1, int hour1, int min1, int sec1,
+	    int year0, int yday0, int hour0, int min0, int sec0)
+{
+  verify (C99_integer_division, -1 / 2 == 0);
+
+  /* Compute intervening leap days correctly even if year is negative.
+     Take care to avoid integer overflow here.  */
+  int a4 = SHR (year1, 2) + SHR (TM_YEAR_BASE, 2) - ! (year1 & 3);
+  int b4 = SHR (year0, 2) + SHR (TM_YEAR_BASE, 2) - ! (year0 & 3);
+  int a100 = a4 / 25 - (a4 % 25 < 0);
+  int b100 = b4 / 25 - (b4 % 25 < 0);
+  int a400 = SHR (a100, 2);
+  int b400 = SHR (b100, 2);
+  int intervening_leap_days = (a4 - b4) - (a100 - b100) + (a400 - b400);
+
+  /* Compute the desired time in __time64_t precision.  Overflow might
+     occur here.  */
+  __time64_t tyear1 = year1;
+  __time64_t years = tyear1 - year0;
+  __time64_t days = 365 * years + yday1 - yday0 + intervening_leap_days;
+  __time64_t hours = 24 * days + hour1 - hour0;
+  __time64_t minutes = 60 * hours + min1 - min0;
+  __time64_t seconds = 60 * minutes + sec1 - sec0;
+  return seconds;
+}
+
+/* Return the average of A and B, even if A + B would overflow.  */
+static __time64_t
+time64_t_avg (__time64_t a, __time64_t b)
+{
+  return SHR (a, 1) + SHR (b, 1) + (a & b & 1);
+}
+
+/* Return 1 if A + B does not overflow.  If __time64_t is unsigned and if
+   B's top bit is set, assume that the sum represents A - -B, and
+   return 1 if the subtraction does not wrap around.  */
+static int
+time64_t_add_ok (__time64_t a, __time64_t b)
+{
+  if (! TYPE_SIGNED (__time64_t))
+    {
+      __time64_t sum = a + b;
+      return (sum < a) == (TIME_T_MIDPOINT <= b);
+    }
+  else if (WRAPV)
+    {
+      __time64_t sum = a + b;
+      return (sum < a) == (b < 0);
+    }
+  else
+    {
+      __time64_t avg = time64_t_avg (a, b);
+      return TIME_T_MIN / 2 <= avg && avg <= TIME_T_MAX / 2;
+    }
+}
+
+/* Return 1 if A + B does not overflow.  */
+static int
+time64_t_int_add_ok (__time64_t a, int b)
+{
+  verify (int_no_wider_than_time64_t, INT_MAX <= TIME_T_MAX);
+  if (WRAPV)
+    {
+      __time64_t sum = a + b;
+      return (sum < a) == (b < 0);
+    }
+  else
+    {
+      int a_odd = a & 1;
+      __time64_t avg = SHR (a, 1) + (SHR (b, 1) + (a_odd & b));
+      return TIME_T_MIN / 2 <= avg && avg <= TIME_T_MAX / 2;
+    }
+}
+
+/* Return a __time64_t value corresponding to (YEAR-YDAY HOUR:MIN:SEC),
+   assuming that *T corresponds to *TP and that no clock adjustments
+   occurred between *TP and the desired time.
+   If TP is null, return a value not equal to *T; this avoids false matches.
+   If overflow occurs, yield the minimal or maximal value, except do not
+   yield a value equal to *T.  */
+static __time64_t
+guess_time64_tm (long_int year, long_int yday, int hour, int min, int sec,
+	       const __time64_t *t, const struct tm *tp)
+{
+  if (tp)
+    {
+      __time64_t d = ydhms64_diff (year, yday, hour, min, sec,
+			     tp->tm_year, tp->tm_yday,
+			     tp->tm_hour, tp->tm_min, tp->tm_sec);
+      if (time64_t_add_ok (*t, d))
+	return *t + d;
+    }
+
+  /* Overflow occurred one way or another.  Return the nearest result
+     that is actually in range, except don't report a zero difference
+     if the actual difference is nonzero, as that would cause a false
+     match; and don't oscillate between two values, as that would
+     confuse the spring-forward gap detector.  */
+  return (*t < TIME_T_MIDPOINT
+	  ? (*t <= TIME_T_MIN + 1 ? *t + 1 : TIME_T_MIN)
+	  : (TIME_T_MAX - 1 <= *t ? *t - 1 : TIME_T_MAX));
+}
+
+/* Use CONVERT to convert *T to a broken down time in *TP.
+   If *T is out of range for conversion, adjust it so that
+   it is the nearest in-range value and then convert that.  */
+static struct tm *
+ranged64_convert (struct tm *(*convert) (const __time64_t *, struct tm *),
+		__time64_t *t, struct tm *tp)
+{
+  struct tm *r = convert (t, tp);
+
+  if (!r && *t)
+    {
+      __time64_t bad = *t;
+      __time64_t ok = 0;
+
+      /* BAD is a known unconvertible __time64_t, and OK is a known good one.
+	 Use binary search to narrow the range between BAD and OK until
+	 they differ by 1.  */
+      while (bad != ok + (bad < 0 ? -1 : 1))
+	{
+	  __time64_t mid = *t = time64_t_avg (ok, bad);
+	  r = convert (t, tp);
+	  if (r)
+	    ok = mid;
+	  else
+	    bad = mid;
+	}
+
+      if (!r && ok)
+	{
+	  /* The last conversion attempt failed;
+	     revert to the most recent successful attempt.  */
+	  *t = ok;
+	  r = convert (t, tp);
+	}
+    }
+
+  return r;
+}
+
+
+/* Convert *TP to a __time64_t value, inverting
+   the monotonic and mostly-unit-linear conversion function CONVERT.
+   Use *OFFSET to keep track of a guess at the offset of the result,
+   compared to what the result would be for UTC without leap seconds.
+   If *OFFSET's guess is correct, only one CONVERT call is needed.
+   This function is external because it is used also by timegm.c.  */
+__time64_t
+__mktime64_internal (struct tm *tp,
+		   struct tm *(*convert) (const __time64_t *, struct tm *),
+		   __time64_t *offset)
+{
+  __time64_t t, gt, t0, t1, t2;
+  struct tm tm;
+
+  /* The maximum number of probes (calls to CONVERT) should be enough
+     to handle any combinations of time zone rule changes, solar time,
+     leap seconds, and oscillations around a spring-forward gap.
+     POSIX.1 prohibits leap seconds, but some hosts have them anyway.  */
+  int remaining_probes = 6;
+
+  /* Time requested.  Copy it in case CONVERT modifies *TP; this can
+     occur if TP is localtime's returned value and CONVERT is localtime.  */
+  int sec = tp->tm_sec;
+  int min = tp->tm_min;
+  int hour = tp->tm_hour;
+  int mday = tp->tm_mday;
+  int mon = tp->tm_mon;
+  int year_requested = tp->tm_year;
+  int isdst = tp->tm_isdst;
+
+  /* 1 if the previous probe was DST.  */
+  int dst2;
+
+  /* Ensure that mon is in range, and set year accordingly.  */
+  int mon_remainder = mon % 12;
+  int negative_mon_remainder = mon_remainder < 0;
+  int mon_years = mon / 12 - negative_mon_remainder;
+  long_int lyear_requested = year_requested;
+  long_int year = lyear_requested + mon_years;
+
+  /* The other values need not be in range:
+     the remaining code handles minor overflows correctly,
+     assuming int and __time64_t arithmetic wraps around.
+     Major overflows are caught at the end.  */
+
+  /* Calculate day of year from year, month, and day of month.
+     The result need not be in range.  */
+  int mon_yday = ((__mon_yday[leapyear (year)]
+		   [mon_remainder + 12 * negative_mon_remainder])
+		  - 1);
+  long_int lmday = mday;
+  long_int yday = mon_yday + lmday;
+
+  __time64_t guessed_offset = *offset;
+
+  int sec_requested = sec;
+
+  if (LEAP_SECONDS_POSSIBLE)
+    {
+      /* Handle out-of-range seconds specially,
+	 since ydhms_tm_diff assumes every minute has 60 seconds.  */
+      if (sec < 0)
+	sec = 0;
+      if (59 < sec)
+	sec = 59;
+    }
+
+  /* Invert CONVERT by probing.  First assume the same offset as last
+     time.  */
+
+  t0 = ydhms64_diff (year, yday, hour, min, sec,
+		   EPOCH_YEAR - TM_YEAR_BASE, 0, 0, 0, - guessed_offset);
+
+  if (TIME_T_MAX / INT_MAX / 366 / 24 / 60 / 60 < 3)
+    {
+      /* __time64_t isn't large enough to rule out overflows, so check
+	 for major overflows.  A gross check suffices, since if t0
+	 has overflowed, it is off by a multiple of TIME_T_MAX -
+	 TIME_T_MIN + 1.  So ignore any component of the difference
+	 that is bounded by a small value.  */
+
+      /* Approximate log base 2 of the number of time units per
+	 biennium.  A biennium is 2 years; use this unit instead of
+	 years to avoid integer overflow.  For example, 2 average
+	 Gregorian years are 2 * 365.2425 * 24 * 60 * 60 seconds,
+	 which is 63113904 seconds, and rint (log2 (63113904)) is
+	 26.  */
+      int ALOG2_SECONDS_PER_BIENNIUM = 26;
+      int ALOG2_MINUTES_PER_BIENNIUM = 20;
+      int ALOG2_HOURS_PER_BIENNIUM = 14;
+      int ALOG2_DAYS_PER_BIENNIUM = 10;
+      int LOG2_YEARS_PER_BIENNIUM = 1;
+
+      int approx_requested_biennia =
+	(SHR (year_requested, LOG2_YEARS_PER_BIENNIUM)
+	 - SHR (EPOCH_YEAR - TM_YEAR_BASE, LOG2_YEARS_PER_BIENNIUM)
+	 + SHR (mday, ALOG2_DAYS_PER_BIENNIUM)
+	 + SHR (hour, ALOG2_HOURS_PER_BIENNIUM)
+	 + SHR (min, ALOG2_MINUTES_PER_BIENNIUM)
+	 + (LEAP_SECONDS_POSSIBLE
+	    ? 0
+	    : SHR (sec, ALOG2_SECONDS_PER_BIENNIUM)));
+
+      int approx_biennia = SHR (t0, ALOG2_SECONDS_PER_BIENNIUM);
+      int diff = approx_biennia - approx_requested_biennia;
+      int approx_abs_diff = diff < 0 ? -1 - diff : diff;
+
+      /* IRIX 4.0.5 cc miscalculates TIME_T_MIN / 3: it erroneously
+	 gives a positive value of 715827882.  Setting a variable
+	 first then doing math on it seems to work.
+	 (ghazi@caip.rutgers.edu) */
+      __time64_t time64_t_max = TIME_T_MAX;
+      __time64_t time64_t_min = TIME_T_MIN;
+      __time64_t overflow_threshold =
+	(time64_t_max / 3 - time64_t_min / 3) >> ALOG2_SECONDS_PER_BIENNIUM;
+
+      if (overflow_threshold < approx_abs_diff)
+	{
+	  /* Overflow occurred.  Try repairing it; this might work if
+	     the time zone offset is enough to undo the overflow.  */
+	  __time64_t repaired_t0 = -1 - t0;
+	  approx_biennia = SHR (repaired_t0, ALOG2_SECONDS_PER_BIENNIUM);
+	  diff = approx_biennia - approx_requested_biennia;
+	  approx_abs_diff = diff < 0 ? -1 - diff : diff;
+	  if (overflow_threshold < approx_abs_diff)
+	    return -1;
+	  guessed_offset += repaired_t0 - t0;
+	  t0 = repaired_t0;
+	}
+    }
+
+  /* Repeatedly use the error to improve the guess.  */
+
+  for (t = t1 = t2 = t0, dst2 = 0;
+       (gt = guess_time64_tm (year, yday, hour, min, sec, &t,
+			    ranged64_convert (convert, &t, &tm)),
+	t != gt);
+       t1 = t2, t2 = t, t = gt, dst2 = tm.tm_isdst != 0)
+    if (t == t1 && t != t2
+	&& (tm.tm_isdst < 0
+	    || (isdst < 0
+		? dst2 <= (tm.tm_isdst != 0)
+		: (isdst != 0) != (tm.tm_isdst != 0))))
+      /* We can't possibly find a match, as we are oscillating
+	 between two values.  The requested time probably falls
+	 within a spring-forward gap of size GT - T.  Follow the common
+	 practice in this case, which is to return a time that is GT - T
+	 away from the requested time, preferring a time whose
+	 tm_isdst differs from the requested value.  (If no tm_isdst
+	 was requested and only one of the two values has a nonzero
+	 tm_isdst, prefer that value.)  In practice, this is more
+	 useful than returning -1.  */
+      goto offset_found;
+    else if (--remaining_probes == 0)
+      return -1;
+
+  /* We have a match.  Check whether tm.tm_isdst has the requested
+     value, if any.  */
+  if (isdst_differ (isdst, tm.tm_isdst))
+    {
+      /* tm.tm_isdst has the wrong value.  Look for a neighboring
+	 time with the right value, and use its UTC offset.
+
+	 Heuristic: probe the adjacent timestamps in both directions,
+	 looking for the desired isdst.  This should work for all real
+	 time zone histories in the tz database.  */
+
+      /* Distance between probes when looking for a DST boundary.  In
+	 tzdata2003a, the shortest period of DST is 601200 seconds
+	 (e.g., America/Recife starting 2000-10-08 01:00), and the
+	 shortest period of non-DST surrounded by DST is 694800
+	 seconds (Africa/Tunis starting 1943-04-17 01:00).  Use the
+	 minimum of these two values, so we don't miss these short
+	 periods when probing.  */
+      int stride = 601200;
+
+      /* The longest period of DST in tzdata2003a is 536454000 seconds
+	 (e.g., America/Jujuy starting 1946-10-01 01:00).  The longest
+	 period of non-DST is much longer, but it makes no real sense
+	 to search for more than a year of non-DST, so use the DST
+	 max.  */
+      int duration_max = 536454000;
+
+      /* Search in both directions, so the maximum distance is half
+	 the duration; add the stride to avoid off-by-1 problems.  */
+      int delta_bound = duration_max / 2 + stride;
+
+      int delta, direction;
+
+      for (delta = stride; delta < delta_bound; delta += stride)
+	for (direction = -1; direction <= 1; direction += 2)
+	  if (time64_t_int_add_ok (t, delta * direction))
+	    {
+	      __time64_t ot = t + delta * direction;
+	      struct tm otm;
+	      ranged64_convert (convert, &ot, &otm);
+	      if (! isdst_differ (isdst, otm.tm_isdst))
+		{
+		  /* We found the desired tm_isdst.
+		     Extrapolate back to the desired time.  */
+		  t = guess_time64_tm (year, yday, hour, min, sec, &ot, &otm);
+		  ranged64_convert (convert, &t, &tm);
+		  goto offset_found;
+		}
+	    }
+    }
+
+ offset_found:
+  *offset = guessed_offset + t - t0;
+
+  if (LEAP_SECONDS_POSSIBLE && sec_requested != tm.tm_sec)
+    {
+      /* Adjust time to reflect the tm_sec requested, not the normalized value.
+	 Also, repair any damage from a false match due to a leap second.  */
+      int sec_adjustment = (sec == 0 && tm.tm_sec == 60) - sec;
+      if (! time64_t_int_add_ok (t, sec_requested))
+	return -1;
+      t1 = t + sec_requested;
+      if (! time64_t_int_add_ok (t1, sec_adjustment))
+	return -1;
+      t2 = t1 + sec_adjustment;
+      if (! convert (&t2, &tm))
+	return -1;
+      t = t2;
+    }
+
+  *tp = tm;
+  return t;
+}
+
+
+/* This uses a signed type wide enough to hold any UTC offset in seconds. */
+static __time64_t localtime64_offset;
+
+/* Convert *TP to a __time64_t value.  */
+__time64_t
+__mktime64 (struct tm *tp)
+{
+#ifdef _LIBC
+  /* POSIX.1 8.1.1 requires that whenever mktime() is called, the
+     time zone names contained in the external variable 'tzname' shall
+     be set as if the tzset() function had been called.  */
+  __tzset ();
+#endif
+
+  return __mktime64_internal (tp, __localtime64_r, &localtime64_offset);
+}
 
 #if defined DEBUG_MKTIME && DEBUG_MKTIME
 

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=8f56a48e73dbf897dd1cd973e409f9bf6992a663

commit 8f56a48e73dbf897dd1cd973e409f9bf6992a663
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:30 2017 +0200

    Y2038: add functions which have struct tm arguments
    
    This consists of the following implementation additions:
    
    ([file] 32-bit implementation -> [file] 64-bit addition)
    
    time/ctime.c ctime() -> __ctime64()
    time/ctime_r.c ctime_r() -> __ctime64_r()
    time/gmtime.c gmtime() -> __gmtime64()
    time/gmtime.c gmtime_r() -> __gmtime64_r()
    time/localtime.c localtime() -> __localtime64()
    time/localtime.c localtime_r() -> __localtime64_r()
    
    which require the following internal function additions:
    
    time/offtime.c __offtime() -> __offtime64()
    time/tzset.c __tz_convert() -> __tz64_convert()
    
    and the following internal (32-bit-time-compatible) function changes:
    
    time/tzfile.c __tzfile_compute()
    time/tzset.c tzset_internal()
    time/tzset.c compute_change()
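
A brief editorial sketch, not part of the commit: the reentrant 64-bit
variants are used like their 32-bit counterparts, only with a __time64_t
timestamp.  It assumes compilation inside glibc, where the include/time.h
hunk below declares __localtime64_r and __gmtime64_r and <bits/types.h>
defines __time64_t.

/* Convert a timestamp one second past the 32-bit time_t limit.  */
#include <stdio.h>
#include <time.h>

static void
print_first_post_y2038_second (void)
{
  __time64_t t = 2147483648LL;   /* 2038-01-19 03:14:08 UTC.  */
  struct tm utc, local;

  if (__gmtime64_r (&t, &utc) != NULL)
    printf ("UTC:   %04d-%02d-%02d %02d:%02d:%02d\n",
            utc.tm_year + 1900, utc.tm_mon + 1, utc.tm_mday,
            utc.tm_hour, utc.tm_min, utc.tm_sec);

  if (__localtime64_r (&t, &local) != NULL)
    printf ("local: %04d-%02d-%02d %02d:%02d:%02d\n",
            local.tm_year + 1900, local.tm_mon + 1, local.tm_mday,
            local.tm_hour, local.tm_min, local.tm_sec);
}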

diff --git a/include/time.h b/include/time.h
index 7eda265..81db483 100644
--- a/include/time.h
+++ b/include/time.h
@@ -9,6 +9,8 @@ extern __typeof (strftime_l) __strftime_l;
 libc_hidden_proto (__strftime_l)
 extern __typeof (strptime_l) __strptime_l;
 
+extern struct tm *__localtime64 (const __time64_t *__timer);
+
 libc_hidden_proto (time)
 libc_hidden_proto (asctime)
 libc_hidden_proto (mktime)
@@ -40,14 +42,14 @@ extern int __use_tzfile attribute_hidden;
 
 extern void __tzfile_read (const char *file, size_t extra,
 			   char **extrap) attribute_hidden;
-extern void __tzfile_compute (time_t timer, int use_localtime,
+extern void __tzfile_compute (__time64_t timer, int use_localtime,
 			      long int *leap_correct, int *leap_hit,
 			      struct tm *tp) attribute_hidden;
 extern void __tzfile_default (const char *std, const char *dst,
 			      long int stdoff, long int dstoff)
   attribute_hidden;
 extern void __tzset_parse_tz (const char *tz) attribute_hidden;
-extern void __tz_compute (time_t timer, struct tm *tm, int use_localtime)
+extern void __tz_compute (__time64_t timer, struct tm *tm, int use_localtime)
   __THROW attribute_hidden;
 
 /* Subroutine of `mktime'.  Return the `time_t' representation of TP and
@@ -60,18 +62,28 @@ extern time_t __mktime_internal (struct tm *__tp,
 extern struct tm *__localtime_r (const time_t *__timer,
 				 struct tm *__tp) attribute_hidden;
 
-extern struct tm *__gmtime_r (const time_t *__restrict __timer,
+extern struct tm *__localtime64_r (const __time64_t *__timer,
+				   struct tm *__tp) attribute_hidden;
+
+extern struct tm *__gmtime_r (const __time_t *__restrict __timer,
 			      struct tm *__restrict __tp);
 libc_hidden_proto (__gmtime_r)
 
+extern struct tm *__gmtime64_r (const __time64_t *__restrict __timer,
+			        struct tm *__restrict __tp);
+
 /* Compute the `struct tm' representation of *T,
    offset OFFSET seconds east of UTC,
    and store year, yday, mon, mday, wday, hour, min, sec into *TP.
    Return nonzero if successful.  */
-extern int __offtime (const time_t *__timer,
+extern int __offtime (const __time_t *__timer,
 		      long int __offset,
 		      struct tm *__tp) attribute_hidden;
 
+extern int __offtime64 (const __time64_t *__timer,
+		        long int __offset,
+		        struct tm *__tp) attribute_hidden;
+
 extern char *__asctime_r (const struct tm *__tp, char *__buf)
   attribute_hidden;
 extern void __tzset (void) attribute_hidden;
@@ -80,6 +92,10 @@ extern void __tzset (void) attribute_hidden;
 extern struct tm *__tz_convert (const time_t *timer, int use_localtime,
 				struct tm *tp) attribute_hidden;
 
+extern struct tm *__tz_convert64 (const __time64_t *timer,
+				  int use_localtime, struct tm *tp)
+                                  attribute_hidden;
+
 extern int __nanosleep (const struct timespec *__requested_time,
 			struct timespec *__remaining);
 hidden_proto (__nanosleep)
diff --git a/time/Versions b/time/Versions
index 2c1cbc1..78afba0 100644
--- a/time/Versions
+++ b/time/Versions
@@ -67,5 +67,8 @@ libc {
   }
   GLIBC_2.27 {
     __difftimes64;
+    __ctime64; __ctime64_r;
+    __gmtime64; __gmtime64_r;
+    __localtime64; __localtime64_r;
   }
 }
diff --git a/time/ctime.c b/time/ctime.c
index 1222614..fbfb61f 100644
--- a/time/ctime.c
+++ b/time/ctime.c
@@ -26,3 +26,13 @@ ctime (const time_t *t)
      In particular, ctime and asctime must yield the same pointer.  */
   return asctime (localtime (t));
 }
+
+/* Return a string as returned by asctime which
+   is the representation of *T in that form.  */
+char *
+__ctime64 (const __time64_t *t)
+{
+  /* Apply the same rule as ctime:
+     ctime64 (t) is equivalent to asctime (localtime64 (t)).  */
+  return asctime (__localtime64 (t));
+}
diff --git a/time/ctime_r.c b/time/ctime_r.c
index c111146..742a967 100644
--- a/time/ctime_r.c
+++ b/time/ctime_r.c
@@ -27,3 +27,12 @@ ctime_r (const time_t *t, char *buf)
   struct tm tm;
   return __asctime_r (__localtime_r (t, &tm), buf);
 }
+
+/* Return a string as returned by asctime which is the representation
+   of *T in that form.  Reentrant Y2038-proof version.  */
+char *
+__ctime64_r (const __time64_t *t, char *buf)
+{
+  struct tm tm;
+  return __asctime_r (__localtime64_r (t, &tm), buf);
+}
diff --git a/time/gmtime.c b/time/gmtime.c
index dc33b3e..6d6878a 100644
--- a/time/gmtime.c
+++ b/time/gmtime.c
@@ -35,3 +35,18 @@ gmtime (const time_t *t)
 {
   return __tz_convert (t, 0, &_tmbuf);
 }
+
+/* Return the `struct tm' representation of 64-bit-time *T
+   in UTC, using *TP to store the result.  */
+struct tm *
+__gmtime64_r (const __time64_t *t, struct tm *tp)
+{
+  return __tz_convert64 (t, 0, tp);
+}
+
+/* Return the `struct tm' representation of 64-bit-time *T in UTC.	*/
+struct tm *
+__gmtime64 (const __time64_t *t)
+{
+  return __tz_convert64 (t, 0, &_tmbuf);
+}
diff --git a/time/localtime.c b/time/localtime.c
index 8684a8a..63dd125 100644
--- a/time/localtime.c
+++ b/time/localtime.c
@@ -39,3 +39,20 @@ localtime (const time_t *t)
   return __tz_convert (t, 1, &_tmbuf);
 }
 libc_hidden_def (localtime)
+
+/* 64-bit-time versions */
+
+/* Return the `struct tm' representation of *T in local time,
+   using *TP to store the result.  */
+struct tm *
+__localtime64_r (const __time64_t *t, struct tm *tp)
+{
+  return __tz_convert64 (t, 1, tp);
+}
+
+/* Return the `struct tm' representation of *T in local time.  */
+struct tm *
+__localtime64 (const __time64_t *t)
+{
+  return __tz_convert64 (t, 1, &_tmbuf);
+}
diff --git a/time/offtime.c b/time/offtime.c
index 04c4838..650ddcc 100644
--- a/time/offtime.c
+++ b/time/offtime.c
@@ -84,3 +84,67 @@ __offtime (const time_t *t, long int offset, struct tm *tp)
   tp->tm_mday = days + 1;
   return 1;
 }
+
+/* Compute the `struct tm' representation of 64-bit-time *T,
+   offset OFFSET seconds east of UTC,
+   and store year, yday, mon, mday, wday, hour, min, sec into *TP.
+   Return nonzero if successful.  */
+int
+__offtime64 (const __time64_t *t, long int offset, struct tm *tp)
+{
+  __time64_t days, rem, y;
+  const unsigned short int *ip;
+
+  days = *t / SECS_PER_DAY;
+  rem = *t % SECS_PER_DAY;
+  rem += offset;
+  while (rem < 0)
+    {
+      rem += SECS_PER_DAY;
+      --days;
+    }
+  while (rem >= SECS_PER_DAY)
+    {
+      rem -= SECS_PER_DAY;
+      ++days;
+    }
+  tp->tm_hour = rem / SECS_PER_HOUR;
+  rem %= SECS_PER_HOUR;
+  tp->tm_min = rem / 60;
+  tp->tm_sec = rem % 60;
+  /* January 1, 1970 was a Thursday.  */
+  tp->tm_wday = (4 + days) % 7;
+  if (tp->tm_wday < 0)
+    tp->tm_wday += 7;
+  y = 1970;
+
+#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
+#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
+
+  while (days < 0 || days >= (__isleap (y) ? 366 : 365))
+    {
+      /* Guess a corrected year, assuming 365 days per year.  */
+      __time64_t yg = y + days / 365 - (days % 365 < 0);
+
+      /* Adjust DAYS and Y to match the guessed year.  */
+      days -= ((yg - y) * 365
+	       + LEAPS_THRU_END_OF (yg - 1)
+	       - LEAPS_THRU_END_OF (y - 1));
+      y = yg;
+    }
+  tp->tm_year = y - 1900;
+  if (tp->tm_year != y - 1900)
+    {
+      /* The year cannot be represented due to overflow.  */
+      __set_errno (EOVERFLOW);
+      return 0;
+    }
+  tp->tm_yday = days;
+  ip = __mon_yday[__isleap(y)];
+  for (y = 11; days < (long int) ip[y]; --y)
+    continue;
+  days -= ip[y];
+  tp->tm_mon = y;
+  tp->tm_mday = days + 1;
+  return 1;
+}
diff --git a/time/tzfile.c b/time/tzfile.c
index 1e59fd5..8ec9834 100644
--- a/time/tzfile.c
+++ b/time/tzfile.c
@@ -630,7 +630,7 @@ __tzfile_default (const char *std, const char *dst,
 }
 
 void
-__tzfile_compute (time_t timer, int use_localtime,
+__tzfile_compute (__time64_t timer, int use_localtime,
 		  long int *leap_correct, int *leap_hit,
 		  struct tm *tp)
 {
@@ -685,7 +685,7 @@ __tzfile_compute (time_t timer, int use_localtime,
 
 	  /* Convert to broken down structure.  If this fails do not
 	     use the string.  */
-	  if (__glibc_unlikely (! __offtime (&timer, 0, tp)))
+	  if (__glibc_unlikely (! __offtime64 (&timer, 0, tp)))
 	    goto use_last;
 
 	  /* Use the rules from the TZ string to compute the change.  */
diff --git a/time/tzset.c b/time/tzset.c
index b517867..106966f 100644
--- a/time/tzset.c
+++ b/time/tzset.c
@@ -55,7 +55,7 @@ typedef struct
 
     /* We cache the computed time of change for a
        given year so we don't have to recompute it.  */
-    time_t change;	/* When to change to this zone.  */
+    __time64_t change;	/* When to change to this zone.  */
     int computed_for;	/* Year above is computed for.  */
   } tz_rule;
 
@@ -416,7 +416,7 @@ tzset_internal (int always)
       tz_rules[0].name = tz_rules[1].name = "UTC";
       if (J0 != 0)
 	tz_rules[0].type = tz_rules[1].type = J0;
-      tz_rules[0].change = tz_rules[1].change = (time_t) -1;
+      tz_rules[0].change = tz_rules[1].change = (__time64_t) -1;
       update_vars ();
       return;
     }
@@ -514,9 +514,10 @@ compute_change (tz_rule *rule, int year)
 
 
 /* Figure out the correct timezone for TM and set `__tzname',
-   `__timezone', and `__daylight' accordingly.  */
+   `__timezone', and `__daylight' accordingly.
+   NOTE: this takes a __time64_t value, so passing a __time_t value is OK. */
 void
-__tz_compute (time_t timer, struct tm *tm, int use_localtime)
+__tz_compute (__time64_t timer, struct tm *tm, int use_localtime)
 {
   compute_change (&tz_rules[0], 1900 + tm->tm_year);
   compute_change (&tz_rules[1], 1900 + tm->tm_year);
@@ -617,6 +618,61 @@ __tz_convert (const time_t *timer, int use_localtime, struct tm *tp)
 }
 
 
+/* Return the `struct tm' representation of *TIMER in the local timezone.
+   Use local time if USE_LOCALTIME is nonzero, UTC otherwise.  */
+struct tm *
+__tz_convert64 (const __time64_t *timer, int use_localtime, struct tm *tp)
+{
+  long int leap_correction;
+  int leap_extra_secs;
+
+  if (timer == NULL)
+    {
+      __set_errno (EINVAL);
+      return NULL;
+    }
+
+  __libc_lock_lock (tzset_lock);
+
+  /* Update internal database according to current TZ setting.
+     POSIX.1 8.3.7.2 says that localtime_r is not required to set tzname.
+     This is a good idea since this allows at least a bit more parallelism.  */
+  tzset_internal (tp == &_tmbuf && use_localtime);
+
+  if (__use_tzfile)
+    __tzfile_compute (*timer, use_localtime, &leap_correction,
+		      &leap_extra_secs, tp);
+  else
+    {
+      if (! __offtime64 (timer, 0, tp))
+	tp = NULL;
+      else
+	__tz_compute (*timer, tp, use_localtime);
+      leap_correction = 0L;
+      leap_extra_secs = 0;
+    }
+
+  __libc_lock_unlock (tzset_lock);
+
+  if (tp)
+    {
+      if (! use_localtime)
+	{
+	  tp->tm_isdst = 0;
+	  tp->tm_zone = "GMT";
+	  tp->tm_gmtoff = 0L;
+	}
+
+      if (__offtime64 (timer, tp->tm_gmtoff - leap_correction, tp))
+        tp->tm_sec += leap_extra_secs;
+      else
+	tp = NULL;
+    }
+
+  return tp;
+}
+
+
 libc_freeres_fn (free_mem)
 {
   while (tzstring_list != NULL)

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d33032063150412329b23dc65c2f35583d61bcdc

commit d33032063150412329b23dc65c2f35583d61bcdc
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Wed Mar 29 11:57:53 2017 +0200

    Y2038: add function __difftime64
    
    Note: the implementation takes __time64_t arguments but returns a
    double, like its 32-bit-time counterpart, in order to remain as
    source-code-compatible as possible, even though a double only
    provides 53 bits of precision.
    
    The implementation is simpler than its 32-bit counterpart, as it
    assumes that all __time64_t implementations are just 64-bit integers.
    
    Also, the implementation does not require a Y2038-proof kernel.
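
A minimal usage sketch, assuming it is compiled where the glibc-internal
__time64_t type and the __difftime64 prototype are visible:

#include <time.h>

static double
example_difference (void)
{
  __time64_t t0 = 0;              /* The Epoch.  */
  __time64_t t1 = 4294967296LL;   /* Well past the 32-bit time_t limit.  */
  return __difftime64 (t1, t0);   /* 4294967296.0, exactly representable.  */
}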

diff --git a/time/Versions b/time/Versions
index fd83818..2c1cbc1 100644
--- a/time/Versions
+++ b/time/Versions
@@ -65,4 +65,7 @@ libc {
   GLIBC_2.16 {
     timespec_get;
   }
+  GLIBC_2.27 {
+    __difftimes64;
+  }
 }
diff --git a/time/difftime.c b/time/difftime.c
index 7c5dd98..e68b9a2 100644
--- a/time/difftime.c
+++ b/time/difftime.c
@@ -119,3 +119,12 @@ __difftime (time_t time1, time_t time0)
   return time1 < time0 ? - subtract (time0, time1) : subtract (time1, time0);
 }
 strong_alias (__difftime, difftime)
+
+/* Return the difference between 64-bit TIME1 and TIME0.  */
+double
+__difftime64 (__time64_t time1, __time64_t time0)
+{
+  /* Subtract the smaller integer from the larger, convert the difference to
+     double, and then negate if needed.  */
+  return time1 < time0 ? - (time0 - time1) : (time1 - time0);
+}

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=1d922d6e4d145dcb2446bb21f0ec9f7f326d1a1c

commit 1d922d6e4d145dcb2446bb21f0ec9f7f326d1a1c
Author: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>
Date:   Fri Sep 8 00:41:28 2017 +0200

    Y2038: add type __time64_t
    
    Also, provide a function to check if a __time64_t value
    fits in a (32-bit) __time_t.
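
A hypothetical illustration, not part of the commit, of how the helper added
in the include/time.h hunk below might be used inside glibc: refuse to hand a
64-bit timestamp to a 32-bit interface when it cannot be represented, rather
than silently truncating it.  store_as_time32 is an invented name for this
sketch; __set_errno is the usual glibc-internal idiom for setting errno.

#include <errno.h>
#include <time.h>

static int
store_as_time32 (__time64_t t64, time_t *out)
{
  if (! fits_in_time_t (t64))
    {
      __set_errno (EOVERFLOW);
      return -1;
    }
  *out = (time_t) t64;
  return 0;
}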

diff --git a/bits/typesizes.h b/bits/typesizes.h
index 85eacf2..0b6a19c 100644
--- a/bits/typesizes.h
+++ b/bits/typesizes.h
@@ -48,6 +48,7 @@
 #define	__ID_T_TYPE		__U32_TYPE
 #define __CLOCK_T_TYPE		__SLONGWORD_TYPE
 #define __TIME_T_TYPE		__SLONGWORD_TYPE
+#define __TIME64_T_TYPE		__SQUAD_TYPE
 #define __USECONDS_T_TYPE	__U32_TYPE
 #define __SUSECONDS_T_TYPE	__SLONGWORD_TYPE
 #define __DADDR_T_TYPE		__S32_TYPE
diff --git a/include/time.h b/include/time.h
index aab26d7..7eda265 100644
--- a/include/time.h
+++ b/include/time.h
@@ -3,6 +3,7 @@
 
 #ifndef _ISOMAC
 # include <bits/types/locale_t.h>
+# include <stdbool.h>
 
 extern __typeof (strftime_l) __strftime_l;
 libc_hidden_proto (__strftime_l)
@@ -97,10 +98,16 @@ extern char * __strptime_internal (const char *rp, const char *fmt,
 
 extern double __difftime (time_t time1, time_t time0);
 
-
 /* Use in the clock_* functions.  Size of the field representing the
    actual clock ID.  */
 #define CLOCK_IDFIELD_SIZE	3
 
+/* Check whether a __time64_t value fits in a (32-bit) time_t.  */
+static inline bool
+fits_in_time_t (__time64_t t)
+{
+  return t == (time_t) t;
+}
+
 #endif
 #endif
diff --git a/posix/bits/types.h b/posix/bits/types.h
index bd06e2d..01ca585 100644
--- a/posix/bits/types.h
+++ b/posix/bits/types.h
@@ -145,7 +145,8 @@ __STD_TYPE __CLOCK_T_TYPE __clock_t;	/* Type of CPU usage counts.  */
 __STD_TYPE __RLIM_T_TYPE __rlim_t;	/* Type for resource measurement.  */
 __STD_TYPE __RLIM64_T_TYPE __rlim64_t;	/* Type for resource measurement (LFS).  */
 __STD_TYPE __ID_T_TYPE __id_t;		/* General type for IDs.  */
-__STD_TYPE __TIME_T_TYPE __time_t;	/* Seconds since the Epoch.  */
+__STD_TYPE __TIME_T_TYPE __time_t;	/* Seconds since the Epoch, Y2038-unsafe.  */
+__STD_TYPE __TIME64_T_TYPE __time64_t;	/* Seconds since the Epoch, Y2038-safe.  */
 __STD_TYPE __USECONDS_T_TYPE __useconds_t; /* Count of microseconds.  */
 __STD_TYPE __SUSECONDS_T_TYPE __suseconds_t; /* Signed count of microseconds.  */
 

-----------------------------------------------------------------------


hooks/post-receive
-- 
GNU C Library master sources

