This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Re: [PATCHv3] powerpc: Fix write-after-destroy in lock elision


LGTM.

On 12/21/2016 04:03 PM, Tulio Magno Quites Machado Filho wrote:
> From: Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com>
>
> Changes since v2:
>  - Fixed coding style.
>
> Changes since v1:
>  - Removed references to data race by the actual error: write-after-destroy.
>  - Enclosed adapt_count accesses in C11 atomics.
>
> --- 8< ---
>
> The update of *adapt_count after the release of the lock causes a race
> condition: thread A unlocks, thread B continues and destroys the
> mutex, and thread A then writes to the now-destroyed *adapt_count.
>
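
For illustration, here is a minimal, standalone sketch of the interleaving
described above (hypothetical code, not part of the patch).  Thread B's
unlock-then-destroy sequence is valid POSIX usage, which is why the late
store to *adapt_count in the old unlock path was a bug:

#include <pthread.h>
#include <stdlib.h>

struct shared { pthread_mutex_t mutex; };

static void *
thread_b (void *arg)
{
  struct shared *s = arg;
  pthread_mutex_lock (&s->mutex);	/* Blocks until thread A unlocks.  */
  pthread_mutex_unlock (&s->mutex);
  pthread_mutex_destroy (&s->mutex);	/* The last user may destroy...  */
  free (s);				/* ...and free the memory.  */
  return NULL;
}

int
main (void)
{
  struct shared *s = malloc (sizeof (struct shared));
  pthread_mutex_init (&s->mutex, NULL);
  pthread_mutex_lock (&s->mutex);

  pthread_t b;
  pthread_create (&b, NULL, thread_b, s);

  /* With the unpatched __lll_unlock_elision, this unlock released the
     lock word first and stored to *adapt_count afterwards.  By then
     thread B may already have destroyed the mutex and freed the
     memory, so the late store is a write-after-destroy.  */
  pthread_mutex_unlock (&s->mutex);

  pthread_join (b, NULL);
  return 0;
}
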
> 2016-12-21  Rajalakshmi Srinivasaraghavan  <raji@linux.vnet.ibm.com>
> 	    Steven Munroe  <sjmunroe@us.ibm.com>
> 	    Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
>
> 	[BZ #20822]
> 	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> 	(__lll_lock_elision): Access adapt_count via C11 atomics.
> 	* sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
> 	(__lll_trylock_elision): Likewise.
> 	* sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
> 	(__lll_unlock_elision): Update adapt_count variable inside the
> 	critical section.
> ---
>  sysdeps/unix/sysv/linux/powerpc/elision-lock.c    |  8 +++++---
>  sysdeps/unix/sysv/linux/powerpc/elision-trylock.c |  7 ++++---
>  sysdeps/unix/sysv/linux/powerpc/elision-unlock.c  | 14 +++++++++-----
>  3 files changed, 18 insertions(+), 11 deletions(-)
>
> diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> index dd1e4c3..86adbc9 100644
> --- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> +++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
> @@ -45,7 +45,7 @@
>  int
>  __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
>  {
> -  if (*adapt_count > 0)
> +  if (atomic_load_relaxed (adapt_count) > 0)
>      {
>        goto use_lock;
>      }
> @@ -67,7 +67,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
>  	  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
>  	    {
>  	      if (aconf.skip_lock_internal_abort > 0)
> -		*adapt_count = aconf.skip_lock_internal_abort;
> +		atomic_store_relaxed (adapt_count,
> +				      aconf.skip_lock_internal_abort);
>  	      goto use_lock;
>  	    }
>  	}
> @@ -75,7 +76,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
>
>    /* Fall back to locks for a bit if retries have been exhausted */
>    if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
> -    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
> +    atomic_store_relaxed (adapt_count,
> +			  aconf.skip_lock_out_of_tbegin_retries);
>
>  use_lock:
>    return LLL_LOCK ((*lock), pshared);
> diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
> index 0807a6a..6061856 100644
> --- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
> +++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
> @@ -34,7 +34,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
>    __libc_tabort (_ABORT_NESTED_TRYLOCK);
>
>    /* Only try a transaction if it's worth it.  */
> -  if (*adapt_count > 0)
> +  if (atomic_load_relaxed (adapt_count) > 0)
>      {
>        goto use_lock;
>      }
> @@ -49,7 +49,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
>        __libc_tend (0);
>
>        if (aconf.skip_lock_busy > 0)
> -	*adapt_count = aconf.skip_lock_busy;
> +	atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
>      }
>    else
>      {
> @@ -59,7 +59,8 @@ __lll_trylock_elision (int *futex, short *adapt_count)
>  	     result in another failure.  Use normal locking now and
>  	     for the next couple of calls.  */
>  	  if (aconf.skip_trylock_internal_abort > 0)
> -	    *adapt_count = aconf.skip_trylock_internal_abort;
> +	    atomic_store_relaxed (adapt_count,
> +				aconf.skip_trylock_internal_abort);
>  	}
>      }
>
> diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
> index 43c5a67..0e0baf5 100644
> --- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
> +++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
> @@ -28,13 +28,17 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
>      __libc_tend (0);
>    else
>      {
> -      lll_unlock ((*lock), pshared);
> -
> -      /* Update the adapt count AFTER completing the critical section.
> -         Doing this here prevents unneeded stalling when entering
> -         a critical section.  Saving about 8% runtime on P8.  */
> +      /* Update adapt_count in the critical section to prevent a
> +	 write-after-destroy error as mentioned in BZ 20822.  The
> +	 following update of adapt_count is clearly contained within
> +	 the critical region of the fall-back lock as it immediately
> +	 precedes the unlock.  Attempting to use C11 atomics to access
> +	 adapt_count would be (at minimum) misleading and (at worst) a
> +	 serious error forcing a larx-hit-stcx flush.  */
>        if (*adapt_count > 0)
>  	(*adapt_count)--;
> +
> +      lll_unlock ((*lock), pshared);
>      }
>    return 0;
>  }
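
For reference, the overall shape of the fix can be sketched in standalone
C11 (illustrative code only; the names, the toy spinlock, and SKIP_COUNT
are made up and are not glibc's implementation): adapt_count accesses
outside the critical section use relaxed atomics, and the unlock path
updates adapt_count while the lock is still held, so nothing touches the
mutex after another thread is free to destroy it:

#include <stdatomic.h>

/* Stand-in for the aconf.skip_* tunables in the patch.  */
#define SKIP_COUNT 3

struct elided_lock
{
  _Atomic int lock;		/* 0 = free, 1 = taken (toy fallback lock).  */
  _Atomic short adapt_count;	/* Skip elision while > 0.  */
};

static void
toy_lock (struct elided_lock *l)
{
  /* Relaxed ordering suffices: adapt_count is only a heuristic, and
     the atomic access merely avoids a data race with concurrent
     stores (mirroring atomic_load_relaxed in the patch).  */
  if (atomic_load_explicit (&l->adapt_count, memory_order_relaxed) == 0)
    {
      /* A real implementation would attempt a hardware transaction
	 here and, on a persistent failure, do:
	   atomic_store_explicit (&l->adapt_count, SKIP_COUNT,
				  memory_order_relaxed);
	 before falling back to the lock.  */
    }

  int expected = 0;
  while (!atomic_compare_exchange_weak_explicit
	 (&l->lock, &expected, 1,
	  memory_order_acquire, memory_order_relaxed))
    expected = 0;
}

static void
toy_unlock (struct elided_lock *l)
{
  /* Update adapt_count BEFORE releasing the lock, as the patch does:
     once the lock is released, another thread may legitimately
     destroy and free the mutex, so a later store would be a
     write-after-destroy.  (The patch uses plain accesses here because
     the lock is held; this sketch keeps the field _Atomic for
     simplicity.)  */
  short a = atomic_load_explicit (&l->adapt_count, memory_order_relaxed);
  if (a > 0)
    atomic_store_explicit (&l->adapt_count, a - 1, memory_order_relaxed);

  atomic_store_explicit (&l->lock, 0, memory_order_release);
}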

