/* pthread_spin_lock.c */
  1. /* pthread_spin_lock -- lock a spin lock. Generic version.
  2. Copyright (C) 2012-2016 Free Software Foundation, Inc.
  3. The GNU C Library is free software; you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public
  5. License as published by the Free Software Foundation; either
  6. version 2.1 of the License, or (at your option) any later version.
  7. The GNU C Library is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10. Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public
  12. License along with the GNU C Library; if not, see
  13. <http://www.gnu.org/licenses/>. */
  14. #include <atomic.h>
  15. #include "pthreadP.h"
  16. /* A machine-specific version can define SPIN_LOCK_READS_BETWEEN_CMPXCHG
  17. to the number of plain reads that it's optimal to spin on between uses
  18. of atomic_compare_and_exchange_val_acq. If spinning forever is optimal
  19. then use -1. If no plain reads here would ever be optimal, use 0. */
  20. #define SPIN_LOCK_READS_BETWEEN_CMPXCHG 1000
  21. int
  22. pthread_spin_lock (pthread_spinlock_t *lock)
  23. {
  24. /* atomic_exchange usually takes less instructions than
  25. atomic_compare_and_exchange. On the other hand,
  26. atomic_compare_and_exchange potentially generates less bus traffic
  27. when the lock is locked.
  28. We assume that the first try mostly will be successful, and we use
  29. atomic_exchange. For the subsequent tries we use
  30. atomic_compare_and_exchange. */
  31. if (atomic_exchange_acq (lock, 1) == 0)
  32. return 0;
  33. do {
  34. /* The lock is contended and we need to wait. Going straight back
  35. to cmpxchg is not a good idea on many targets as that will force
  36. expensive memory synchronizations among processors and penalize other
  37. running threads.
  38. On the other hand, we do want to update memory state on the local core
  39. once in a while to avoid spinning indefinitely until some event that
  40. will happen to update local memory as a side-effect. */
  41. if (SPIN_LOCK_READS_BETWEEN_CMPXCHG >= 0) {
  42. int wait = SPIN_LOCK_READS_BETWEEN_CMPXCHG;
  43. while (*lock != 0 && wait > 0)
  44. --wait;
  45. } else {
  46. while (*lock != 0)
  47. ;
  48. }
  49. } while (atomic_compare_and_exchange_val_acq (lock, 1, 0) != 0);
  50. return 0;
  51. }