https://rigtorp.se/spinlock/

Without further ado, here's the code:

#include <atomic>

struct alignas(64) spinlock {
  std::atomic<bool> lock_ = {false};

  void lock() noexcept {
    for (;;) {
      // Optimistically assume the lock is free on the first try
      if (!lock_.exchange(true, std::memory_order_acquire)) {
        return;
      }
      // Wait for lock to be released without generating cache misses
      while (lock_.load(std::memory_order_relaxed)) {
        // Issue an X86 PAUSE instruction to reduce contention between
        // hyper-threads (on ARM, YIELD serves the same purpose)
        __builtin_ia32_pause();
      }
    }
  }

  bool try_lock() noexcept {
    // First do a relaxed load to check if lock is free in order to prevent
    // unnecessary cache misses if someone does while(!try_lock())
    return !lock_.load(std::memory_order_relaxed) &&
           !lock_.exchange(true, std::memory_order_acquire);
  }

  void unlock() noexcept {
    lock_.store(false, std::memory_order_release);
  }
};
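
Because lock()/unlock()/try_lock() match the standard Lockable interface, the struct drops straight into std::lock_guard. A minimal usage sketch, with made-up names (worker, counter) and arbitrary thread/iteration counts:

#include <mutex>
#include <thread>
#include <vector>

spinlock lock;
long counter = 0;

void worker() {
  for (int i = 0; i < 100000; ++i) {
    std::lock_guard<spinlock> guard(lock);  // lock() on entry, unlock() on exit
    ++counter;
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) threads.emplace_back(worker);
  for (auto &t : threads) t.join();
  // counter == 400000: every increment happened under the lock
}

Note that the relaxed pre-check in try_lock() is what makes a spin like while (!lock.try_lock()) cheap: the loop mostly reads the locally cached line instead of bouncing it between cores with exchange.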

Ticket spinlocks

Unlike the test-and-set lock above, a ticket lock serves waiters in FIFO order: each thread atomically takes a ticket, then spins until now_serving reaches its number, so no thread can starve.

https://mfukar.github.io/2017/09/08/ticketspinlock.html

#include <atomic>
#include <cstddef>
#if defined(_MSC_VER)
#include <intrin.h>  /* for _mm_pause() */
#endif

/* Tunables; the original post does not show their definitions, so these are
 * representative values: */
static constexpr std::size_t CACHELINE_SIZE = 64;
static constexpr std::size_t BACKOFF_MIN = 8;

struct TicketSpinLock {
    /**
     * Attempt to grab the lock:
     * 1. Get a ticket number
     * 2. Wait for it
     */
    void enter() {
        const auto ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);

        while (true) {
            const auto currently_serving = now_serving.load(std::memory_order_acquire);
            if (currently_serving == ticket) {
                break;
            }

            /* Proportional backoff: the further back in the queue we are,
             * the longer we wait before re-checking now_serving.
             * delay_slots must be mutable since the loop decrements it: */
            const size_t tickets_ahead = ticket - currently_serving;
            size_t delay_slots = BACKOFF_MIN * tickets_ahead;

            while (delay_slots--) {
                spin_wait();
            }
        }
    }
    static inline void spin_wait(void) {
    #if defined(__GNUC__) || defined(__clang__)
        /* volatile here prevents the asm block from being moved by the optimiser: */
        asm volatile("pause" ::: "memory");
    #elif defined(_MSC_VER)
        _mm_pause();
    #endif
    }

    /**
     * Since we're in the critical section, no one can modify `now_serving`
     * but this thread. We just want the update to be atomic. Therefore we can use
     * a simple store instead of `now_serving.fetch_add()`:
     */
    void leave() {
        const auto successor = now_serving.load(std::memory_order_relaxed) + 1;
        now_serving.store(successor, std::memory_order_release);
    }

    /* These are aligned on a cache line boundary in order to avoid false sharing: */
    alignas(CACHELINE_SIZE) std::atomic_size_t now_serving = {0};
    alignas(CACHELINE_SIZE) std::atomic_size_t next_ticket = {0};
};

static_assert(sizeof(TicketSpinLock) == 2*CACHELINE_SIZE,
    "TicketSpinLock members may not be aligned on a cache-line boundary");