We found the answer in the armv8 documentation:

stlrh is a halfword store with release semantics. The waiting CPU's ldaxrh arms its global exclusive monitor for the lock word, and the unlocker's stlrh to that same address clears the monitor; ARMv8 specifies that clearing a PE's global monitor generates a wake-up event for wfe, so for the waiter the store behaves like an explicit sev instruction. This is why the arm64 unlock path below has no counterpart to armv7's dsb_sev().
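As a minimal illustration of this wake-up mechanism, here is our own sketch for bare-metal AArch64 with GCC inline assembly (wait_for_ticket and publish_owner are hypothetical names, not kernel code):

#include <stdint.h>

/* Hypothetical waiter: park in wfe until *owner == ticket. The ldaxrh
 * arms this CPU's global exclusive monitor for the owner field, so a
 * later store-release to it from another CPU clears the monitor and
 * generates the event that ends the wfe. */
static inline void wait_for_ticket(uint16_t *owner, uint16_t ticket)
{
	uint32_t cur;
	asm volatile(
	"	sevl\n"			/* prime the first wfe so no event is missed */
	"1:	wfe\n"			/* sleep until an event arrives */
	"	ldaxrh	%w0, %1\n"	/* reload owner and re-arm the monitor */
	"	cmp	%w0, %w2\n"
	"	b.ne	1b\n"
	: "=&r" (cur)
	: "Q" (*owner), "r" ((uint32_t)ticket)
	: "cc", "memory");
}

/* Hypothetical unlocker: one stlrh both publishes the new owner and,
 * by clearing the waiters' global monitors, wakes their wfe. */
static inline void publish_owner(uint16_t *owner, uint16_t next)
{
	asm volatile(
	"	stlrh	%w1, %0\n"
	: "=Q" (*owner)
	: "r" ((uint32_t)next)
	: "memory");
}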
For reference, the underlying spin_lock and spin_unlock implementations on armv7 and armv8:
(armv7)
(linux/arch/arm/include/asm/spinlock.h)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
	/* Atomically take the next ticket with an ldrex/strex loop. */
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	/* Not our turn yet: sleep in wfe until the unlocker's sev. */
	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();	/* explicit sev needed here to wake the wfe waiters */
}
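Both implementations spin on the same ticket layout; for reference, this mirrors arch/arm/include/asm/spinlock_types.h (the arm64 type is analogous):

#define TICKET_SHIFT	16

typedef struct {
	union {
		u32 slock;
		struct __raw_tickets {
#ifdef __ARMEB__
			u16 next;
			u16 owner;
#else
			u16 owner;	/* ticket currently being served */
			u16 next;	/* next ticket to hand out */
#endif
		} tickets;
	};
} arch_spinlock_t;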
(armv8)
(linux/arch/arm64/include/asm/spinlock.h)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
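To see the ticket algorithm itself without inline assembly, here is a hedged sketch using C11 atomics (ticket_lock_t, ticket_lock and ticket_unlock are our own names, not the kernel's). An arm64 compiler turns the release store into stlrh, so the monitor-clearing wake-up described above comes for free; we busy-poll where the kernel parks in wfe, since plain C cannot express that:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical portable rendering of the same ticket algorithm. */
typedef struct {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Grab a ticket; this is the ldrex/strex (armv7) or
	 * ldaxr/stxr / ldadda (armv8) increment of 'next'. */
	uint16_t my = atomic_fetch_add_explicit(&l->next, 1,
						memory_order_relaxed);

	/* Busy-poll instead of wfe. The acquire load pairs with the
	 * release store in ticket_unlock(), just as ldaxrh pairs
	 * with stlrh. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != my)
		;
}

static void ticket_unlock(ticket_lock_t *l)
{
	/* The release store is the portable form of stlrh. */
	uint16_t o = atomic_load_explicit(&l->owner, memory_order_relaxed);
	atomic_store_explicit(&l->owner, (uint16_t)(o + 1),
			      memory_order_release);
}

Note that only the lock holder ever writes owner, which is why the load-then-store in ticket_unlock() is race-free, and why the armv7 kernel code can get away with a plain lock->tickets.owner++.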