From c32615dbdaa1bab83a1f71250fc9ffc866fa6134 Mon Sep 17 00:00:00 2001
From: mutouyun
Date: Sun, 27 Feb 2022 21:22:43 +0800
Subject: [PATCH] Adjust code formatting and comments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 include/libipc/spin_lock.h | 74 +++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 36 deletions(-)

diff --git a/include/libipc/spin_lock.h b/include/libipc/spin_lock.h
index 49c4acd..1ed94d9 100644
--- a/include/libipc/spin_lock.h
+++ b/include/libipc/spin_lock.h
@@ -16,57 +16,57 @@
 #include "libipc/detect_plat.h"
 #include "libipc/def.h"
 
-////////////////////////////////////////////////////////////////
-/// Gives hint to processor that improves performance of spin-wait loops.
-////////////////////////////////////////////////////////////////
+/**
+ * @brief Gives hint to processor that improves performance of spin-wait loops.
+*/
 #pragma push_macro("LIBIPC_LOCK_PAUSE_")
 #undef LIBIPC_LOCK_PAUSE_
 
 #if defined(LIBIPC_CC_MSVC)
 # include <windows.h>    // YieldProcessor
-/*
- * @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms687419(v=vs.85).aspx
- * Not for intel c++ compiler, so ignore http://software.intel.com/en-us/forums/topic/296168
+/**
+ * @brief Not for intel c++ compiler, so ignore http://software.intel.com/en-us/forums/topic/296168
+ * @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms687419(v=vs.85).aspx
  */
 # define LIBIPC_LOCK_PAUSE_() YieldProcessor()
 #elif defined(LIBIPC_CC_GNUC)
 # if defined(LIBIPC_INSTR_X86_64)
-/*
- * @see Intel(R) 64 and IA-32 Architectures Software Developer's Manual V2
- *      PAUSE-Spin Loop Hint, 4-57
- *      http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.html?wapkw=instruction+set+reference
+/**
+ * @brief Intel(R) 64 and IA-32 Architectures Software Developer's Manual V2
+ *        PAUSE-Spin Loop Hint, 4-57
+ * @see http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.html?wapkw=instruction+set+reference
  */
 # define LIBIPC_LOCK_PAUSE_() __asm__ __volatile__("pause")
 # elif defined(LIBIPC_INSTR_I64)
-/*
- * @see Intel(R) Itanium(R) Architecture Developer's Manual, Vol.3
- *      hint - Performance Hint, 3:145
- *      http://www.intel.com/content/www/us/en/processors/itanium/itanium-architecture-vol-3-manual.html
+/**
+ * @brief Intel(R) Itanium(R) Architecture Developer's Manual, Vol.3
+ *        hint - Performance Hint, 3:145
+ * @see http://www.intel.com/content/www/us/en/processors/itanium/itanium-architecture-vol-3-manual.html
  */
 # define LIBIPC_LOCK_PAUSE_() __asm__ __volatile__ ("hint @pause")
-#elif defined(LIBIPC_INSTR_ARM)
-/*
- * @see ARM Architecture Reference Manuals (YIELD)
- *      http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.subset.architecture.reference/index.html
+# elif defined(LIBIPC_INSTR_ARM)
+/**
+ * @brief ARM Architecture Reference Manuals (YIELD)
+ * @see http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.subset.architecture.reference/index.html
  */
 # define LIBIPC_LOCK_PAUSE_() __asm__ __volatile__ ("yield")
 # endif
 #endif /*compilers*/
 
 #if !defined(LIBIPC_LOCK_PAUSE_)
-/*
- * Just use a compiler fence, prevent compiler from optimizing loop
+/**
+ * @brief Just use a compiler fence, prevent compiler from optimizing loop
  */
 # define LIBIPC_LOCK_PAUSE_() \
     std::atomic_signal_fence(std::memory_order_seq_cst)
 #endif /*!defined(LIBIPC_LOCK_PAUSE_)*/
 
-////////////////////////////////////////////////////////////////
-/// Yield to other threads
-////////////////////////////////////////////////////////////////
-
 LIBIPC_NAMESPACE_BEG_
 
+/**
+ * @brief Yield to other threads
+*/
+
 template <typename K>
 inline void yield(K &k) noexcept {
   if (k < 4) { /* Do nothing */ }
@@ -100,38 +100,38 @@ inline void sleep(K &k) {
   });
 }
 
-} // namespace ipc
-
-#pragma pop_macro("LIBIPC_LOCK_PAUSE_")
-
-namespace ipc {
-
+/**
+ * @brief Basic spin lock
+*/
 class spin_lock {
-    std::atomic<unsigned> lc_ { 0 };
+    std::atomic<unsigned> lc_ {0};
 
 public:
     void lock(void) noexcept {
         for (unsigned k = 0;
-            lc_.exchange(1, std::memory_order_acquire);
-            yield(k)) ;
+             lc_.exchange(1, std::memory_order_acquire);
+             yield(k)) ;
     }
 
     void unlock(void) noexcept {
         lc_.store(0, std::memory_order_release);
     }
 };
 
+/**
+ * @brief Support for shared mode spin lock
+*/
 class rw_lock {
     using lc_ui_t = unsigned;
 
-    std::atomic<lc_ui_t> lc_ { 0 };
+    std::atomic<lc_ui_t> lc_ {0};
 
     enum : lc_ui_t {
         w_mask = (std::numeric_limits<std::make_signed_t<lc_ui_t>>::max)(), // b 0111 1111
-        w_flag = w_mask + 1                                                 // b 1000 0000
+        w_flag = w_mask + 1,                                                // b 1000 0000
     };
 
 public:
-    rw_lock() = default;
+    rw_lock() noexcept = default;
 
     rw_lock(const rw_lock &) = delete;
     rw_lock &operator=(const rw_lock &) = delete;
@@ -177,3 +177,5 @@ public:
 };
 
 LIBIPC_NAMESPACE_END_
+
+#pragma pop_macro("LIBIPC_LOCK_PAUSE_")
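
A note on the rw_lock constants touched in the second hunk: w_mask is the maximum of the signed counterpart of lc_ui_t, i.e. every bit except the top one (the reader-count bits), and w_flag is w_mask + 1, i.e. the top bit alone, so a writer can be flagged without disturbing the reader count held in the low bits. The standalone sketch below restates those two enumerators outside the class so the arithmetic can be checked in isolation; the file name, the static_asserts, and main() are illustrative and not part of the patch, and lc_ui_t is taken to be the plain unsigned shown as context in the diff.

// rw_lock_bits_sketch.cpp -- illustrative only, not part of this patch.
// Mirrors the w_mask / w_flag enumerators from include/libipc/spin_lock.h:
//   w_mask = signed max of lc_ui_t = b 0111...1 (reader-count bits)
//   w_flag = w_mask + 1            = b 1000...0 (writer flag, the top bit)
// Requires C++14 or later for std::make_signed_t.
#include <limits>
#include <type_traits>

using lc_ui_t = unsigned;

enum : lc_ui_t {
    w_mask = (std::numeric_limits<std::make_signed_t<lc_ui_t>>::max)(), // b 0111 1111
    w_flag = w_mask + 1,                                                // b 1000 0000
};

// Width-independent checks of the layout described by the comments above.
static_assert((w_flag & w_mask) == 0,
              "writer flag must not overlap the reader-count bits");
static_assert(w_flag == static_cast<lc_ui_t>(~w_mask),
              "writer flag is exactly the top bit");

int main() { return 0; }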
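For completeness, here is a minimal usage sketch of the spin_lock class this patch reformats. It relies only on what the diff shows: lock() spins on an acquire exchange with the yield(k) back-off until it observes 0, and unlock() performs a release store, so the type satisfies BasicLockable and composes with std::lock_guard. The ipc namespace is an assumption (the removed "namespace ipc {" lines in the second hunk suggest LIBIPC_NAMESPACE_BEG_ expands to it), and the worker/counter names, thread count, and include path are illustrative.

// spin_lock_usage_sketch.cpp -- illustrative only, not part of this patch.
// Assumes the libipc include directory is on the include path and that
// LIBIPC_NAMESPACE_BEG_ opens namespace ipc.
#include <mutex>                    // std::lock_guard
#include <thread>
#include <vector>

#include "libipc/spin_lock.h"

namespace {

ipc::spin_lock g_lock;              // lc_ starts at 0, i.e. unlocked
long g_counter = 0;                 // shared state guarded by g_lock

void worker() {
    for (int i = 0; i < 100000; ++i) {
        // lock() spins with the yield(k) back-off until the acquire
        // exchange returns 0; unlock() is a release store of 0.
        std::lock_guard<ipc::spin_lock> guard {g_lock};
        ++g_counter;
    }
}

} // namespace

int main() {
    std::vector<std::thread> pool;
    for (int i = 0; i < 4; ++i) pool.emplace_back(worker);
    for (auto &t : pool) t.join();
    return (g_counter == 4 * 100000) ? 0 : 1;
}

Because the class exposes plain lock()/unlock(), callers need no libipc-specific RAII helper; the standard guards work as-is.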