def ipc::uint<N>; optimize code

This commit is contained in:
mutouyun 2018-12-11 16:58:51 +08:00
parent caabc24b71
commit d5b5b3e0f4
3 changed files with 72 additions and 59 deletions

View File

@ -10,35 +10,62 @@
namespace ipc {
namespace circ {
// Header block for a circular element array; sits ahead of the element
// storage, and alignas(std::max_align_t) keeps whatever follows it
// maximally aligned.
//
// NOTE(review): this span is a diff rendering that has lost its +/-
// markers, so pre- and post-change lines are interleaved below -- lc_,
// cc_ and wt_ are each declared twice with different types. The text is
// not compilable as shown; confirm the intended final member set
// (presumably the std::atomic<u2_t> lc_ / ac_t cc_ / ac_t wt_ trio)
// against the repository before relying on it.
template <std::size_t N>
struct alignas(std::max_align_t) elem_array_head {
using ui_t = std::uint8_t;   // NOTE(review): looks like a pre-change alias -- verify
using uc_t = std::uint16_t;  // NOTE(review): likewise; ac_t below still refers to it
using ac_t = std::atomic<uc_t>;
using u1_t = uint_t<N>;      // N-bit slot-index type (ipc::uint<N> trait)
using u2_t = uint_t<N * 2>;  // 2N-bit cursor type: wraps far slower than u1_t
std::atomic<std::size_t> lc_ { 0 }; // write spin lock flag
std::atomic<u2_t> cc_ { 0 }; // connection counter, used for broadcast
std::atomic<u2_t> wt_ { 0 }; // write index
std::atomic<u2_t> lc_ { 0 }; // write spin lock flag (duplicate of lc_ above -- diff residue)
ac_t cc_ { 0 }; // connection counter, used for broadcast (duplicate -- diff residue)
ac_t wt_ { 0 }; // write index (duplicate -- diff residue)
// Truncate a full-width cursor to a slot index. Relies on u1_t being
// exactly half the width of u2_t, making the cast a cheap modulo-2^N.
static u1_t index_of(u2_t c) { return static_cast<u1_t>(c); }
// Register one more consumer; returns the count before the increment.
std::size_t connect(void) {
return cc_.fetch_add(1, std::memory_order_release);
}
// Unregister a consumer; returns the count before the decrement.
std::size_t disconnect(void) {
return cc_.fetch_sub(1, std::memory_order_release);
}
// Number of currently connected consumers.
std::size_t conn_count(void) const {
return cc_.load(std::memory_order_acquire);
}
// Spin until the writer lock is taken, then return the slot index the
// next write should target. Must be paired with commit().
auto acquire(void) {
while (lc_.exchange(1, std::memory_order_acquire)) {
std::this_thread::yield();
}
return index_of(wt_.load(std::memory_order_relaxed));
}
// Publish the slot filled after acquire(): bump the write cursor, then
// drop the spin lock; the release store orders the preceding writes.
void commit(void) {
wt_.fetch_add(1, std::memory_order_relaxed);
lc_.store(0, std::memory_order_release);
}
};
enum : std::size_t {
elem_array_head_size =
(sizeof(elem_array_head) % alignof(std::max_align_t)) ?
((sizeof(elem_array_head) / alignof(std::max_align_t)) + 1) * alignof(std::max_align_t) :
sizeof(elem_array_head)
template <std::size_t N>
constexpr std::size_t elem_array_head_size =
(sizeof(elem_array_head<N>) % alignof(std::max_align_t)) ?
((sizeof(elem_array_head<N>) / alignof(std::max_align_t)) + 1) * alignof(std::max_align_t) :
sizeof(elem_array_head<N>);
// Per-element header stored alongside each data slot. rc_ ("read
// counter") tracks how many connected consumers still have to read the
// slot: the writer CAS-es it from 0 up to the current connection count
// before reusing the slot (see elem_array::acquire in this file);
// presumably consumers count it back down -- confirm against the
// reader-side code, which is not visible in this view.
struct elem_head {
std::atomic<uint_t<32>> rc_ { 0 }; // read counter
};
template <std::size_t DataSize>
class elem_array : private elem_array_head {
struct head_t {
std::atomic<std::uint32_t> rc_ { 0 }; // read counter
};
template <std::size_t DataSize, std::size_t BaseIntSize = 8>
class elem_array : private elem_array_head<BaseIntSize> {
public:
using base_t = elem_array_head<BaseIntSize>;
using head_t = elem_head;
enum : std::size_t {
head_size = elem_array_head_size,
head_size = elem_array_head_size<BaseIntSize>,
data_size = DataSize,
elem_max = std::numeric_limits<ui_t>::max() + 1, // default is 255 + 1
elem_max = std::numeric_limits<u1_t>::max() + 1, // default is 255 + 1
elem_size = sizeof(head_t) + DataSize,
block_size = elem_size * elem_max
};
@ -54,21 +81,8 @@ private:
return block_;
}
static elem_t* elem(void* ptr) {
return reinterpret_cast<elem_t*>(static_cast<byte_t*>(ptr) - sizeof(head_t));
}
elem_t* elem(ui_t i) {
return elem_start() + i;
}
static ui_t index_of(uc_t c) {
return static_cast<ui_t>(c);
}
ui_t index_of(elem_t* el) {
return static_cast<ui_t>(el - elem_start());
}
static elem_t* elem(void* ptr) { return reinterpret_cast<elem_t*>(static_cast<byte_t*>(ptr) - sizeof(head_t)); }
elem_t* elem(u1_t i ) { return elem_start() + i; }
public:
elem_array(void) = default;
@ -78,48 +92,35 @@ public:
elem_array(elem_array&&) = delete;
elem_array& operator=(elem_array&&) = delete;
std::size_t connect(void) {
return cc_.fetch_add(1, std::memory_order_release);
}
std::size_t disconnect(void) {
return cc_.fetch_sub(1, std::memory_order_release);
}
std::size_t conn_count(void) const {
return cc_.load(std::memory_order_consume);
}
using base_t::connect;
using base_t::disconnect;
using base_t::conn_count;
void* acquire(void) {
while (lc_.exchange(1, std::memory_order_acquire)) {
std::this_thread::yield();
}
elem_t* el = elem(index_of(wt_.load(std::memory_order_relaxed)));
elem_t* el = elem(base_t::acquire());
// check all consumers have finished reading
while(1) {
std::uint32_t expected = 0;
uint_t<32> expected = 0;
if (el->head_.rc_.compare_exchange_weak(
expected,
static_cast<std::uint32_t>(cc_.load(std::memory_order_relaxed)),
static_cast<uint_t<32>>(conn_count()),
std::memory_order_release)) {
break;
}
std::this_thread::yield();
std::atomic_thread_fence(std::memory_order_acquire);
}
return el->data_;
}
void commit(void* /*ptr*/) {
wt_.fetch_add(1, std::memory_order_relaxed);
lc_.store(0, std::memory_order_release);
base_t::commit();
}
uc_t cursor(void) const {
return wt_.load(std::memory_order_consume);
u2_t cursor(void) const {
return wt_.load(std::memory_order_acquire);
}
void* take(uc_t cursor) {
void* take(u2_t cursor) {
return elem(index_of(cursor))->data_;
}

View File

@ -10,6 +10,16 @@ namespace ipc {
using byte_t = std::uint8_t;
// Width-selected unsigned integer trait: ipc::uint<N>::type is the
// fixed-width unsigned integer with exactly N value bits. The primary
// template is left undefined so an unsupported width is a compile-time
// error rather than a silent fallback.
template <std::size_t N>
struct uint;
template <> struct uint<8 > { using type = std::uint8_t ; };
template <> struct uint<16> { using type = std::uint16_t; };
template <> struct uint<32> { using type = std::uint32_t; };
// 64-bit case: needed so that doubled widths (e.g. uint_t<N * 2> in
// circ::elem_array_head) also resolve when the base width is 32 bits.
template <> struct uint<64> { using type = std::uint64_t; };
// Convenience alias: uint_t<N> == typename uint<N>::type.
template <std::size_t N>
using uint_t = typename uint<N>::type;
// constants
enum : std::size_t {

View File

@ -39,6 +39,8 @@ private slots:
using cq_t = ipc::circ::elem_array<12>;
cq_t* cq__;
constexpr int LoopCount = 1000000;
void Unit::initTestCase(void) {
TestSuite::initTestCase();
cq__ = new cq_t;
@ -233,7 +235,7 @@ struct test_cq<ipc::circ::queue<T>> {
}
};
template <int N, int M, bool V = true, int Loops = 1000000, typename T>
template <int N, int M, bool V = true, int Loops = LoopCount, typename T>
void test_prod_cons(T* cq) {
test_cq<T> tcq { cq };
@ -281,7 +283,7 @@ void test_prod_cons(T* cq) {
for (auto& t : consumers) t.join();
}
template <int N, int M, bool V = true, int Loops = 1000000>
template <int N, int M, bool V = true, int Loops = LoopCount>
void test_prod_cons(void) {
test_prod_cons<N, M, V, Loops>(cq__);
}