use chunk_variable_alloc as allocator-policy of static_async_fixed

zhangyi 2019-07-02 18:48:08 +08:00
parent 56103e08dd
commit 0ef1d27521
3 changed files with 216 additions and 108 deletions

View File

@ -14,8 +14,8 @@ namespace mem {
class static_alloc {
public:
static void clear() {}
static void swap(static_alloc&) {}
static void clear() {}
static void* alloc(std::size_t size) {
return size ? std::malloc(size) : nullptr;
@ -36,12 +36,19 @@ public:
namespace detail {
constexpr std::size_t aligned(std::size_t size, size_t alignment) noexcept {
return ((size - 1) & ~(alignment - 1)) + alignment;
}
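
For reference, the new aligned() helper rounds a non-zero size up to the next multiple of a power-of-two alignment; it is what pads the block_t and head_t headers below. A few compile-time checks of the formula (standalone restatement, example values chosen for illustration):

    #include <cstddef>

    // same formula as above, restated so it can be checked in isolation
    constexpr std::size_t aligned(std::size_t size, std::size_t alignment) noexcept {
        return ((size - 1) & ~(alignment - 1)) + alignment;
    }

    static_assert(aligned(13, 8) == 16, "13 rounds up to the next multiple of 8");
    static_assert(aligned(16, 8) == 16, "already-aligned sizes are unchanged");
    static_assert(aligned(1, 8)  == 8,  "tiny sizes still occupy one full slot");
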
class scope_alloc_base {
protected:
struct block_t {
block_t* next_;
} * list_ = nullptr;
enum : std::size_t {
aligned_block_size = aligned(sizeof(block_t), alignof(std::max_align_t))
};
block_t* list_ = nullptr;
public:
void swap(scope_alloc_base & rhs) {
@ -63,33 +70,42 @@ public:
private:
alloc_policy alloc_;
public:
scope_alloc() = default;
scope_alloc(scope_alloc&& rhs) { this->swap(rhs); }
scope_alloc& operator=(scope_alloc&& rhs) { this->swap(rhs); return (*this); }
~scope_alloc() { clear(); }
void swap(scope_alloc& rhs) {
std::swap(this->alloc_, rhs.alloc_);
base_t::swap(rhs);
}
void clear() {
void free_all() {
while (list_ != nullptr) {
auto curr = list_;
list_ = list_->next_;
alloc_.free(curr);
}
// now list_ is nullptr
alloc_.clear();
}
public:
scope_alloc() = default;
scope_alloc(scope_alloc&& rhs) { this->swap(rhs); }
scope_alloc& operator=(scope_alloc&& rhs) { this->swap(rhs); return (*this); }
~scope_alloc() { free_all(); }
template <typename A>
void set_allocator(A && alc) {
alloc_ = std::forward<A>(alc);
}
void swap(scope_alloc& rhs) {
alloc_.swap(rhs.alloc_);
base_t::swap(rhs);
}
void clear() {
free_all();
alloc_.~alloc_policy();
}
void* alloc(std::size_t size) {
auto curr = static_cast<block_t*>(alloc_.alloc(sizeof(block_t) + size));
auto curr = static_cast<block_t*>(alloc_.alloc(aligned_block_size + size));
curr->next_ = list_;
return ((list_ = curr) + 1);
return (reinterpret_cast<byte_t*>(list_ = curr) + aligned_block_size);
}
};
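
Net effect of this hunk: scope_alloc::alloc() now reserves an aligned_block_size header in front of the user bytes (instead of a bare sizeof(block_t)), so the returned pointer stays max-aligned, and tear-down is split into free_all() plus destroying the nested allocator. A self-contained sketch of the same linked-block idea, with std::malloc/std::free standing in for the pluggable alloc_policy (names here are illustrative, not the library's):

    #include <cstddef>
    #include <cstdlib>

    // Simplified scope allocator: every allocation carries a header that links
    // it into a chain, and the whole chain is released in one sweep.
    class scope_list {
        struct block_t { block_t* next_; };
        enum : std::size_t {   // header rounded up so the user area keeps max_align_t alignment
            header = ((sizeof(block_t) - 1) & ~(alignof(std::max_align_t) - 1)) + alignof(std::max_align_t)
        };
        block_t* list_ = nullptr;
    public:
        void* alloc(std::size_t size) {
            auto* curr = static_cast<block_t*>(std::malloc(header + size));
            if (curr == nullptr) return nullptr;
            curr->next_ = list_;            // prepend to the chain
            list_ = curr;
            return reinterpret_cast<unsigned char*>(curr) + header;
        }
        void free_all() {                   // release every block at once
            while (list_ != nullptr) {
                block_t* curr = list_;
                list_ = list_->next_;
                std::free(curr);
            }
        }
        ~scope_list() { free_all(); }
    };
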
@ -99,13 +115,6 @@ public:
namespace detail {
template <std::size_t BlockSize>
struct fixed_expand_policy {
static std::size_t next(std::size_t & e) {
return (ipc::detail::max)(BlockSize, static_cast<std::size_t>(2048)) * (e *= 2);
}
};
class fixed_alloc_base {
protected:
std::size_t init_expand_;
@ -141,11 +150,18 @@ public:
}
};
struct fixed_expand_policy {
template <std::size_t BlockSize>
IPC_CONSTEXPR_ static std::size_t next(std::size_t & e) {
return ipc::detail::max<std::size_t>(BlockSize, (sizeof(void*) * 1024) / 2) * (e *= 2);
}
};
} // namespace detail
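
fixed_expand_policy is no longer a class template over BlockSize; it is a plain struct whose next<BlockSize>() doubles an exponent on every call, and the chunk floor is raised from 2048 bytes to (sizeof(void*) * 1024) / 2, i.e. 4 KiB on a 64-bit build. Assuming the exponent (init_expand_) starts at 1, chunks for a small block size grow as 8 KiB, 16 KiB, 32 KiB, ... A tiny illustration of that doubling, with std::max in place of ipc::detail::max:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    template <std::size_t BlockSize>
    std::size_t next_chunk(std::size_t& e) {
        // mirrors fixed_expand_policy::next<BlockSize>: a doubling exponent
        // times max(BlockSize, 4 KiB on a 64-bit target)
        return std::max<std::size_t>(BlockSize, (sizeof(void*) * 1024) / 2) * (e *= 2);
    }

    int main() {
        std::size_t e = 1;
        for (int i = 0; i < 4; ++i)
            std::printf("%zu\n", next_chunk<64>(e));   // 8192, 16384, 32768, 65536 on x64
        return 0;
    }
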
template <std::size_t BlockSize,
template <std::size_t> class ExpandP = detail::fixed_expand_policy,
typename AllocP = scope_alloc<>>
typename AllocP = scope_alloc<>,
typename ExpandP = detail::fixed_expand_policy>
class fixed_alloc : public detail::fixed_alloc_base {
public:
using base_t = detail::fixed_alloc_base;
@ -162,7 +178,7 @@ private:
if (this->cursor_ != nullptr) {
return this->cursor_;
}
auto size = ExpandP<block_size>::next(this->init_expand_);
auto size = ExpandP::template next<block_size>(this->init_expand_);
auto p = this->node_p(this->cursor_ = alloc_.alloc(size));
for (std::size_t i = 0; i < (size / block_size) - 1; ++i)
p = this->node_p((*p) = reinterpret_cast<byte_t*>(p) + block_size);
@ -175,16 +191,21 @@ public:
this->init(init_expand);
}
fixed_alloc(fixed_alloc&& rhs) { this->swap(rhs); }
fixed_alloc& operator=(fixed_alloc&& rhs) { this->swap(rhs); return (*this); }
fixed_alloc(fixed_alloc&& rhs) : fixed_alloc() { this->swap(rhs); }
fixed_alloc& operator=(fixed_alloc&& rhs) { this->swap(rhs); return (*this); }
template <typename A>
void set_allocator(A && alc) {
alloc_ = std::forward<A>(alc);
}
void swap(fixed_alloc& rhs) {
std::swap(this->alloc_, rhs.alloc_);
alloc_.swap(rhs.alloc_);
base_t::swap(rhs);
}
void clear() {
alloc_.clear();
alloc_.~alloc_policy();
this->init(this->init_expand_);
}
@ -200,7 +221,7 @@ public:
};
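
For context, try_expand() above threads an intrusive free list through the freshly expanded chunk: every block_size cell stores a pointer to the next cell, and cursor_ pops cells off that list. A standalone restatement of the threading step (hypothetical names, fixed 64-byte blocks, std::malloc in place of the pluggable AllocP):

    #include <cstddef>
    #include <cstdlib>

    // Thread a singly linked free list through `count` cells of `block_size`
    // bytes each; returns the head of the list (the chunk itself).
    void* thread_free_list(void* chunk, std::size_t block_size, std::size_t count) {
        auto* p = static_cast<unsigned char*>(chunk);
        for (std::size_t i = 0; i + 1 < count; ++i)
            *reinterpret_cast<void**>(p + i * block_size) = p + (i + 1) * block_size;
        *reinterpret_cast<void**>(p + (count - 1) * block_size) = nullptr;  // last cell ends the list
        return chunk;
    }

    int main() {
        constexpr std::size_t block_size = 64, count = 16;
        void* chunk  = std::malloc(block_size * count);
        void* cursor = thread_free_list(chunk, block_size, count);
        void* block  = cursor;                     // pop one block ...
        cursor = *static_cast<void**>(block);      // ... and advance to the stored next pointer
        (void)block; (void)cursor;
        std::free(chunk);
        return 0;
    }
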
////////////////////////////////////////////////////////////////
/// Variable-size blocks allocation
/// Variable-size blocks allocation (without alignment)
////////////////////////////////////////////////////////////////
namespace detail {
@ -211,23 +232,19 @@ protected:
head_t * next_;
size_t size_;
size_t free_;
} * head_ = nullptr;
enum : std::size_t {
aligned_head_size = aligned(sizeof(head_t), alignof(std::max_align_t))
};
char * head_, * tail_;
void init() {
// makes chain() return nullptr
head_ = tail_ = reinterpret_cast<char*>(sizeof(head_t));
}
head_t* chain() {
return reinterpret_cast<head_t*>(head_ - sizeof(head_t));
static byte_t * buffer(head_t* p) {
return reinterpret_cast<byte_t*>(p) + aligned_head_size;
}
public:
void swap(variable_alloc_base& rhs) {
std::swap(this->head_, rhs.head_);
std::swap(this->tail_, rhs.tail_);
}
void free(void* /*p*/) {}
@ -236,7 +253,7 @@ public:
} // namespace detail
template <std::size_t ChunkSize = 4096, typename AllocP = scope_alloc<>>
template <std::size_t ChunkSize = (sizeof(void*) * 1024), typename AllocP = static_alloc>
class variable_alloc : public detail::variable_alloc_base {
public:
using base_t = detail::variable_alloc_base;
@ -247,76 +264,61 @@ private:
alloc_policy alloc_;
head_t* alloc_head(std::size_t size) {
size = (ipc::detail::max)(ChunkSize, (ipc::detail::max)(size, sizeof(head_t)));
size = (ipc::detail::max)(ChunkSize, ipc::detail::max<std::size_t>(size, aligned_head_size));
head_t* p = static_cast<head_t*>(alloc_.alloc(size));
p->free_ = (p->size_ = size) - sizeof(head_t);
p->free_ = (p->size_ = size) - aligned_head_size;
return p;
}
void free_head(head_t* curr) {
alloc_.free(curr, curr->size_);
}
std::size_t remain() const {
return (tail_ - head_);
}
void* alloc_new_chunk(std::size_t size) {
head_t* p = alloc_head(sizeof(head_t) + size);
head_t* p = alloc_head(aligned_head_size + size);
if (p == nullptr) return nullptr;
head_t* list = chain();
if (size > (ChunkSize - sizeof(head_t)) && list != nullptr) {
p->next_ = list->next_;
list->next_ = p;
char* head = reinterpret_cast<char*>(p + 1);
char* tail = head + p->free_ - size;
p->free_ = tail - head;
return tail;
if (size > (ChunkSize - aligned_head_size) && head_ != nullptr) {
p->next_ = head_->next_;
head_->next_ = p;
return base_t::buffer(p) + (p->free_ -= size);
}
else {
p->next_ = list;
head_ = reinterpret_cast<char*>(p + 1);
tail_ = head_ + p->free_ - size;
p->free_ = remain();
return tail_;
p->next_ = head_;
return base_t::buffer(head_ = p) + (p->free_ -= size);
}
void free_all() {
while (head_ != nullptr) {
head_t* curr = head_;
head_ = head_->next_;
alloc_.free(curr, curr->size_);
}
// now head_ is nullptr
}
public:
variable_alloc() { this->init(); }
variable_alloc() = default;
variable_alloc(variable_alloc&& rhs) { this->swap(rhs); }
variable_alloc& operator=(variable_alloc&& rhs) { this->swap(rhs); return (*this); }
~variable_alloc() { clear(); }
~variable_alloc() { free_all(); }
template <typename A>
void set_allocator(A && alc) {
alloc_ = std::forward<A>(alc);
}
void swap(variable_alloc& rhs) {
std::swap(this->alloc_, rhs.alloc_);
alloc_.swap(rhs.alloc_);
base_t::swap(rhs);
}
void clear() {
head_t* list = chain();
while (list != nullptr) {
head_t* curr = list;
list = list->next_;
free_head(curr);
}
alloc_.clear();
this->init();
free_all();
alloc_.~alloc_policy();
}
void* alloc(size_t size) {
if (remain() < size) {
if ((head_ == nullptr) || head_->free_ < size) {
return alloc_new_chunk(size);
}
char* buff = tail_ - size;
if (buff < head_) {
return alloc_new_chunk(size);
}
tail_ = buff;
chain()->free_ = remain();
return tail_;
return base_t::buffer(head_) + (head_->free_ -= size);
}
};
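
The rewritten variable_alloc drops the head_/tail_ char pointers and the chain() arithmetic: it keeps a single head_t* and serves requests from the end of the current chunk, shrinking head_->free_ and returning buffer(head_) + free_. No per-request alignment is applied, hence the "(without alignment)" note added to the banner above. A compact sketch of that bump-from-the-end technique (one fixed-size chunk, no chunk chaining, error handling elided, names are illustrative):

    #include <cstddef>
    #include <cstdlib>

    class bump_chunk {
        struct head_t { std::size_t size_; std::size_t free_; };
        head_t* head_ = nullptr;

        unsigned char* buffer() const { return reinterpret_cast<unsigned char*>(head_ + 1); }
    public:
        explicit bump_chunk(std::size_t chunk_size = 4096) {
            head_ = static_cast<head_t*>(std::malloc(sizeof(head_t) + chunk_size));
            head_->size_ = sizeof(head_t) + chunk_size;
            head_->free_ = chunk_size;
        }
        ~bump_chunk() { std::free(head_); }

        // Carve `size` bytes off the end of the chunk; nullptr when exhausted
        // (the real allocator chains a new chunk instead).
        void* alloc(std::size_t size) {
            if (head_->free_ < size) return nullptr;
            return buffer() + (head_->free_ -= size);   // same idiom as variable_alloc::alloc above
        }
        void free(void*) {}   // individual frees are no-ops; the chunk is released as a whole
    };
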

View File

@ -18,8 +18,44 @@ namespace ipc {
namespace mem {
template <std::size_t Size>
using static_async_fixed = static_wrapper<async_wrapper<fixed_alloc<Size>>>;
using async_pool_alloc = variable_wrapper<static_async_fixed>;
using static_sync_fixed = static_wrapper<sync_wrapper<fixed_alloc<Size>>>;
namespace detail {
struct chunk_mapping_policy {
enum : std::size_t {
base_size = sizeof(void*) * 1024 * 1024, /* 8MB(x64) */
classes_size = 1
};
constexpr static std::size_t classify(std::size_t size) {
return (size <= base_size) ? 0 : classes_size;
}
};
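
chunk_mapping_policy defines a single size class of base_size bytes (sizeof(void*) * 1024 * 1024, i.e. 8 MiB on a 64-bit build, per the comment): anything at or below base_size maps to class 0, and anything larger falls outside the single class, presumably left to the wrapper's fallback allocator. A quick compile-time check of the mapping:

    #include <cstddef>

    enum : std::size_t {
        base_size    = sizeof(void*) * 1024 * 1024,   // 8 MiB on x64
        classes_size = 1
    };
    constexpr std::size_t classify(std::size_t size) {
        return (size <= base_size) ? 0 : classes_size;
    }

    static_assert(classify(4096)          == 0,            "small requests use the single fixed class");
    static_assert(classify(base_size)     == 0,            "the boundary itself is still class 0");
    static_assert(classify(base_size + 1) == classes_size, "larger requests fall outside the table");
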
template <typename AllocP>
struct chunk_alloc_recoverer {
public:
using alloc_policy = AllocP;
constexpr static void swap(chunk_alloc_recoverer &) {}
constexpr static void clear() {}
constexpr static void try_recover(alloc_policy &) {}
constexpr static void collect(alloc_policy &&) {}
};
} // namespace detail
using static_chunk_alloc = variable_wrapper<static_sync_fixed, detail::chunk_mapping_policy>;
using chunk_variable_alloc = variable_alloc<detail::chunk_mapping_policy::base_size, static_chunk_alloc>;
template <std::size_t Size>
using static_async_fixed =
static_wrapper<async_wrapper<fixed_alloc<Size, chunk_variable_alloc>, detail::chunk_alloc_recoverer>>;
using async_pool_alloc = variable_wrapper<static_async_fixed>;
//using async_pool_alloc = static_wrapper<async_wrapper<chunk_variable_alloc, detail::chunk_alloc_recoverer>>;
template <typename T>
using allocator = allocator_wrapper<T, async_pool_alloc>;
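
Net result of this file: static_chunk_alloc is a lock-protected fixed-pool front end keyed by chunk_mapping_policy, chunk_variable_alloc carves its 8 MiB chunks out of it, and each static_async_fixed<Size> pool now draws expansions from chunk_variable_alloc instead of the default scope_alloc; the no-op chunk_alloc_recoverer means dying per-thread pools are simply dropped rather than recycled. Assuming allocator_wrapper satisfies the standard Allocator requirements (its operator!= appears in the next file), the public alias plugs straight into containers; a usage sketch with an assumed include path:

    #include <vector>
    // #include "memory/resource.h"   // assumed location of ipc::mem::allocator in this repo

    // Every element is obtained through async_pool_alloc, i.e. through the
    // per-thread fixed pools that now sit on top of chunk_variable_alloc.
    std::vector<int, ipc::mem::allocator<int>> values;

    void demo() {
        values.push_back(42);   // the allocation goes through the pool chain above
    }
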

View File

@ -130,35 +130,65 @@ constexpr bool operator!=(const allocator_wrapper<T, AllocP>&, const allocator_w
////////////////////////////////////////////////////////////////
template <typename AllocP>
class default_alloc_recoverer {
public:
using alloc_policy = AllocP;
private:
ipc::spin_lock master_lock_;
std::vector<alloc_policy> master_allocs_;
public:
void swap(default_alloc_recoverer& rhs) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(master_lock_);
master_allocs_.swap(rhs.master_allocs_);
}
void clear() {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(master_lock_);
master_allocs_.clear();
}
void try_recover(alloc_policy & alc) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(master_lock_);
if (!master_allocs_.empty()) {
alc.swap(master_allocs_.back());
master_allocs_.pop_back();
}
}
void collect(alloc_policy && alc) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(master_lock_);
master_allocs_.emplace_back(std::move(alc));
}
};
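
default_alloc_recoverer factors the old master_lock_/master_allocs_ pair out of async_wrapper (below): a per-thread allocator that is going away is collect()-ed into a shared vector, and a newly created one can try_recover() it, so warmed-up pools migrate between threads instead of being rebuilt. A minimal round-trip showing that protocol (toy policy, std::mutex standing in for ipc::spin_lock):

    #include <mutex>
    #include <utility>
    #include <vector>

    struct toy_alloc {                        // stand-in for a real alloc_policy
        int warm_blocks = 0;                  // state worth recycling between threads
        void swap(toy_alloc& rhs) { std::swap(warm_blocks, rhs.warm_blocks); }
    };

    template <typename AllocP>
    class recoverer {
        std::mutex          lock_;
        std::vector<AllocP> master_;
    public:
        void collect(AllocP&& alc) {          // a thread-local allocator is dying
            std::lock_guard<std::mutex> g(lock_);
            master_.emplace_back(std::move(alc));
        }
        void try_recover(AllocP& alc) {       // a new thread-local allocator is born
            std::lock_guard<std::mutex> g(lock_);
            if (!master_.empty()) {
                alc.swap(master_.back());
                master_.pop_back();
            }
        }
    };

    int main() {
        recoverer<toy_alloc> r;
        toy_alloc dying;  dying.warm_blocks = 42;
        r.collect(std::move(dying));          // thread A exits, its pool is parked
        toy_alloc fresh;
        r.try_recover(fresh);                 // thread B starts and inherits the parked pool
        return fresh.warm_blocks == 42 ? 0 : 1;
    }
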
template <typename AllocP,
template <typename> class RecovererP = default_alloc_recoverer>
class async_wrapper {
public:
using alloc_policy = AllocP;
private:
spin_lock master_lock_;
std::vector<alloc_policy> master_allocs_;
RecovererP<alloc_policy> recoverer_;
class alloc_proxy : public AllocP {
async_wrapper * w_ = nullptr;
public:
alloc_proxy(alloc_proxy&& rhs)
alloc_proxy(alloc_proxy && rhs)
: AllocP(std::move(rhs))
{}
alloc_proxy(async_wrapper* w) : w_(w) {
alloc_proxy(async_wrapper* w)
: AllocP(), w_(w) {
if (w_ == nullptr) return;
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(w_->master_lock_);
if (!w_->master_allocs_.empty()) {
AllocP::swap(w_->master_allocs_.back());
w_->master_allocs_.pop_back();
}
w_->recoverer_.try_recover(*this);
}
~alloc_proxy() {
if (w_ == nullptr) return;
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(w_->master_lock_);
w_->master_allocs_.emplace_back(std::move(*this));
w_->recoverer_.collect(std::move(*this));
}
};
@ -170,13 +200,12 @@ private:
}
public:
~async_wrapper() {
clear();
void swap(async_wrapper& rhs) {
recoverer_.swap(rhs.recoverer_);
}
void clear() {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(master_lock_);
master_allocs_.clear();
recoverer_.clear();
}
void* alloc(std::size_t size) {
@ -188,6 +217,42 @@ public:
}
};
////////////////////////////////////////////////////////////////
/// Thread-safe allocation wrapper (with spin_lock)
////////////////////////////////////////////////////////////////
template <typename AllocP, typename MutexT = ipc::spin_lock>
class sync_wrapper {
public:
using alloc_policy = AllocP;
using mutex_type = MutexT;
private:
mutex_type lock_;
alloc_policy alloc_;
public:
void swap(sync_wrapper& rhs) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(lock_);
alloc_.swap(rhs.alloc_);
}
void clear() {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(lock_);
alloc_.~alloc_policy();
}
void* alloc(std::size_t size) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(lock_);
return alloc_.alloc(size);
}
void free(void* p, std::size_t size) {
IPC_UNUSED_ auto guard = ipc::detail::unique_lock(lock_);
alloc_.free(p, size);
}
};
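
sync_wrapper is the new coarse-grained counterpart to async_wrapper, used above by static_chunk_alloc: one shared policy instance and one lock taken around every call, with no per-thread proxies. A self-contained sketch of the same shape (std::mutex standing in for ipc::spin_lock; my_pool is a hypothetical policy with the expected interface):

    #include <cstddef>
    #include <cstdlib>
    #include <mutex>

    struct my_pool {                             // hypothetical policy the wrapper forwards to
        void  swap(my_pool&) {}
        void  clear() {}
        void* alloc(std::size_t size)    { return std::malloc(size); }
        void  free(void* p, std::size_t) { std::free(p); }
    };

    template <typename AllocP, typename MutexT = std::mutex>
    class locked_wrapper {                       // same shape as sync_wrapper above
        MutexT lock_;
        AllocP alloc_;
    public:
        void* alloc(std::size_t size) {
            std::lock_guard<MutexT> guard(lock_);
            return alloc_.alloc(size);
        }
        void free(void* p, std::size_t size) {
            std::lock_guard<MutexT> guard(lock_);
            alloc_.free(p, size);
        }
    };

    int main() {
        locked_wrapper<my_pool> shared;          // safe to share between threads
        void* p = shared.alloc(256);             // the lock is held only for the call
        shared.free(p, 256);
        return 0;
    }
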
////////////////////////////////////////////////////////////////
/// Static allocation wrapper
////////////////////////////////////////////////////////////////
@ -202,6 +267,8 @@ public:
return alloc;
}
static void swap(static_wrapper&) {}
static void clear() {
instance().clear();
}
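
static_wrapper exposes a stateful policy through static entry points backed by a single function-local instance, which is why the newly added static swap() can be a no-op. A minimal restatement of that pattern (hypothetical names):

    #include <cstddef>
    #include <cstdlib>

    struct my_policy {
        void  clear() {}
        void* alloc(std::size_t size)    { return std::malloc(size); }
        void  free(void* p, std::size_t) { std::free(p); }
    };

    template <typename AllocP>
    struct static_like {
        static AllocP& instance() {
            static AllocP alloc;                 // one lazily constructed, process-wide instance
            return alloc;
        }
        static void swap(static_like&) {}        // nothing per-object to swap
        static void  clear()                         { instance().clear(); }
        static void* alloc(std::size_t size)         { return instance().alloc(size); }
        static void  free(void* p, std::size_t size) { instance().free(p, size); }
    };

    // usage: static_like<my_policy>::alloc(64) from anywhere, no object to pass around
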
@ -246,8 +313,8 @@ const std::size_t default_mapping_policy<B>::table[default_mapping_policy<B>::cl
};
template <template <std::size_t> class Fixed,
typename StaticAlloc = mem::static_alloc,
typename MappingP = default_mapping_policy<>>
typename MappingP = default_mapping_policy<>,
typename StaticAlloc = mem::static_alloc>
class variable_wrapper {
template <typename F>
@ -260,10 +327,13 @@ class variable_wrapper {
}
public:
static void swap(variable_wrapper&) {}
static void clear() {
ipc::detail::static_for<MappingP::classes_size>([](auto index) {
Fixed<(decltype(index)::value + 1) * MappingP::base_size>::clear();
});
StaticAlloc::clear();
}
static void* alloc(std::size_t size) {
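
(The body of variable_wrapper::alloc() is cut off here.) The clear() above shows the dispatch technique variable_wrapper relies on: ipc::detail::static_for walks the MappingP size classes at compile time and touches each Fixed<(i + 1) * base_size> instantiation. A sketch of the same compile-time loop using std::index_sequence in place of the project's static_for (names and sizes are placeholders):

    #include <cstddef>
    #include <utility>

    constexpr std::size_t base_size = 4096;      // stand-in for MappingP::base_size

    template <std::size_t N>
    struct fixed_stub {                          // stand-in for Fixed<N>
        static void clear() { /* one instantiation per size class */ }
    };

    template <std::size_t... I>
    void clear_all_classes(std::index_sequence<I...>) {
        // visit fixed_stub<1 * base_size>, fixed_stub<2 * base_size>, ...
        int expand[] = { 0, (fixed_stub<(I + 1) * base_size>::clear(), 0)... };
        (void)expand;
    }

    int main() {
        clear_all_classes(std::make_index_sequence<8>{});   // 8 stands in for classes_size
        return 0;
    }
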