Merge pull request #11 from coldtobi/bugfixes_mmap

Bugfixes mmap
sstefani 2022-06-24 13:14:22 +02:00 committed by GitHub
commit 237bf3aa60
2 changed files with 73 additions and 37 deletions


@@ -604,7 +604,7 @@ static struct rb_block *process_rb_search_range(struct rb_root *root, unsigned l
while (node) {
struct rb_block *this = container_of(node, struct rb_block, node);
if ((addr <= this->addr) && (addr + size > this->addr))
if ((this->addr <= addr) && (this->addr + this->size > addr))
return this;
if (addr < this->addr)
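The fix above changes what the range search matches: the old test only hit blocks whose start address falls inside [addr, addr + size), while the new test returns the block that contains addr itself, which is what the page-by-page walk added to process_munmap below relies on. A minimal standalone sketch of the two predicates, with a hypothetical block_t and sample values that are not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for struct rb_block */
typedef struct { unsigned long addr, size; } block_t;

/* old test: does the block *start* inside [addr, addr + size)? */
static bool overlaps_start(const block_t *b, unsigned long addr, unsigned long size)
{
	return addr <= b->addr && addr + size > b->addr;
}

/* new test: does the block *contain* addr? */
static bool contains_addr(const block_t *b, unsigned long addr)
{
	return b->addr <= addr && b->addr + b->size > addr;
}

int main(void)
{
	block_t b = { .addr = 0x1000, .size = 0x4000 };	/* one 16 KiB mapping */

	/* munmap of one page in the middle of the mapping */
	printf("old predicate: %d, new predicate: %d\n",
	       overlaps_start(&b, 0x2000, 0x1000),	/* 0: block start not in range */
	       contains_addr(&b, 0x2000));		/* 1: 0x2000 lies inside the block */
	return 0;
}

With the old predicate, unmapping a page in the middle of an existing mapping found no block at all; with the new one the owning block is found and can be split.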
@@ -1298,20 +1298,32 @@ void process_munmap(struct process *process, struct mt_msg *mt_msg, void *payloa
do {
block = process_rb_search_range(&process->block_table, ptr, size);
if (!block)
break;
if (!is_mmap(block->stack_node->stack->operation)) {
if (unlikely(options.kill)) {
fprintf(stderr, ">>> block missmatch pid:%d MAP<>MALLOC %#lx\n", process->pid, ptr);
abort();
}
break;
if (block && !is_mmap(block->stack_node->stack->operation)) {
// ignore blocks not mmap'ed
block = NULL;
}
if (!block) {
// printf("## no block found for %lx, %ld. Trying next page\n", ptr, size);
// it is legal to unmap arbitrary addresses, so ptr might actually be before the mmap and the range might span multiple mmap areas.
// so we might need to keep searching our blocks.
// eg this is legal: void* p=mmap(0,512*1024, PROT_WRITE|PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); munmap(p+4096,4096); munmap(p-4096, 512*1024+4096);
// FIXME pagesize is hardcoded as 4096 bytes, which should be safe (AFAIK 4k is the minimum "out there".) A more efficient way would be to transmit
// the target's PAGE_SIZE on startup.
if (size > 4096) {
size -= 4096;
ptr += 4096;
continue;
}
else {
break;
}
}
// ptr in block -- this is already checked.
if (block->addr >= ptr) {
unsigned off = block->addr - ptr;
unsigned long off = block->addr - ptr;
size -= off;
ptr += off;
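The long comment in this hunk is the core of the fix: munmap() accepts any page-aligned range, including one that only partially overlaps a tracked mapping or spans several of them, so the loop now walks the range page by page (with the page size hard-coded to 4096 for now, per the FIXME). A small standalone program demonstrating that such partial and spanning calls are legal; the sizes are illustrative only:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 512 * 1024;

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* punch a one-page hole in the middle of the mapping: legal */
	if (munmap(p + page, page) != 0)
		return 1;

	/* unmap a range that spans both pieces left over from the hole,
	 * plus the already-unmapped page in between: also legal */
	if (munmap(p, len) != 0)
		return 1;

	printf("partial and spanning munmap calls succeeded (page size %ld)\n", page);
	return 0;
}

A tracker that only looked up the exact (addr, size) pair would miss both calls, which is what the page walk above addresses.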
@@ -1331,33 +1343,32 @@ void process_munmap(struct process *process, struct mt_msg *mt_msg, void *payloa
process_rb_delete_block(process, block);
}
else {
unsigned off = ptr - block->addr;
unsigned long off = ptr - block->addr;
if (off + size < block->size) {
unsigned long new_addr = block->addr + (off + size);
unsigned long new_size = block->size - (off + size);
process_release_mem(process, block, block->size - off - new_size);
process_release_mem(process, block, block->size - off);
block->size = off;
if (process_rb_insert_block(process, new_addr, new_size, block->stack_node, 0, mt_msg->operation))
break;
process->n_allocations++;
process->total_allocations++;
process->bytes_used += new_size;
break;
}
else {
// freeing a chunk at the end of the mmap block.
size_t amount_freed = block->size - off;
process_release_mem(process, block, amount_freed);
process_release_mem(process, block, off);
block->addr += off;
block->size -= off;
size -= block->size;
ptr += block->size;
block->size -= amount_freed;
size -= amount_freed;
ptr += amount_freed;
}
}
} while(size);
}
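When the unmapped range ends inside a tracked block, the block is now split: the tail past the hole is re-inserted as a new block and the counters are bumped, so bytes_used still drops by exactly the unmapped size while n_allocations reflects the extra live block. A worked example of that arithmetic with illustrative numbers (not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* illustrative tracked mmap block: 512 KiB */
	unsigned long block_size = 512 * 1024;

	/* munmap() of 4 KiB at offset 64 KiB splits the block in two */
	unsigned long off = 64 * 1024, size = 4 * 1024;

	unsigned long new_size = block_size - (off + size);	/* tail kept past the hole */
	unsigned long released = block_size - off;		/* released from the original block */
	unsigned long readded  = new_size;			/* re-inserted as a new block */

	/* net drop in bytes_used is exactly the unmapped size */
	printf("released %lu, re-added %lu, net %lu (expected %lu)\n",
	       released, readded, released - readded, size);
	return 0;
}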
@@ -1489,7 +1500,7 @@ void process_free(struct process *process, struct mt_msg *mt_msg, void *payload)
void process_realloc_done(struct process *process, struct mt_msg *mt_msg, void *payload)
{
unsigned long ptr;
unsigned int pid;
unsigned long pid;
struct list_head *it;
(void)mt_msg;
@@ -1525,7 +1536,7 @@ void process_realloc_done(struct process *process, struct mt_msg *mt_msg, void *
}
if (unlikely(options.kill)) {
fprintf(stderr, ">>> unexpected realloc done pid: %u ptr: %#lx\n", pid, ptr);
fprintf(stderr, ">>> unexpected realloc done pid: %lu ptr: %#lx\n", pid, ptr);
abort();
}
return;


@@ -241,6 +241,9 @@ static void _report_calloc(struct task *task, struct library_symbol *libsym)
report_alloc(task, MT_MALLOC, ret, size, options.bt_depth, libsym);
}
static ssize_t arch_pagesize = -1;
static void _report_mmap(struct task *task, struct library_symbol *libsym)
{
unsigned long ret = fetch_retval(task);
@@ -249,6 +252,11 @@ static void _report_mmap(struct task *task, struct library_symbol *libsym)
return;
unsigned long size = fetch_param(task, 1);
if (unlikely(arch_pagesize==-1)) arch_pagesize=getpagesize();
// fix up size: if size is not a multiple of the page size, we get the "partial" page too.
if (size % arch_pagesize) {
size += arch_pagesize - size % arch_pagesize;
}
report_alloc(task, MT_MMAP, ret, size, options.bt_depth, libsym);
}
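The size fix-up added here (and repeated in _report_mmap64 and _report_munmap below) is a plain round-up to the next page boundary, since the kernel always maps whole pages. A standalone sketch of that rounding; round_up_to_page is a hypothetical helper name, not part of the patch:

#include <stdio.h>
#include <unistd.h>

/* hypothetical helper: round a length up to a whole number of pages */
static unsigned long round_up_to_page(unsigned long size, long pagesize)
{
	if (size % pagesize)
		size += pagesize - size % pagesize;
	return size;
}

int main(void)
{
	long pagesize = getpagesize();	/* the same call the patch caches in arch_pagesize */

	/* on a 4096-byte-page system a 5000-byte mmap occupies two pages (8192 bytes) */
	printf("%lu -> %lu\n", 5000UL, round_up_to_page(5000, pagesize));
	printf("%lu -> %lu\n", 8192UL, round_up_to_page(8192, pagesize));
	return 0;
}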
@@ -277,17 +285,34 @@ static void _report_mmap64(struct task *task, struct library_symbol *libsym)
else
size.l = fetch_param(task, 1);
if (unlikely(arch_pagesize == -1)) arch_pagesize=getpagesize();
// fix up size: if size is not a multiple of the page size, we get the "partial" page too.
if (size.l % arch_pagesize) {
size.l += arch_pagesize - size.l % arch_pagesize;
}
report_alloc(task, MT_MMAP64, ret, size.l, options.bt_depth, libsym);
}
static void report_munmap(struct task *task, struct library_symbol *libsym)
static void _report_munmap(struct task *task, struct library_symbol *libsym)
{
unsigned long addr = fetch_param(task, 0);
unsigned long size = fetch_param(task, 1);
unsigned long ret = fetch_retval(task);
if(ret != 0 ) return;
if(unlikely(arch_pagesize==-1)) arch_pagesize=getpagesize();
// fixup size, if needed: all pages in [addr, addr+size] are unmapped -- see munmap(2)
if(size % arch_pagesize) {
size += arch_pagesize - size % arch_pagesize;
}
report_alloc(task, MT_MUNMAP, addr, size, 0, libsym);
}
static void _report_memalign(struct task *task, struct library_symbol *libsym)
{
unsigned long size = fetch_param(task, 1);
@@ -344,20 +369,20 @@ static void _report_pvalloc(struct task *task, struct library_symbol *libsym)
return report_alloc(task, MT_PVALLOC, ret, size, options.bt_depth, libsym);
}
static void report_mremap(struct task *task, struct library_symbol *libsym)
{
unsigned long addr = fetch_param(task, 0);
unsigned long size = fetch_param(task, 1);
report_alloc(task, MT_MUNMAP, addr, size, 0, libsym);
}
static void _report_mremap(struct task *task, struct library_symbol *libsym)
{
unsigned long size = fetch_param(task, 2);
unsigned long addr = fetch_param(task, 0);
unsigned long oldsize = fetch_param(task, 1);
unsigned long newsize = fetch_param(task, 2);
unsigned long ret = fetch_retval(task);
report_alloc(task, MT_MMAP, ret, size, options.bt_depth, libsym);
if( (void*)ret != MAP_FAILED) {
// mremap(2): if oldsize is zero and the mapping is a shared mapping, a new mapping
// (of the existing one) will be created.
if (oldsize) report_alloc(task, MT_MUNMAP, addr, oldsize, 0, libsym);
report_alloc(task, MT_MMAP, ret, newsize, options.bt_depth, libsym);
}
}
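The rewritten _report_mremap mirrors the mremap(2) contract: on success the old range (when oldsize is non-zero) is no longer mapped and newsize bytes are mapped at the possibly relocated return address, hence one MT_MUNMAP plus one MT_MMAP report. A minimal usage sketch of that contract; the sizes are arbitrary examples:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t oldsize = 4096, newsize = 4 * 4096;

	void *p = mmap(NULL, oldsize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* grow the mapping; the kernel may move it, so the old address must be
	 * treated as unmapped and the returned one as the live mapping */
	void *q = mremap(p, oldsize, newsize, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("mremap %s the mapping: %p -> %p (%zu bytes)\n",
	       q == p ? "grew" : "moved", p, q, newsize);

	munmap(q, newsize);
	return 0;
}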
static const struct function flist[] = {
@@ -368,12 +393,12 @@ static const struct function flist[] = {
{ "posix_memalign", "posix_memalign", 0, NULL, _report_posix_memalign },
{ "mmap", "mmap", 0, NULL, _report_mmap },
{ "mmap64", "mmap64", 1, NULL, _report_mmap64 },
{ "munmap", "munmap", 0, report_munmap, NULL },
{ "munmap", "munmap", 0, NULL, _report_munmap },
{ "memalign", "memalign", 0, NULL, _report_memalign },
{ "aligned_alloc", "aligned_alloc", 1, NULL, _report_aligned_alloc },
{ "valloc", "valloc", 1, NULL, _report_valloc },
{ "pvalloc", "pvalloc", 1, NULL, _report_pvalloc },
{ "mremap", "mremap", 0, report_mremap, _report_mremap },
{ "mremap", "mremap", 0, NULL, _report_mremap },
{ "cfree", "cfree", 1, report_free, NULL },
{ "reallocarray", "reallocarray", 0, NULL, _report_reallocarray },
#if 0