some cleanup

master
Shuxin Yang 2014-08-27 17:00:21 -07:00
parent f6c4ca0d94
commit 1db717f783
13 changed files with 142 additions and 38 deletions

View File

@ -6,8 +6,8 @@ default : all
AR_NAME := libljmm.a
SO_NAME := libljmm.so
OPT_FLAGS = -O3 -march=native -g -DDEBUG
CFLAGS = -fvisibility=hidden -MMD -Wall $(OPT_FLAGS)
OPT_FLAGS = -O3 -g -march=native -DDEBUG
CFLAGS = -DENABLE_TESTING -fvisibility=hidden -MMD -Wall $(OPT_FLAGS)
CXXFLAGS = $(CFLAGS)
# Addition flag for building libljmm.a and libljmm.so respectively.

View File

@ -75,6 +75,12 @@
* 5. run application with libptmalloc3.so and libadaptor.so, e.g.
* LD_PRELOAD="/the/path/to/libptmalloc3.so /the/path/to/libadaptor.so" \
* /my/application [arguments]
*
* Miscellaneous
* -------------
* Some functionalities can be turned on/off via following environment variables.
* - ENABLE_LJMM = {0|1}
* - ENABLE_LJMM_TRACE = {0|1}
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
@ -94,7 +100,7 @@
* right after main() is hit.
*/
static int enable_ljmm = 1;
static int enable_trace = 1;
static int enable_trace = 0;
/* Set to non-zero after initialization is successfully done.*/
static int init_done = 0;
@ -126,6 +132,16 @@ init_before_main() {
}
}
t = getenv("ENABLE_LJMM_TRACE");
if (t) {
int res = sscanf(t, "%d", &enable_trace);
if (res < 0 || t[res] != '\0' ||
(enable_trace != 0 && enable_trace != 1)) {
fprintf(stderr, "ENABLE_LJMM_TRACE={0|1}\n");
enable_trace = 0;
}
}
if (enable_ljmm)
init_adaptor();
}
@ -166,8 +182,7 @@ __wrap_mmap64(void *addr, size_t length, int prot, int flags,
/* 32-bit mmap entry point. Delegates to the 64-bit wrapper so both entry
 * points share a single implementation; the off_t offset is widened
 * implicitly by the call.
 */
void*
__wrap_mmap(void *addr, size_t length, int prot, int flags,
            int fd, off_t offset) {
    return __wrap_mmap64(addr, length, prot, flags, fd, offset);
}
int
__wrap_munmap(void *addr, size_t length) {

View File

@ -23,9 +23,6 @@
#define LRU_MAX_ENTRY 64
#define INVALID_LRU_IDX (-1)
/* About 2M if page-size is 4k in byte. */
#define MAX_CACHE_PAGE_NUM 512
typedef struct blk_lru {
page_idx_t start_page;
short order;
@ -43,10 +40,18 @@ typedef struct {
int total_page_num;
} block_cache_t;
/* Block-cache parameters */
static int MAX_CACHE_PAGE_NUM = 512;
static block_cache_t* blk_cache;
static char enable_blk_cache = 1;
static char enable_blk_cache = 0;
static char blk_cache_init = 0;
/***************************************************************************
*
* LRU related functions
*
***************************************************************************
*/
static void
lru_init() {
int i;
@ -140,6 +145,12 @@ lru_popback(void) {
return 0;
}
/***************************************************************************
*
* block-cache related functions
*
***************************************************************************
*/
int
bc_init(void) {
if (unlikely(blk_cache_init))
@ -221,8 +232,7 @@ bc_remove_block(page_idx_t start_page, int order, int zap_page) {
if (!rbt_delete(blk_cache->blks, start_page, &idx))
return 0;
blk_lru_t* lru = blk_cache->lru_v + idx;
ASSERT(lru->order == order);
ASSERT(blk_cache->lru_v[idx].order == order);
blk_cache->total_page_num -= (1 << order);
ASSERT(blk_cache->total_page_num >= 0);
@ -244,3 +254,12 @@ bc_evict_oldest() {
return 1;
}
/* Configure the block-cache parameters.
 *
 * enable_bc         : zero disables the cache, non-zero enables it.
 * cache_sz_in_page  : new cache capacity in pages; a non-positive value
 *                     leaves the current (default) capacity untouched.
 *
 * Always returns 1 (success).
 */
int
bc_set_parameter(int enable_bc, int cache_sz_in_page) {
    enable_blk_cache = enable_bc;
    if (cache_sz_in_page > 0) {
        MAX_CACHE_PAGE_NUM = cache_sz_in_page;
    }
    return 1;
}

View File

@ -6,6 +6,8 @@
struct blk_lru;
typedef struct blk_lru blk_lru_t;
int bc_set_parameter(int enable_bc, int cache_sz_in_page);
int bc_init(void);
int bc_fini(void);
int bc_add_blk(page_idx_t start_page, int order);

10
chunk.c
View File

@ -1,3 +1,7 @@
/* This file is to allocate a big chunk of memory right after .bss. Subsequent
 * lm_mmap() is to serve the allocation request by carving smaller blocks out
 * of this big chunk of memory.
 */
#include <sys/mman.h>
#include <unistd.h>
#include <stdlib.h>
@ -25,6 +29,9 @@ lm_alloc_chunk (void) {
uintptr_t cur_brk = (uintptr_t)sbrk(0);
uintptr_t page_sz = sysconf(_SC_PAGESIZE);
/* The chunk of memory must be page-aligned, and a multiple of pages in size.
 */
cur_brk = (page_sz - 1 + cur_brk) & ~(page_sz - 1);
uint avail = LJMM_AS_UPBOUND - ((intptr_t)cur_brk);
@ -41,6 +48,9 @@ lm_alloc_chunk (void) {
if (!chunk)
return NULL;
/* If the program linked against this lib generates a core dump, do not dump
 * those portions which are not allocated at all.
 */
madvise((void*)chunk, avail, MADV_DONTNEED|MADV_DONTDUMP);
big_chunk.base = (char*)chunk;

View File

@ -3,16 +3,15 @@
#include <stdio.h> /* for FILE */
#define LJMM_ADDR_UPBOUND ((unsigned int)0x80000000)
/* "Huge" chunk of memory. Memory allocations are to carve blocks
 * from the big chunk.
 */
typedef struct {
char* base; /* the starting address of the big chunk */
char* start; /* the starting address of the usable portion */
unsigned alloc_size; /* the size of the big chunk */
unsigned usable_size; /* the size of the usable portion.
char* start; /* "base" + page-alignment */
unsigned alloc_size; /* the size of the entire chunk */
unsigned usable_size; /* the size of the usable portion,
* must be multiple of page size.
* usable_size = page_num * page_size.
*/
unsigned page_num; /* number of available pages */

4
demo.c
View File

@ -3,6 +3,7 @@
#include <sys/mman.h>
#include "lj_mm.h"
/* TODO: this demo is junk, we need better demo... */
static void*
mmap_wrap(size_t len) {
return lm_mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_32BIT | MAP_PRIVATE, -1, 0);
@ -10,6 +11,7 @@ mmap_wrap(size_t len) {
int
main(int argc, char** argv) {
#if defined(DEBUG)
lm_init(1);
dump_page_alloc(stderr);
@ -48,6 +50,6 @@ main(int argc, char** argv) {
fprintf(stderr, "\n\nAfter delete all allocations\n");
dump_page_alloc(stderr);
/*lm_fini(); */
#endif
return 0;
}

28
lj_mm.h
View File

@ -18,12 +18,32 @@ extern "C" {
* and testing purpose.
*/
typedef struct {
/* < 0 : the initial chunk contains as many pages as possible:
* otherwise : the init chunk contains *exactly* as many pages as specified.
*/
int page_num;
int chunk_sz_in_page; /* "< 0" : default behavior */
int enable_block_cache; /* 0 : disable, 1: enable */
int blk_cache_in_page; /* "< 0": use default value */
} lj_mm_opt_t;
/* Fill *opt with the default memory-management options: block cache
 * enabled, chunk size and cache size left to their built-in defaults
 * (negative means "use default" — see lj_mm_opt_t).
 */
static inline void
lm_init_mm_opt(lj_mm_opt_t* opt) {
    opt->enable_block_cache = 1;  /* cache on by default */
    opt->chunk_sz_in_page = -1;   /* "< 0": default chunk size */
    opt->blk_cache_in_page = -1;  /* "< 0": default cache capacity */
}
/* All exported symbols are prefixed with ljmm_ to reduce the chance of
* conflicting with applications being benchmarked.
*/
#define lm_init ljmm_init
#define lm_init2 ljmm_init2
#define lm_fini ljmm_fini
#define lm_mmap ljmm_mmap
#define lm_munmap ljmm_munmap
#define lm_mremap ljmm_mremap
#define lm_malloc ljmm_malloc
#define lm_free ljmm_free
/* Initialize the memory-management system. If auto_fini is set
 * (i.e. auto_fini != 0), there is no need to call lm_fini() at exit.
 */

View File

@ -1,3 +1,6 @@
/* This file contains the implementation to following exported functions:
* lm_mmap(), lm_munmap(), lm_mremap(), lm_malloc(), lm_free().
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
@ -11,6 +14,9 @@
/* Forward Decl */
static int lm_unmap_helper(void* addr, size_t um_size);
/* For allocating "big" blocks (about one page in size, or across multiple
* pages). The return value is page-aligned.
*/
void*
lm_malloc(size_t sz) {
errno = 0;
@ -78,23 +84,27 @@ lm_free(void* mem) {
return 0;
long page_sz = alloc_info->page_size;
if (unlikely ((ofst & (page_sz - 1))))
if (unlikely ((ofst & (page_sz - 1)))) {
/* the lm_malloc()/lm_mmap() return page-aligned block */
return 0;
}
long page_id = ofst >> log2_int32(page_sz);
long page_idx = ofst >> log2_int32(page_sz);
int page_num = alloc_info->page_num;
if (unlikely(page_id >= page_num))
if (unlikely(page_idx >= page_num))
return 0;
lm_page_t* pi = alloc_info->page_info;
lm_page_t* page = pi + page_id;
lm_page_t* page = pi + page_idx;
/* Check to see if it's a previously allocated block */
if (unlikely(!is_page_leader(page)))
return 0;
if (unlikely(!is_allocated_blk(page)))
return 0;
return free_block(page_id);
return free_block(page_idx);
}
/*****************************************************************************
@ -103,6 +113,10 @@ lm_free(void* mem) {
*
*****************************************************************************
*/
/* lm_mremap() helper. Return NULL instead of MAP_FAILED in case it was not
 * successful. It also tries to set errno if it fails.
 */
static void*
lm_mremap_helper(void* old_addr, size_t old_size, size_t new_size, int flags) {
long ofst = ((char*)old_addr) - ((char*)alloc_info->first_page);
@ -131,7 +145,9 @@ lm_mremap_helper(void* old_addr, size_t old_size, size_t new_size, int flags) {
int old_page_num = (old_size + page_sz - 1) >> page_sz_log2;
int new_page_num = (new_size + page_sz - 1) >> page_sz_log2;
/* Shrink the existing allocated block */
/* case 1: Shrink the existing allocated block by reducing the number of
* mapping pages.
*/
if (old_page_num > new_page_num) {
char* unmap_start = (char*)alloc_info->first_page +
(new_page_num << page_sz_log2);
@ -144,7 +160,7 @@ lm_mremap_helper(void* old_addr, size_t old_size, size_t new_size, int flags) {
return NULL;
}
/* Expand the existing allocated block */
/* case 2: Expand the existing allocated block by adding more pages. */
if (old_page_num < new_page_num) {
int order = alloc_info->page_info[page_idx].order;
/* Block is big enough to accommodate the old-size byte.*/
@ -172,11 +188,14 @@ lm_mremap_helper(void* old_addr, size_t old_size, size_t new_size, int flags) {
return NULL;
}
/* case 3: Change the mapping size, but we don't need to change the number
* of mapping pages, as the new- and old-end of mapping area reside in
* the same block.
*/
ASSERT(old_page_num == new_page_num);
rbt_set_value(&alloc_info->alloc_blks, page_idx, new_size);
return old_addr;
}
return old_addr; }
void*
lm_mremap(void* old_addr, size_t old_size, size_t new_size, int flags) {
@ -198,7 +217,7 @@ typedef struct {
int order; /* The order of the mapped block */
int m_page_idx; /* The index of the 1st page of the mapped block*/
int m_end_idx;
int um_page_idx;
int um_page_idx; /* The index of the 1st page to be unmapped*/
int um_end_idx;
size_t m_size; /* The mmap size in byte.*/
} unmap_info_t;
@ -281,6 +300,7 @@ unmap_higher_part(const unmap_info_t* ui) {
return split;
}
/* Helper function of lm_munmap() */
static int
lm_unmap_helper(void* addr, size_t um_size) {

View File

@ -13,7 +13,7 @@
/* Forward Decl */
lm_alloc_t* alloc_info = NULL;
/* Initialize the page allocator, return 0 on success, 1 otherwise. */
/* Initialize the page allocator, return 1 on success, 0 otherwise. */
int
lm_init_page_alloc(lm_chunk_t* chunk, lj_mm_opt_t* mm_opt) {
if (!chunk) {
@ -28,10 +28,15 @@ lm_init_page_alloc(lm_chunk_t* chunk, lj_mm_opt_t* mm_opt) {
int page_num = chunk->page_num;
if (unlikely(mm_opt != NULL)) {
int pn = mm_opt->page_num;
int pn = mm_opt->chunk_sz_in_page;
if (((pn > 0) && (pn > page_num)) || !pn)
return 0;
page_num = pn;
if (!bc_set_parameter(mm_opt->enable_block_cache,
mm_opt->blk_cache_in_page)) {
return 0;
}
}
int alloc_sz = sizeof(lm_alloc_t) +
@ -135,7 +140,11 @@ extend_alloc_block(page_idx_t block_idx, size_t new_sz) {
rb_tree_t* rbt = &alloc_info->alloc_blks;
intptr_t alloc_sz;
int res = rbt_search(rbt, block_idx, &alloc_sz);
#ifdef DEBUG
ASSERT(res);
#else
(void)res;
#endif
int page_sz = alloc_info->page_size;
int page_sz_log2 = alloc_info->page_size_log2;

View File

@ -103,7 +103,7 @@ verify_order(page_idx_t blk_leader, int order) {
return 0 == (page_idx_to_id(blk_leader) & ((1<<order) - 1));
}
static int
static inline int
find_block(page_idx_t block, int order, intptr_t* value) {
ASSERT(order >= 0 && order <= alloc_info->max_order &&
verify_order(block, order));
@ -114,10 +114,13 @@ find_block(page_idx_t block, int order, intptr_t* value) {
/* If zap_pages is set, the corresponding pages will be removed via madvise()*/
static inline int
remove_free_block(page_idx_t block, int order, int zap_pages) {
#ifdef DEBUG
{
lm_page_t* page = alloc_info->page_info + block;
ASSERT(page->order == order && find_block(block, order, NULL));
ASSERT(!is_allocated_blk(page) && verify_order(block, order));
}
#endif
bc_remove_block(block, order, zap_pages);
@ -175,7 +178,11 @@ static inline void
migrade_alloc_block(page_idx_t block, int ord_was, int ord_is, size_t new_map_sz) {
rb_tree_t* rbt = &alloc_info->alloc_blks;
int res = rbt_set_value(rbt, block, new_map_sz);
#ifdef DEBUG
ASSERT(res != 0 && alloc_info->page_info[block].order == ord_was);
#else
(void)res;
#endif
alloc_info->page_info[block].order = ord_is;
}

View File

@ -1,6 +1,4 @@
#ifdef DEBUG
#include <stdio.h>
#endif
#include <stdlib.h>
#include "rbtree.h"

View File

@ -92,7 +92,10 @@ MemExt::getStartAddr() const {
UNIT_TEST::UNIT_TEST(int test_id, int page_num)
: _test_id(test_id) {
lj_mm_opt_t mm_opt;
mm_opt.page_num = _page_num = page_num;
lm_init_mm_opt(&mm_opt);
mm_opt.chunk_sz_in_page = _page_num = page_num;
mm_opt.enable_block_cache = 0;
_init_succ = lm_init2(0, &mm_opt);
_test_succ = _init_succ ? true : false;
_page_size = sysconf(_SC_PAGESIZE);