Garbage collector colours change (#9756)

master
Sadiq Jaffer 2020-09-17 16:24:04 +01:00 committed by GitHub
parent f6a5b755f8
commit c10217818f
9 changed files with 329 additions and 184 deletions

View File

@@ -24,6 +24,11 @@ Working version
 ### Runtime system:
+- #9756: garbage collector colors change
+  removes the gray color from the major gc
+  (Sadiq Jaffer and Stephen Dolan, reviewed by Xavier Leroy,
+  KC Sivaramakrishnan, Damien Doligez and Jacques-Henri Jourdan)
 - #1795, #9543: modernize signal handling on Linux i386, PowerPC, and s390x,
   adding support for Musl ppc64le along the way.
   (Xavier Leroy and Anil Madhavapeddy, review by Stephen Dolan)
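
Background for this entry: the 4.x runtime keeps a block's GC colour in two bits of the header word. Before this patch the major GC used all four encodings; gray meant "reached, but fields not yet scanned" and such blocks sat in the gray_vals array. After the patch, a reached block whose fields still need scanning is simply black with an entry on the new mark stack. A standalone sketch of the encoding (bit positions and macro shapes follow runtime/caml/gc.h; the demo header value is invented):

    /* Sketch of the two-bit colour field (bits 8-9 of a header word). */
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t header_t;

    #define Caml_white (0 << 8)  /* unreached so far, or garbage           */
    #define Caml_gray  (1 << 8)  /* reached, fields unscanned: now unused  */
    #define Caml_blue  (2 << 8)  /* on the free list                       */
    #define Caml_black (3 << 8)  /* reached; unscanned fields are tracked
                                    by a mark-stack entry instead          */

    #define Color_hd(hd)   ((hd) & Caml_black)
    #define Blackhd_hd(hd) (((hd) & ~Caml_black) | Caml_black)

    int main(void) {
      header_t hd = (42 << 10) | Caml_white;  /* invented white header */
      hd = Blackhd_hd(hd);                    /* darken white -> black */
      printf("colour bits now: %#lx\n", (unsigned long) Color_hd(hd));
      return 0;
    }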

View File

@@ -36,6 +36,9 @@ DOMAIN_STATE(struct caml_ephe_ref_table*, ephe_ref_table)
 DOMAIN_STATE(struct caml_custom_table*, custom_table)
 /* See minor_gc.c */
+DOMAIN_STATE(struct mark_stack*, mark_stack)
+/* See major_gc.c */
+
 DOMAIN_STATE(value*, stack_low)
 DOMAIN_STATE(value*, stack_high)
 DOMAIN_STATE(value*, stack_threshold)

View File

@@ -68,7 +68,6 @@ extern uintnat caml_spacetime_my_profinfo(struct ext_table**, uintnat);
 #endif
 #define Is_white_val(val) (Color_val(val) == Caml_white)
-#define Is_gray_val(val) (Color_val(val) == Caml_gray)
 #define Is_blue_val(val) (Color_val(val) == Caml_blue)
 #define Is_black_val(val) (Color_val(val) == Caml_black)

View File

@@ -26,12 +26,16 @@ typedef struct {
   asize_t alloc;           /* in bytes, used for compaction */
   asize_t size;            /* in bytes */
   char *next;
+  value* redarken_start;   /* first block in chunk to redarken */
+  value* redarken_end;     /* last block in chunk that needs redarkening */
 } heap_chunk_head;

 #define Chunk_size(c) (((heap_chunk_head *) (c)) [-1]).size
 #define Chunk_alloc(c) (((heap_chunk_head *) (c)) [-1]).alloc
 #define Chunk_next(c) (((heap_chunk_head *) (c)) [-1]).next
 #define Chunk_block(c) (((heap_chunk_head *) (c)) [-1]).block
+#define Chunk_redarken_start(c) (((heap_chunk_head *) (c)) [-1]).redarken_start
+#define Chunk_redarken_end(c) (((heap_chunk_head *) (c)) [-1]).redarken_end

 extern int caml_gc_phase;
 extern int caml_gc_subphase;
@@ -80,6 +84,7 @@ void caml_init_major_heap (asize_t); /* size in bytes */
 asize_t caml_clip_heap_chunk_wsz (asize_t wsz);
 void caml_darken (value, value *);
 void caml_major_collection_slice (intnat);
+void caml_shrink_mark_stack ();
 void major_collection (void);
 void caml_finish_major_cycle (void);
 void caml_set_major_window (int);
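
The Chunk_* accessors above rely on each chunk's header being stored immediately before the data it describes, so any chunk pointer c can reach its own header through a [-1] index. A minimal standalone illustration of that layout trick (simplified header and plain malloc, not the runtime's allocator):

    #include <stdlib.h>

    typedef struct {
      size_t size;              /* bytes of usable data in the chunk */
      char *next;
    } heap_chunk_head;

    #define Chunk_size(c) ((((heap_chunk_head *) (c)) [-1]).size)
    #define Chunk_next(c) ((((heap_chunk_head *) (c)) [-1]).next)

    /* Allocate header + data together and return a pointer to the data;
       the header stays reachable at ((heap_chunk_head *) mem)[-1]. */
    static char *alloc_chunk(size_t size) {
      char *block = malloc(sizeof(heap_chunk_head) + size);
      char *mem;
      if (block == NULL) return NULL;
      mem = block + sizeof(heap_chunk_head);
      Chunk_size(mem) = size;   /* writes into the header behind mem */
      Chunk_next(mem) = NULL;
      return mem;               /* callers only ever see this pointer */
    }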

View File

@@ -368,6 +368,9 @@ static void do_compaction (intnat new_allocation_policy)
     }
   }
   ++ Caml_state->stat_compactions;
+
+  caml_shrink_mark_stack();
+
   caml_gc_message (0x10, "done.\n");
 }

View File

@@ -1670,7 +1670,6 @@ static header_t *bf_merge_block (value bp, char *limit)
     switch (Color_val (cur)){
     case Caml_white: goto white;
     case Caml_blue: bf_remove (cur); goto next;
-    case Caml_gray:
     case Caml_black:
       goto end_of_run;
     }

View File

@@ -173,7 +173,7 @@ static value heap_stats (int returnstats)
         }
       }
       break;
-    case Caml_gray: case Caml_black:
+    case Caml_black:
       CAMLassert (Wosize_hd (cur_hd) > 0);
       ++ live_blocks;
       live_words += Whsize_hd (cur_hd);

View File

@@ -30,6 +30,7 @@
 #include "caml/misc.h"
 #include "caml/mlvalues.h"
 #include "caml/roots.h"
+#include "caml/skiplist.h"
 #include "caml/signals.h"
 #include "caml/weak.h"
 #include "caml/memprof.h"
@@ -41,17 +42,25 @@ Caml_inline double fmin(double a, double b) {
 }
 #endif

+#define MARK_STACK_INIT_SIZE 2048
+
+typedef struct {
+  value block;
+  uintnat offset;
+} mark_entry;
+
+struct mark_stack {
+  mark_entry* stack;
+  uintnat count;
+  uintnat size;
+};
+
 uintnat caml_percent_free;
 uintnat caml_major_heap_increment;
 CAMLexport char *caml_heap_start;
 char *caml_gc_sweep_hp;
 int caml_gc_phase;        /* always Phase_mark, Phase_clean,
                              Phase_sweep, or Phase_idle */
-static value *gray_vals;
-static value *gray_vals_cur, *gray_vals_end;
-static asize_t gray_vals_size;
-static int heap_is_pure;   /* The heap is pure if the only gray objects
-                              below [markhp] are also in [gray_vals]. */
 uintnat caml_allocated_words;
 uintnat caml_dependent_size, caml_dependent_allocated;
 double caml_extra_heap_resources;
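
A minimal model of the mark_entry/mark_stack pair that replaces gray_vals (plain C types stand in for the runtime's value and uintnat, and plain realloc for caml_stat_resize_noexc): each entry records a block plus the offset of the next field to scan, so a slice can stop inside a large block and resume later without the old current_value/current_index globals.

    #include <stdlib.h>
    #include <stdint.h>

    typedef uintptr_t value;    /* stand-ins for the runtime's types */
    typedef uintptr_t uintnat;

    typedef struct { value block; uintnat offset; } mark_entry;
    typedef struct { mark_entry *stack; uintnat count, size; } mark_stack;

    /* Push a (block, next-field-to-scan) pair, doubling on demand.
       The runtime caps growth and falls back to pruning instead. */
    static int push(mark_stack *s, value block, uintnat offset) {
      if (s->count == s->size) {
        mark_entry *p = realloc(s->stack, 2 * s->size * sizeof *p);
        if (p == NULL) return 0;   /* the runtime would prune here */
        s->stack = p;
        s->size *= 2;
      }
      s->stack[s->count].block = block;
      s->stack[s->count].offset = offset;
      s->count++;
      return 1;
    }

    /* Pop the most recent entry; returns 0 when the stack is empty. */
    static int pop(mark_stack *s, mark_entry *out) {
      if (s->count == 0) return 0;
      *out = s->stack[--s->count];
      return 1;
    }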
@@ -59,7 +68,11 @@ uintnat caml_fl_wsz_at_phase_change = 0;
 extern value caml_fl_merge;  /* Defined in freelist.c. */

-static char *markhp, *chunk, *limit;
+/* redarken_first_chunk is the first chunk needing redarkening;
+   if NULL, no redarkening is required */
+static char *redarken_first_chunk = NULL;
+
+static char *sweep_chunk, *sweep_limit;
 static double p_backlog = 0.0; /* backlog for the gc speedup parameter */

 int caml_gc_subphase;     /* Subphase_{mark_roots,mark_main,mark_final} */
@@ -100,7 +113,7 @@ int caml_gc_subphase;     /* Subphase_{mark_roots,mark_main,mark_final} */
  */
 static int ephe_list_pure;
 /** The ephemerons is pure if since the start of its iteration
-    no value have been darken. */
+    no value have been darkened. */
 static value *ephes_checked_if_pure;
 static value *ephes_to_check;
@@ -116,34 +129,150 @@ static unsigned long major_gc_counter = 0;

 void (*caml_major_gc_hook)(void) = NULL;

-static void realloc_gray_vals (void)
-{
-  value *new;
-
-  CAMLassert (gray_vals_cur == gray_vals_end);
-  if (gray_vals_size < Caml_state->stat_heap_wsz / 32){
-    caml_gc_message (0x08, "Growing gray_vals to %"
-                           ARCH_INTNAT_PRINTF_FORMAT "uk bytes\n",
-                     (intnat) gray_vals_size * sizeof (value) / 512);
-    new = (value *) caml_stat_resize_noexc ((char *) gray_vals,
-                                            2 * gray_vals_size *
-                                            sizeof (value));
-    if (new == NULL){
-      caml_gc_message (0x08, "No room for growing gray_vals\n");
-      gray_vals_cur = gray_vals;
-      heap_is_pure = 0;
-    }else{
-      gray_vals = new;
-      gray_vals_cur = gray_vals + gray_vals_size;
-      gray_vals_size *= 2;
-      gray_vals_end = gray_vals + gray_vals_size;
-    }
-  }else{
-    gray_vals_cur = gray_vals + gray_vals_size / 2;
-    heap_is_pure = 0;
-  }
-}
+/* This function prunes the mark stack if it's about to overflow. It does so
+   by building a skiplist of major heap chunks and then iterating through the
+   mark stack and setting redarken_start/redarken_end on each chunk to
+   indicate the range that requires redarkening. */
+static void mark_stack_prune (struct mark_stack* stk)
+{
+  int entry;
+  uintnat mark_stack_count = stk->count;
+  mark_entry* mark_stack = stk->stack;
+  char* heap_chunk = caml_heap_start;
+  struct skiplist chunk_sklist = SKIPLIST_STATIC_INITIALIZER;
+
+  do {
+    caml_skiplist_insert(&chunk_sklist, (uintnat)heap_chunk,
+                         (uintnat)(heap_chunk+Chunk_size(heap_chunk)));
+    heap_chunk = Chunk_next(heap_chunk);
+  } while( heap_chunk != NULL );
+
+  for( entry = 0; entry < mark_stack_count ; entry++ ) {
+    mark_entry me = mark_stack[entry];
+    value* block_op = Op_val(me.block);
+    uintnat chunk_addr = 0, chunk_addr_below = 0;
+
+    if( caml_skiplist_find_below(&chunk_sklist, (uintnat)me.block,
+                                 &chunk_addr, &chunk_addr_below)
+        && me.block < chunk_addr_below ) {
+      if( Chunk_redarken_start(chunk_addr) > block_op ) {
+        Chunk_redarken_start(chunk_addr) = block_op;
+      }
+
+      if( Chunk_redarken_end(chunk_addr) < block_op ) {
+        Chunk_redarken_end(chunk_addr) = block_op;
+      }
+
+      if( redarken_first_chunk == NULL
+          || redarken_first_chunk > (char*)chunk_addr ) {
+        redarken_first_chunk = (char*)chunk_addr;
+      }
+    }
+  }
+
+  caml_skiplist_empty(&chunk_sklist);
+
+  caml_gc_message(0x08, "Mark stack overflow.\n");
+
+  stk->count = 0;
+}
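
The pruning above trades space for later work: rather than dropping entries outright, it compresses the whole stack into one [redarken_start, redarken_end] range per heap chunk, which redarken_chunk later re-scans. A simplified model of that compression (a linear array of chunks stands in for the runtime's skiplist, whose only job here is fast address-to-chunk lookup):

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t value;

    struct chunk {
      char *start, *end;        /* [start, end) address range of chunk  */
      value *redarken_start;    /* lowest entry seen; starts past end   */
      value *redarken_end;      /* highest entry seen; starts at start  */
    };

    /* Linear stand-in for the skiplist: find the chunk containing p. */
    static struct chunk *find_chunk(struct chunk *c, size_t n, value *p) {
      size_t i;
      for (i = 0; i < n; i++)
        if ((char *) p >= c[i].start && (char *) p < c[i].end)
          return &c[i];
      return NULL;
    }

    /* Compress the overflowing entries into per-chunk ranges; the caller
       then empties the stack and remembers the first chunk needing
       redarkening. */
    static void prune(struct chunk *chunks, size_t n_chunks,
                      value **entries, size_t n_entries) {
      size_t i;
      for (i = 0; i < n_entries; i++) {
        struct chunk *c = find_chunk(chunks, n_chunks, entries[i]);
        if (c == NULL) continue;
        if (entries[i] < c->redarken_start) c->redarken_start = entries[i];
        if (entries[i] > c->redarken_end)   c->redarken_end   = entries[i];
      }
    }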
+
+static void realloc_mark_stack (struct mark_stack* stk)
+{
+  mark_entry* new;
+  uintnat mark_stack_bsize = stk->size * sizeof(mark_entry);
+
+  if ( Wsize_bsize(mark_stack_bsize) < Caml_state->stat_heap_wsz / 64 ) {
+    caml_gc_message (0x08, "Growing mark stack to %"
+                           ARCH_INTNAT_PRINTF_FORMAT "uk bytes\n",
+                     (intnat) mark_stack_bsize * 2 / 1024);
+
+    new = (mark_entry*) caml_stat_resize_noexc ((char*) stk->stack,
+                                                2 * mark_stack_bsize);
+    if (new != NULL) {
+      stk->stack = new;
+      stk->size *= 2;
+      return;
+    }
+  }
+
+  caml_gc_message (0x08, "No room for growing mark stack. Pruning..\n");
+  mark_stack_prune(stk);
+}
+
+/* This function pushes the given block [block] onto the mark stack [stk].
+   If the block is small enough, it first checks whether any of its fields
+   would actually need marking; if none do, the entry is dropped rather
+   than enqueued. */
+Caml_inline void mark_stack_push(struct mark_stack* stk, value block,
+                                 uintnat offset, intnat* work)
+{
+  value v;
+  int i, block_wsz = Wosize_val(block), end;
+  mark_entry* me;
+
+  CAMLassert(Is_block(block) && Is_in_heap (block)
+             && Is_black_val(block));
+  CAMLassert(Tag_val(block) != Infix_tag);
+  CAMLassert(Tag_val(block) < No_scan_tag);
+
+#ifdef NO_NAKED_POINTERS
+  if (Tag_val(block) == Closure_tag) {
+    /* Skip the code pointers and integers at beginning of closure;
+       start scanning at the first word of the environment part. */
+    /* [mark_stack_push] may be called while we are traversing a closure
+       block but do not have enough budget to finish it. In that specific
+       case, we must not reset the resumption offset. */
+    if (offset == 0)
+      offset = Start_env_closinfo(Closinfo_val(block));
+
+    CAMLassert(offset <= Wosize_val(block)
+               && offset >= Start_env_closinfo(Closinfo_val(block)));
+  }
+#endif
+
+  end = (block_wsz < 8 ? block_wsz : 8);
+
+  /* Optimisation to avoid pushing small, unmarkable objects such as
+     [Some 42] onto the mark stack. */
+  for (i = offset; i < end; i++) {
+    v = Field(block, i);
+
+    if (Is_block(v) && !Is_black_val(v))
+      /* found something to mark */
+      break;
+  }
+
+  if (i == block_wsz) {
+    /* nothing left to mark */
+    if( work != NULL ) {
+      /* we should take credit for it though */
+      *work -= Whsize_wosize(block_wsz - offset);
+    }
+    return;
+  }
+
+  if( work != NULL ) {
+    /* take credit for the work we skipped due to the optimisation.
+       we will take credit for the header later as part of marking. */
+    *work -= (i - offset);
+  }
+
+  offset = i;
+
+  if (stk->count == stk->size)
+    realloc_mark_stack(stk);
+
+  me = &stk->stack[stk->count++];
+
+  me->block = block;
+  me->offset = offset;
+}
 void caml_darken (value v, value *p /* not used */)
 {
 #ifdef NO_NAKED_POINTERS
@@ -167,26 +296,81 @@ void caml_darken (value v, value *p /* not used */)
     CAMLassert (!Is_blue_hd (h));
     if (Is_white_hd (h)){
       ephe_list_pure = 0;
+      Hd_val (v) = Blackhd_hd (h);
       if (t < No_scan_tag){
-        Hd_val (v) = Grayhd_hd (h);
-        *gray_vals_cur++ = v;
-        if (gray_vals_cur >= gray_vals_end) realloc_gray_vals ();
-      }else{
-        Hd_val (v) = Blackhd_hd (h);
+        mark_stack_push(Caml_state->mark_stack, v, 0, NULL);
       }
     }
   }
 }
+
+/* This function shrinks the mark stack back to MARK_STACK_INIT_SIZE and is
+   called at the end of a GC compaction to avoid keeping a mark stack
+   larger than 1/32nd of the heap. */
+void caml_shrink_mark_stack () {
+  struct mark_stack* stk = Caml_state->mark_stack;
+  intnat init_stack_bsize = MARK_STACK_INIT_SIZE * sizeof(mark_entry);
+  mark_entry* shrunk_stack;
+
+  caml_gc_message (0x08, "Shrinking mark stack to %"
+                         ARCH_INTNAT_PRINTF_FORMAT "uk bytes\n",
+                   init_stack_bsize);
+
+  shrunk_stack = (mark_entry*) caml_stat_resize_noexc ((char*) stk->stack,
+                                                       init_stack_bsize);
+  if (shrunk_stack != NULL) {
+    stk->stack = shrunk_stack;
+    stk->size = MARK_STACK_INIT_SIZE;
+  }else{
+    caml_gc_message (0x08, "Mark stack shrinking failed");
+  }
+}
+
+/* This function pushes blocks from the given heap chunk [heap_chunk] onto
+   the mark stack. It returns 1 when the chunk has no more range to
+   redarken. It returns 0 if blocks needing redarkening remain but pushing
+   them would fill the mark stack beyond a quarter of its capacity; this
+   lowers the chance of triggering another overflow, which would be
+   wasteful. Subsequent calls continue the work. */
+static int redarken_chunk(char* heap_chunk, struct mark_stack* stk) {
+  value* p = Chunk_redarken_start(heap_chunk);
+  value* end = Chunk_redarken_end(heap_chunk);
+
+  while (p <= end) {
+    header_t hd = Hd_op(p);
+
+    if( Is_black_hd(hd) && Tag_hd(hd) < No_scan_tag ) {
+      if( stk->count < stk->size/4 ) {
+        mark_stack_push(stk, Val_op(p), 0, NULL);
+      } else {
+        /* Only fill up a quarter of the mark stack; we can resume later
+           for more if we need to */
+        Chunk_redarken_start(heap_chunk) = p;
+        return 0;
+      }
+    }
+
+    p += Whsize_hp(Hp_op(p));
+  }
+
+  Chunk_redarken_start(heap_chunk) =
+    (value*)(heap_chunk + Chunk_size(heap_chunk));
+  Chunk_redarken_end(heap_chunk) = 0;
+
+  return 1;
+}
+
 static void start_cycle (void)
 {
   CAMLassert (caml_gc_phase == Phase_idle);
-  CAMLassert (gray_vals_cur == gray_vals);
+  CAMLassert (Caml_state->mark_stack->count == 0);
+  CAMLassert (redarken_first_chunk == NULL);
   caml_gc_message (0x01, "Starting new major GC cycle\n");
   caml_darken_all_roots_start ();
   caml_gc_phase = Phase_mark;
   caml_gc_subphase = Subphase_mark_roots;
-  markhp = NULL;
   ephe_list_pure = 1;
   ephes_checked_if_pure = &caml_ephe_list_head;
   ephes_to_check = &caml_ephe_list_head;
@@ -196,13 +380,6 @@ static void start_cycle (void)
 #endif
 }

-/* We may stop the slice inside values, in order to avoid large latencies
-   on large arrays. In this case, [current_value] is the partially-marked
-   value and [current_index] is the index of the next field to be marked.
-*/
-static value current_value = 0;
-static mlsize_t current_index = 0;
-
 static void init_sweep_phase(void)
 {
   /* Phase_clean is done. */
@@ -210,17 +387,17 @@ static void init_sweep_phase(void)
   caml_gc_sweep_hp = caml_heap_start;
   caml_fl_init_merge ();
   caml_gc_phase = Phase_sweep;
-  chunk = caml_heap_start;
-  caml_gc_sweep_hp = chunk;
-  limit = chunk + Chunk_size (chunk);
+  sweep_chunk = caml_heap_start;
+  caml_gc_sweep_hp = sweep_chunk;
+  sweep_limit = sweep_chunk + Chunk_size (sweep_chunk);
   caml_fl_wsz_at_phase_change = caml_fl_cur_wsz;
   if (caml_major_gc_hook) (*caml_major_gc_hook)();
 }

 /* auxiliary function of mark_slice */
-Caml_inline value* mark_slice_darken(value *gray_vals_ptr,
-                                     value v, mlsize_t i,
-                                     int in_ephemeron, int *slice_pointers)
+Caml_inline void mark_slice_darken(struct mark_stack* stk, value v, mlsize_t i,
+                                   int in_ephemeron, int *slice_pointers,
+                                   intnat *work)
 {
   value child;
   header_t chd;
@@ -228,7 +405,7 @@ Caml_inline value* mark_slice_darken(value *gray_vals_ptr,
   child = Field (v, i);

 #ifdef NO_NAKED_POINTERS
-  if (Is_block (child) && ! Is_young (child)) {
+  if (Is_block (child) && !Is_young (child)) {
 #else
   if (Is_block (child) && Is_in_heap (child)) {
 #endif
@@ -267,20 +444,17 @@ Caml_inline value* mark_slice_darken(value *gray_vals_ptr,
 #endif
       if (Is_white_hd (chd)){
         ephe_list_pure = 0;
-        Hd_val (child) = Grayhd_hd (chd);
-        *gray_vals_ptr++ = child;
-        if (gray_vals_ptr >= gray_vals_end) {
-          gray_vals_cur = gray_vals_ptr;
-          realloc_gray_vals ();
-          gray_vals_ptr = gray_vals_cur;
+        Hd_val (child) = Blackhd_hd (chd);
+        if( Tag_hd(chd) < No_scan_tag ) {
+          mark_stack_push(stk, child, 0, work);
+        } else {
+          *work -= 1; /* Account for header */
         }
       }
     }
   }
-  return gray_vals_ptr;
 }

-static value* mark_ephe_aux (value *gray_vals_ptr, intnat *work,
+static void mark_ephe_aux (struct mark_stack *stk, intnat *work,
                              int *slice_pointers)
 {
   value v, data, key;
@@ -342,13 +516,11 @@ static void mark_ephe_aux (struct mark_stack *stk, intnat *work,
       *work -= Whsize_wosize(i);

       if (alive_data){
-        gray_vals_ptr = mark_slice_darken(gray_vals_ptr,v,
-                                          CAML_EPHE_DATA_OFFSET,
-                                          /*in_ephemeron=*/1,
-                                          slice_pointers);
+        mark_slice_darken(stk, v, CAML_EPHE_DATA_OFFSET, /*in_ephemeron=*/1,
+                          slice_pointers, work);
       } else { /* not triggered move to the next one */
         ephes_to_check = &Field(v,CAML_EPHE_LINK_OFFSET);
-        return gray_vals_ptr;
+        return;
       }
     } else { /* a simily weak pointer or an already alive data */
       *work -= 1;
@@ -368,119 +540,80 @@ static value* mark_ephe_aux (value *gray_vals_ptr, intnat *work,
     *ephes_checked_if_pure = v;
     ephes_checked_if_pure = &Field(v,CAML_EPHE_LINK_OFFSET);
   }
-  return gray_vals_ptr;
 }

 static void mark_slice (intnat work)
 {
-  value *gray_vals_ptr;  /* Local copy of [gray_vals_cur] */
-  value v;
-  header_t hd;
-  mlsize_t size, i, start, end; /* [start] is a local copy of [current_index] */
+  mark_entry me = {0, 0};
+  mlsize_t me_end = 0;
 #ifdef CAML_INSTR
   int slice_fields = 0; /** eventlog counters */
 #endif /*CAML_INSTR*/
   int slice_pointers = 0;
+  struct mark_stack* stk = Caml_state->mark_stack;

   caml_gc_message (0x40, "Marking %"ARCH_INTNAT_PRINTF_FORMAT"d words\n", work);
   caml_gc_message (0x40, "Subphase = %d\n", caml_gc_subphase);
-  gray_vals_ptr = gray_vals_cur;
-  v = current_value;
-  start = current_index;
-  while (work > 0){
-    if (v == 0 && gray_vals_ptr > gray_vals){
-      CAMLassert (start == 0);
-      v = *--gray_vals_ptr;
-      CAMLassert (Is_gray_val (v));
-#ifdef NO_NAKED_POINTERS
-      if (Tag_val(v) == Closure_tag) {
-        /* Skip the code pointers and integers at beginning of closure;
-           start scanning at the first word of the environment part. */
-        start = Start_env_closinfo(Closinfo_val(v));
-        CAMLassert(start <= Wosize_val(v));
-      }
-#endif
-    }
-    if (v != 0){
-      hd = Hd_val(v);
-      CAMLassert (Is_gray_hd (hd));
-      size = Wosize_hd (hd);
-      end = start + work;
-      if (Tag_hd (hd) < No_scan_tag){
-        start = size < start ? size : start;
-        end = size < end ? size : end;
-        CAMLassert (end >= start);
-        CAML_EVENTLOG_DO({
-          slice_fields += end - start;
-          if (size > end)
-            CAML_EV_COUNTER (EV_C_MAJOR_MARK_SLICE_REMAIN, size - end);
-        });
-        for (i = start; i < end; i++){
-          gray_vals_ptr = mark_slice_darken(gray_vals_ptr,v,i,
-                                            /*in_ephemeron=*/ 0,
-                                            &slice_pointers);
-        }
-        if (end < size){
-          work = 0;
-          start = end;
-          /* [v] doesn't change. */
-          CAMLassert (Is_gray_val (v));
-        }else{
-          CAMLassert (end == size);
-          Hd_val (v) = Blackhd_hd (hd);
-          work -= Whsize_wosize(end - start);
-          start = 0;
-          v = 0;
-        }
-      }else{
-        /* The block doesn't contain any pointers. */
-        CAMLassert (start == 0);
-        Hd_val (v) = Blackhd_hd (hd);
-        work -= Whsize_wosize(size);
-        v = 0;
-      }
-    }else if (markhp != NULL){
-      if (markhp == limit){
-        chunk = Chunk_next (chunk);
-        if (chunk == NULL){
-          markhp = NULL;
-        }else{
-          markhp = chunk;
-          limit = chunk + Chunk_size (chunk);
-        }
-      }else{
-        if (Is_gray_val (Val_hp (markhp))){
-          CAMLassert (gray_vals_ptr == gray_vals);
-          CAMLassert (v == 0 && start == 0);
-          v = Val_hp (markhp);
-#ifdef NO_NAKED_POINTERS
-          if (Tag_val(v) == Closure_tag) {
-            start = Start_env_closinfo(Closinfo_val(v));
-            CAMLassert(start <= Wosize_val(v));
-          }
-#endif
-        }
-        markhp += Bhsize_hp (markhp);
-      }
-    }else if (!heap_is_pure){
-      heap_is_pure = 1;
-      chunk = caml_heap_start;
-      markhp = chunk;
-      limit = chunk + Chunk_size (chunk);
+
+  while (1){
+    int can_mark = 0;
+
+    if (me.offset == me_end) {
+      if (stk->count > 0)
+      {
+        me = stk->stack[--stk->count];
+        me_end = Wosize_val(me.block);
+        can_mark = 1;
+      }
+    } else {
+      can_mark = 1;
+    }
+
+    if (work <= 0) {
+      if( can_mark ) {
+        mark_stack_push(stk, me.block, me.offset, NULL);
+
+        CAML_EVENTLOG_DO({
+          CAML_EV_COUNTER(EV_C_MAJOR_MARK_SLICE_REMAIN, me_end - me.offset);
+        });
+      }
+      break;
+    }
+
+    if( can_mark ) {
+      CAMLassert(Is_block(me.block) &&
+                 Is_black_val (me.block) &&
+                 Tag_val(me.block) < No_scan_tag);
+
+      mark_slice_darken(stk, me.block, me.offset++, /*in_ephemeron=*/ 0,
+                        &slice_pointers, &work);
+
+      work--;
+
+      CAML_EVENTLOG_DO({
+        slice_fields++;
+      });
+
+      if( me.offset == me_end ) {
+        work--; /* Include header word */
+      }
+    } else if( redarken_first_chunk != NULL ) {
+      /* There are chunks that need to be redarkened because we
+         overflowed our mark stack */
+      if( redarken_chunk(redarken_first_chunk, stk) ) {
+        redarken_first_chunk = Chunk_next(redarken_first_chunk);
+      }
     } else if (caml_gc_subphase == Subphase_mark_roots) {
       CAML_EV_BEGIN(EV_MAJOR_MARK_ROOTS);
-      gray_vals_cur = gray_vals_ptr;
       work = caml_darken_all_roots_slice (work);
-      gray_vals_ptr = gray_vals_cur;
       CAML_EV_END(EV_MAJOR_MARK_ROOTS);
       if (work > 0){
         caml_gc_subphase = Subphase_mark_main;
       }
     } else if (*ephes_to_check != (value) NULL) {
       /* Continue to scan the list of ephe */
-      gray_vals_ptr = mark_ephe_aux(gray_vals_ptr,&work,&slice_pointers);
+      mark_ephe_aux(stk,&work,&slice_pointers);
     } else if (!ephe_list_pure){
       /* We must scan again the list because some value have been darken */
       ephe_list_pure = 1;
@@ -491,19 +624,7 @@ static void mark_slice (intnat work)
       /* Subphase_mark_main is done.
          Mark finalised values. */
       CAML_EV_BEGIN(EV_MAJOR_MARK_MAIN);
-      gray_vals_cur = gray_vals_ptr;
       caml_final_update_mark_phase ();
-      gray_vals_ptr = gray_vals_cur;
-      if (gray_vals_ptr > gray_vals){
-        v = *--gray_vals_ptr;
-        CAMLassert (start == 0);
-#ifdef NO_NAKED_POINTERS
-        if (Tag_val(v) == Closure_tag) {
-          start = Start_env_closinfo(Closinfo_val(v));
-          CAMLassert(start <= Wosize_val(v));
-        }
-#endif
-      }
       /* Complete the marking */
       ephes_to_check = ephes_checked_if_pure;
       CAML_EV_END(EV_MAJOR_MARK_MAIN);
@@ -532,9 +653,6 @@ static void mark_slice (intnat work)
       }
     }
   }
-  gray_vals_cur = gray_vals_ptr;
-  current_value = v;
-  current_index = start;
   CAML_EV_COUNTER(EV_C_MAJOR_MARK_SLICE_FIELDS, slice_fields);
   CAML_EV_COUNTER(EV_C_MAJOR_MARK_SLICE_POINTERS, slice_pointers);
 }
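
Pulling the marking changes together: the new mark_slice is a budgeted loop that pops an entry, darkens one field per unit of work, and, when the budget runs out, parks the partially scanned entry back on the stack so the next slice resumes at the same offset. A compact, runnable model of just that resumption logic (block_size and scan_field are stubs; the real loop also credits headers, redarkens overflowed chunks, and drives the subphases):

    #include <stdio.h>

    typedef unsigned long uintnat;
    typedef long intnat;
    typedef uintnat value;

    typedef struct { value block; uintnat offset; } mark_entry;
    typedef struct { mark_entry stack[64]; uintnat count; } mark_stack;

    /* Stubs standing in for the runtime: every "block" has 4 fields and
       scanning a field just logs it (really it would darken a child and
       possibly push it). */
    static uintnat block_size(value block) { (void) block; return 4; }
    static void scan_field(value block, uintnat i) {
      printf("scan block %lu field %lu\n", block, i);
    }
    static void push(mark_stack *s, value block, uintnat offset) {
      s->stack[s->count].block = block;
      s->stack[s->count].offset = offset;
      s->count++;
    }

    static void mark_slice_model(mark_stack *stk, intnat work) {
      mark_entry me = {0, 0};
      uintnat me_end = 0;
      while (1) {
        if (me.offset == me_end) {           /* current entry finished */
          if (stk->count == 0) return;       /* mark stack drained     */
          me = stk->stack[--stk->count];
          me_end = block_size(me.block);
        }
        if (work <= 0) {                     /* budget exhausted: park */
          push(stk, me.block, me.offset);    /* entry for next slice   */
          return;
        }
        scan_field(me.block, me.offset++);   /* one field per work unit */
        work--;
      }
    }

    int main(void) {
      mark_stack stk = { .count = 0 };
      push(&stk, 1, 0);
      mark_slice_model(&stk, 3);    /* stops inside block 1 at field 3 */
      mark_slice_model(&stk, 100);  /* resumes at field 3 and finishes */
      return 0;
    }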
@@ -575,14 +693,15 @@ static void sweep_slice (intnat work)
   caml_gc_message (0x40, "Sweeping %"
                    ARCH_INTNAT_PRINTF_FORMAT "d words\n", work);
   while (work > 0){
-    if (caml_gc_sweep_hp < limit){
+    if (caml_gc_sweep_hp < sweep_limit){
       hp = caml_gc_sweep_hp;
       hd = Hd_hp (hp);
       work -= Whsize_hd (hd);
       caml_gc_sweep_hp += Bhsize_hd (hd);
       switch (Color_hd (hd)){
       case Caml_white:
-        caml_gc_sweep_hp = (char *) caml_fl_merge_block (Val_hp (hp), limit);
+        caml_gc_sweep_hp =
+          (char *)caml_fl_merge_block(Val_hp (hp), sweep_limit);
         break;
       case Caml_blue:
         /* Only the blocks of the free-list are blue. See [freelist.c]. */
@@ -593,18 +712,18 @@ static void sweep_slice (intnat work)
         Hd_hp (hp) = Whitehd_hd (hd);
         break;
       }
-      CAMLassert (caml_gc_sweep_hp <= limit);
+      CAMLassert (caml_gc_sweep_hp <= sweep_limit);
     }else{
-      chunk = Chunk_next (chunk);
-      if (chunk == NULL){
+      sweep_chunk = Chunk_next (sweep_chunk);
+      if (sweep_chunk == NULL){
        /* Sweeping is done. */
         ++ Caml_state->stat_major_collections;
         work = 0;
         caml_gc_phase = Phase_idle;
         caml_request_minor_gc ();
       }else{
-        caml_gc_sweep_hp = chunk;
-        limit = chunk + Chunk_size (chunk);
+        caml_gc_sweep_hp = sweep_chunk;
+        sweep_limit = sweep_chunk + Chunk_size (sweep_chunk);
       }
     }
   }
@@ -848,6 +967,7 @@ void caml_finish_major_cycle (void)
   while (caml_gc_phase == Phase_mark) mark_slice (LONG_MAX);
   while (caml_gc_phase == Phase_clean) clean_slice (LONG_MAX);
   CAMLassert (caml_gc_phase == Phase_sweep);
+  CAMLassert (redarken_first_chunk == NULL);
   while (caml_gc_phase == Phase_sweep) sweep_slice (LONG_MAX);
   CAMLassert (caml_gc_phase == Phase_idle);
   Caml_state->stat_major_words += caml_allocated_words;
@@ -906,13 +1026,20 @@ void caml_init_major_heap (asize_t heap_size)
   caml_make_free_blocks ((value *) caml_heap_start,
                          Caml_state->stat_heap_wsz, 1, Caml_white);
   caml_gc_phase = Phase_idle;
-  gray_vals_size = 2048;
-  gray_vals = (value *) caml_stat_alloc_noexc (gray_vals_size * sizeof (value));
-  if (gray_vals == NULL)
-    caml_fatal_error ("not enough memory for the gray cache");
-  gray_vals_cur = gray_vals;
-  gray_vals_end = gray_vals + gray_vals_size;
-  heap_is_pure = 1;
+
+  Caml_state->mark_stack = caml_stat_alloc_noexc(sizeof(struct mark_stack));
+  if (Caml_state->mark_stack == NULL)
+    caml_fatal_error ("not enough memory for the mark stack");
+
+  Caml_state->mark_stack->stack =
+    caml_stat_alloc_noexc(MARK_STACK_INIT_SIZE * sizeof(mark_entry));
+
+  if(Caml_state->mark_stack->stack == NULL)
+    caml_fatal_error("not enough memory for the mark stack");
+
+  Caml_state->mark_stack->count = 0;
+  Caml_state->mark_stack->size = MARK_STACK_INIT_SIZE;
+
   caml_allocated_words = 0;
   caml_extra_heap_resources = 0.0;
   for (i = 0; i < Max_major_window; i++) caml_major_ring[i] = 0.0;
@@ -944,9 +1071,9 @@ void caml_finalise_heap (void)
   /* Finalising all values (by means of forced sweeping) */
   caml_fl_init_merge ();
   caml_gc_phase = Phase_sweep;
-  chunk = caml_heap_start;
-  caml_gc_sweep_hp = chunk;
-  limit = chunk + Chunk_size (chunk);
+  sweep_chunk = caml_heap_start;
+  caml_gc_sweep_hp = sweep_chunk;
+  sweep_limit = sweep_chunk + Chunk_size (sweep_chunk);
   while (caml_gc_phase == Phase_sweep)
     sweep_slice (LONG_MAX);
 }

View File

@@ -262,6 +262,8 @@ char *caml_alloc_for_heap (asize_t request)
   mem = (char *) block + sizeof (heap_chunk_head);
   Chunk_size (mem) = size - sizeof (heap_chunk_head);
   Chunk_block (mem) = block;
+  Chunk_redarken_start(mem) = (value*)(mem + Chunk_size(mem));
+  Chunk_redarken_end(mem) = (value*)mem;
   return mem;
 #else
   return NULL;
@@ -277,6 +279,8 @@ char *caml_alloc_for_heap (asize_t request)
     mem += sizeof (heap_chunk_head);
     Chunk_size (mem) = request;
     Chunk_block (mem) = block;
+    Chunk_redarken_start(mem) = (value*)(mem + Chunk_size(mem));
+    Chunk_redarken_end(mem) = (value*)mem;
     return mem;
   }
 }