2016-02-18 07:11:59 -08:00
|
|
|
/**************************************************************************/
|
|
|
|
/* */
|
|
|
|
/* OCaml */
|
|
|
|
/* */
|
|
|
|
/* Damien Doligez, projet Para, INRIA Rocquencourt */
|
|
|
|
/* */
|
|
|
|
/* Copyright 1996 Institut National de Recherche en Informatique et */
|
|
|
|
/* en Automatique. */
|
|
|
|
/* */
|
|
|
|
/* All rights reserved. This file is distributed under the terms of */
|
|
|
|
/* the GNU Lesser General Public License version 2.1, with the */
|
|
|
|
/* special exception on linking described in the file LICENSE. */
|
|
|
|
/* */
|
|
|
|
/**************************************************************************/
|
1995-08-09 08:06:35 -07:00
|
|
|
|
2016-07-04 10:00:57 -07:00
|
|
|
#define CAML_INTERNALS
|
|
|
|
|
1995-05-04 03:15:53 -07:00
|
|
|
#include <string.h>
|
2015-08-16 14:23:47 -07:00
|
|
|
#include "caml/custom.h"
|
2014-12-27 06:41:49 -08:00
|
|
|
#include "caml/config.h"
|
|
|
|
#include "caml/fail.h"
|
|
|
|
#include "caml/finalise.h"
|
|
|
|
#include "caml/gc.h"
|
|
|
|
#include "caml/gc_ctrl.h"
|
|
|
|
#include "caml/major_gc.h"
|
|
|
|
#include "caml/memory.h"
|
|
|
|
#include "caml/minor_gc.h"
|
|
|
|
#include "caml/misc.h"
|
|
|
|
#include "caml/mlvalues.h"
|
|
|
|
#include "caml/roots.h"
|
|
|
|
#include "caml/signals.h"
|
|
|
|
#include "caml/weak.h"
|
2019-05-29 06:32:05 -07:00
|
|
|
#include "caml/memprof.h"
|
2019-11-15 04:52:35 -08:00
|
|
|
#include "caml/eventlog.h"
|
1995-05-04 03:15:53 -07:00
|
|
|
|
2015-11-20 08:54:26 -08:00
|
|
|
/* Pointers into the minor heap.
|
2019-06-05 23:39:26 -07:00
|
|
|
[Caml_state->young_base]
|
2015-11-20 08:54:26 -08:00
|
|
|
The [malloc] block that contains the heap.
|
2019-06-05 23:39:26 -07:00
|
|
|
[Caml_state->young_start] ... [Caml_state->young_end]
|
2015-11-20 08:54:26 -08:00
|
|
|
The whole range of the minor heap: all young blocks are inside
|
|
|
|
this interval.
|
2019-06-05 23:39:26 -07:00
|
|
|
[Caml_state->young_alloc_start]...[Caml_state->young_alloc_end]
|
2015-11-20 08:54:26 -08:00
|
|
|
The allocation arena: newly-allocated blocks are carved from
|
2019-06-05 23:39:26 -07:00
|
|
|
this interval, starting at [Caml_state->young_alloc_end].
|
|
|
|
[Caml_state->young_alloc_mid] is the mid-point of this interval.
|
2019-06-17 11:07:49 -07:00
|
|
|
[Caml_state->young_ptr], [Caml_state->young_trigger],
|
|
|
|
[Caml_state->young_limit]
|
2015-11-20 08:54:26 -08:00
|
|
|
These pointers are all inside the allocation arena.
|
2019-06-03 07:13:15 -07:00
|
|
|
- [Caml_state->young_ptr] is where the next allocation will take place.
|
2019-06-17 11:07:49 -07:00
|
|
|
- [Caml_state->young_trigger] is how far we can allocate before
|
|
|
|
triggering [caml_gc_dispatch]. Currently, it is either
|
|
|
|
[Caml_state->young_alloc_start] or the mid-point of the allocation
|
|
|
|
arena.
|
2019-06-03 07:13:15 -07:00
|
|
|
- [Caml_state->young_limit] is the pointer that is compared to
|
|
|
|
[Caml_state->young_ptr] for allocation. It is either:
|
2019-10-09 11:18:44 -07:00
|
|
|
+ [Caml_state->young_alloc_end] if a signal handler or
|
|
|
|
finaliser or memprof callback is pending, or if a major
|
|
|
|
or minor collection has been requested, or an
|
|
|
|
asynchronous callback has just raised an exception,
|
2019-05-09 08:39:35 -07:00
|
|
|
+ [caml_memprof_young_trigger] if a memprof sample is planned,
|
2019-06-05 23:39:26 -07:00
|
|
|
+ or [Caml_state->young_trigger].
|
2015-11-20 08:54:26 -08:00
|
|
|
*/
|
|
|
|
|
2016-01-27 13:47:31 -08:00
|
|
|
struct generic_table CAML_TABLE_STRUCT(char);
|
2014-04-02 04:46:19 -07:00
|
|
|
|
2019-08-01 23:28:09 -07:00
|
|
|
void caml_alloc_minor_tables ()
|
2019-06-05 23:39:26 -07:00
|
|
|
{
|
2019-08-01 23:28:09 -07:00
|
|
|
Caml_state->ref_table =
|
|
|
|
caml_stat_alloc_noexc(sizeof(struct caml_ref_table));
|
|
|
|
if (Caml_state->ref_table == NULL)
|
2019-06-05 23:39:26 -07:00
|
|
|
caml_fatal_error ("cannot initialize minor heap");
|
2019-08-01 23:28:09 -07:00
|
|
|
memset(Caml_state->ref_table, 0, sizeof(struct caml_ref_table));
|
|
|
|
|
|
|
|
Caml_state->ephe_ref_table =
|
|
|
|
caml_stat_alloc_noexc(sizeof(struct caml_ephe_ref_table));
|
|
|
|
if (Caml_state->ephe_ref_table == NULL)
|
|
|
|
caml_fatal_error ("cannot initialize minor heap");
|
|
|
|
memset(Caml_state->ephe_ref_table, 0, sizeof(struct caml_ephe_ref_table));
|
|
|
|
|
|
|
|
Caml_state->custom_table =
|
|
|
|
caml_stat_alloc_noexc(sizeof(struct caml_custom_table));
|
|
|
|
if (Caml_state->custom_table == NULL)
|
|
|
|
caml_fatal_error ("cannot initialize minor heap");
|
|
|
|
memset(Caml_state->custom_table, 0, sizeof(struct caml_custom_table));
|
2019-06-05 23:39:26 -07:00
|
|
|
}
|
2018-03-28 07:45:54 -07:00
|
|
|
|
2014-12-12 07:18:04 -08:00
|
|
|
/* [sz] and [rsv] are numbers of entries */
|
2014-04-02 04:46:19 -07:00
|
|
|
static void alloc_generic_table (struct generic_table *tbl, asize_t sz,
|
|
|
|
asize_t rsv, asize_t element_size)
|
2007-05-04 07:05:13 -07:00
|
|
|
{
|
2014-04-02 04:46:19 -07:00
|
|
|
void *new_table;
|
2007-05-04 07:05:13 -07:00
|
|
|
|
|
|
|
tbl->size = sz;
|
|
|
|
tbl->reserve = rsv;
|
2014-05-28 16:11:47 -07:00
|
|
|
new_table = (void *) caml_stat_alloc_noexc((tbl->size + tbl->reserve) *
|
|
|
|
element_size);
|
2018-05-17 06:17:04 -07:00
|
|
|
if (new_table == NULL) caml_fatal_error ("not enough memory");
|
2007-05-04 07:05:13 -07:00
|
|
|
if (tbl->base != NULL) caml_stat_free (tbl->base);
|
|
|
|
tbl->base = new_table;
|
|
|
|
tbl->ptr = tbl->base;
|
2016-02-29 04:17:45 -08:00
|
|
|
tbl->threshold = tbl->base + tbl->size * element_size;
|
2007-05-04 07:05:13 -07:00
|
|
|
tbl->limit = tbl->threshold;
|
2016-02-29 04:17:45 -08:00
|
|
|
tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
|
2007-05-04 07:05:13 -07:00
|
|
|
}
|
|
|
|
|
2014-04-02 04:46:19 -07:00
|
|
|
/* Allocate the buffer of a [caml_ref_table]; entries are pointers to
   values ([value *]).  [sz] and [rsv] are entry counts. */
void caml_alloc_table (struct caml_ref_table *tbl, asize_t sz, asize_t rsv)
{
  alloc_generic_table ((struct generic_table *) tbl, sz, rsv,
                       sizeof (value *));
}
|
|
|
|
|
2013-12-25 11:15:39 -08:00
|
|
|
/* Allocate the buffer of a [caml_ephe_ref_table]; entries are
   [struct caml_ephe_ref_elt].  [sz] and [rsv] are entry counts. */
void caml_alloc_ephe_table (struct caml_ephe_ref_table *tbl, asize_t sz,
                            asize_t rsv)
{
  alloc_generic_table ((struct generic_table *) tbl, sz, rsv,
                       sizeof (struct caml_ephe_ref_elt));
}
|
|
|
|
|
2016-04-28 07:00:18 -07:00
|
|
|
/* Allocate the buffer of a [caml_custom_table]; entries are
   [struct caml_custom_elt].  [sz] and [rsv] are entry counts. */
void caml_alloc_custom_table (struct caml_custom_table *tbl, asize_t sz,
                              asize_t rsv)
{
  alloc_generic_table ((struct generic_table *) tbl, sz, rsv,
                       sizeof (struct caml_custom_elt));
}
|
|
|
|
|
2014-04-02 04:46:19 -07:00
|
|
|
static void reset_table (struct generic_table *tbl)
|
2007-05-04 07:05:13 -07:00
|
|
|
{
|
|
|
|
tbl->size = 0;
|
|
|
|
tbl->reserve = 0;
|
|
|
|
if (tbl->base != NULL) caml_stat_free (tbl->base);
|
|
|
|
tbl->base = tbl->ptr = tbl->threshold = tbl->limit = tbl->end = NULL;
|
|
|
|
}
|
|
|
|
|
2014-04-02 04:46:19 -07:00
|
|
|
static void clear_table (struct generic_table *tbl)
|
2007-05-04 07:05:13 -07:00
|
|
|
{
|
|
|
|
tbl->ptr = tbl->base;
|
|
|
|
tbl->limit = tbl->threshold;
|
|
|
|
}
|
|
|
|
|
2015-11-20 08:54:26 -08:00
|
|
|
/* Replace the minor heap with a fresh one of [bsz] bytes.
   If the current minor heap is not empty, a minor collection is forced
   first so no live young values are lost.  The old heap (if any) is
   unregistered from the page table and freed, the new one is registered,
   and all derived pointers ([young_*], trigger, limit) are recomputed.
   The remembered-set tables are reset; their buffers will be reallocated
   on demand.  Raises [Out_of_memory] if the new heap cannot be
   allocated or registered. */
void caml_set_minor_heap_size (asize_t bsz)
{
  char *heap;
  void *heap_base;

  CAMLassert (bsz >= Bsize_wsize(Minor_heap_min));
  CAMLassert (bsz <= Bsize_wsize(Minor_heap_max));
  CAMLassert (bsz % Page_size == 0);
  CAMLassert (bsz % sizeof (value) == 0);

  /* Empty the current minor heap before throwing it away. */
  if (Caml_state->young_ptr != Caml_state->young_alloc_end){
    CAML_EV_COUNTER (EV_C_FORCE_MINOR_SET_MINOR_HEAP_SIZE, 1);
    Caml_state->requested_minor_gc = 0;
    Caml_state->young_trigger = Caml_state->young_alloc_mid;
    caml_update_young_limit();
    caml_empty_minor_heap ();
  }
  CAMLassert (Caml_state->young_ptr == Caml_state->young_alloc_end);

  heap = caml_stat_alloc_aligned_noexc(bsz, 0, &heap_base);
  if (heap == NULL) caml_raise_out_of_memory();
  if (caml_page_table_add(In_young, heap, heap + bsz) != 0)
    caml_raise_out_of_memory();

  if (Caml_state->young_start != NULL){
    caml_page_table_remove(In_young, Caml_state->young_start,
                           Caml_state->young_end);
    caml_stat_free (Caml_state->young_base);
  }

  /* Recompute every pointer that depends on the heap's location/size. */
  Caml_state->young_base = heap_base;
  Caml_state->young_start = (value *) heap;
  Caml_state->young_end = (value *) (heap + bsz);
  Caml_state->young_alloc_start = Caml_state->young_start;
  Caml_state->young_alloc_mid =
    Caml_state->young_alloc_start + Wsize_bsize (bsz) / 2;
  Caml_state->young_alloc_end = Caml_state->young_end;
  Caml_state->young_trigger = Caml_state->young_alloc_start;
  caml_update_young_limit();
  Caml_state->young_ptr = Caml_state->young_alloc_end;
  Caml_state->minor_heap_wsz = Wsize_bsize (bsz);
  caml_memprof_renew_minor_sample();

  reset_table ((struct generic_table *) Caml_state->ref_table);
  reset_table ((struct generic_table *) Caml_state->ephe_ref_table);
  reset_table ((struct generic_table *) Caml_state->custom_table);
}
|
|
|
|
|
2002-02-05 09:11:33 -08:00
|
|
|
static value oldify_todo_list = 0;
|
2002-01-18 07:13:26 -08:00
|
|
|
|
2002-01-20 09:39:10 -08:00
|
|
|
/* Note that the tests on the tag depend on the fact that Infix_tag,
|
|
|
|
Forward_tag, and No_scan_tag are contiguous. */
|
|
|
|
|
2003-12-31 06:20:40 -08:00
|
|
|
void caml_oldify_one (value v, value *p)
|
2000-01-02 08:10:21 -08:00
|
|
|
{
|
2002-09-18 06:59:27 -07:00
|
|
|
value result;
|
2000-01-02 08:10:21 -08:00
|
|
|
header_t hd;
|
|
|
|
mlsize_t sz, i;
|
|
|
|
tag_t tag;
|
|
|
|
|
|
|
|
tail_call:
|
|
|
|
if (Is_block (v) && Is_young (v)){
|
2019-06-03 07:13:15 -07:00
|
|
|
CAMLassert ((value *) Hp_val (v) >= Caml_state->young_ptr);
|
2000-01-02 08:10:21 -08:00
|
|
|
hd = Hd_val (v);
|
2002-01-18 07:13:26 -08:00
|
|
|
if (hd == 0){ /* If already forwarded */
|
|
|
|
*p = Field (v, 0); /* then forward pointer is first field. */
|
2000-01-02 08:10:21 -08:00
|
|
|
}else{
|
2019-09-18 04:21:24 -07:00
|
|
|
CAMLassert_young_header(hd);
|
2002-01-18 07:13:26 -08:00
|
|
|
tag = Tag_hd (hd);
|
2002-01-20 09:39:10 -08:00
|
|
|
if (tag < Infix_tag){
|
2002-09-18 06:59:27 -07:00
|
|
|
value field0;
|
|
|
|
|
2002-01-18 07:13:26 -08:00
|
|
|
sz = Wosize_hd (hd);
|
2019-04-23 00:27:31 -07:00
|
|
|
result = caml_alloc_shr_for_minor_gc (sz, tag, hd);
|
2002-01-18 07:13:26 -08:00
|
|
|
*p = result;
|
|
|
|
field0 = Field (v, 0);
|
|
|
|
Hd_val (v) = 0; /* Set forward flag */
|
|
|
|
Field (v, 0) = result; /* and forward pointer. */
|
|
|
|
if (sz > 1){
|
|
|
|
Field (result, 0) = field0;
|
|
|
|
Field (result, 1) = oldify_todo_list; /* Add this block */
|
|
|
|
oldify_todo_list = v; /* to the "to do" list. */
|
2002-07-23 07:12:03 -07:00
|
|
|
}else{
|
2017-03-10 08:29:21 -08:00
|
|
|
CAMLassert (sz == 1);
|
2002-01-18 07:13:26 -08:00
|
|
|
p = &Field (result, 0);
|
2002-07-23 07:12:03 -07:00
|
|
|
v = field0;
|
2002-01-18 07:13:26 -08:00
|
|
|
goto tail_call;
|
2002-07-23 07:12:03 -07:00
|
|
|
}
|
2002-01-20 09:39:10 -08:00
|
|
|
}else if (tag >= No_scan_tag){
|
|
|
|
sz = Wosize_hd (hd);
|
2019-04-23 00:27:31 -07:00
|
|
|
result = caml_alloc_shr_for_minor_gc (sz, tag, hd);
|
2002-01-20 09:39:10 -08:00
|
|
|
for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
|
|
|
|
Hd_val (v) = 0; /* Set forward flag */
|
|
|
|
Field (v, 0) = result; /* and forward pointer. */
|
|
|
|
*p = result;
|
|
|
|
}else if (tag == Infix_tag){
|
|
|
|
mlsize_t offset = Infix_offset_hd (hd);
|
2003-12-31 06:20:40 -08:00
|
|
|
caml_oldify_one (v - offset, p); /* Cannot recurse deeper than 1. */
|
2002-01-20 09:39:10 -08:00
|
|
|
*p += offset;
|
|
|
|
}else{
|
2002-09-18 06:59:27 -07:00
|
|
|
value f = Forward_val (v);
|
|
|
|
tag_t ft = 0;
|
2008-07-28 05:03:55 -07:00
|
|
|
int vv = 1;
|
2002-09-18 06:59:27 -07:00
|
|
|
|
2017-03-10 08:29:21 -08:00
|
|
|
CAMLassert (tag == Forward_tag);
|
2008-07-28 05:03:55 -07:00
|
|
|
if (Is_block (f)){
|
2012-03-06 11:17:29 -08:00
|
|
|
if (Is_young (f)){
|
|
|
|
vv = 1;
|
2008-07-28 05:03:55 -07:00
|
|
|
ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
|
2012-03-06 11:17:29 -08:00
|
|
|
}else{
|
|
|
|
vv = Is_in_value_area(f);
|
|
|
|
if (vv){
|
|
|
|
ft = Tag_val (f);
|
|
|
|
}
|
2008-07-28 05:03:55 -07:00
|
|
|
}
|
2002-09-18 06:59:27 -07:00
|
|
|
}
|
2017-08-31 06:25:15 -07:00
|
|
|
if (!vv || ft == Forward_tag || ft == Lazy_tag
|
|
|
|
#ifdef FLAT_FLOAT_ARRAY
|
|
|
|
|| ft == Double_tag
|
|
|
|
#endif
|
|
|
|
){
|
2003-10-16 16:22:23 -07:00
|
|
|
/* Do not short-circuit the pointer. Copy as a normal block. */
|
2017-03-10 08:29:21 -08:00
|
|
|
CAMLassert (Wosize_hd (hd) == 1);
|
2019-04-23 00:27:31 -07:00
|
|
|
result = caml_alloc_shr_for_minor_gc (1, Forward_tag, hd);
|
2002-09-18 06:59:27 -07:00
|
|
|
*p = result;
|
|
|
|
Hd_val (v) = 0; /* Set (GC) forward flag */
|
|
|
|
Field (v, 0) = result; /* and forward pointer. */
|
|
|
|
p = &Field (result, 0);
|
|
|
|
v = f;
|
|
|
|
goto tail_call;
|
|
|
|
}else{
|
|
|
|
v = f; /* Follow the forwarding */
|
|
|
|
goto tail_call; /* then oldify. */
|
|
|
|
}
|
2000-01-02 08:10:21 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}else{
|
|
|
|
*p = v;
|
|
|
|
}
|
|
|
|
}
|
1995-05-04 03:15:53 -07:00
|
|
|
|
2015-12-29 15:13:08 -08:00
|
|
|
/* Test if the ephemeron is alive, everything outside minor heap is alive */
|
2020-02-11 01:33:55 -08:00
|
|
|
Caml_inline int ephe_check_alive_data(struct caml_ephe_ref_elt *re){
|
2015-12-29 15:13:08 -08:00
|
|
|
mlsize_t i;
|
|
|
|
value child;
|
2016-02-29 04:17:45 -08:00
|
|
|
for (i = CAML_EPHE_FIRST_KEY; i < Wosize_val(re->ephe); i++){
|
2015-12-29 15:13:08 -08:00
|
|
|
child = Field (re->ephe, i);
|
|
|
|
if(child != caml_ephe_none
|
2020-07-06 09:23:07 -07:00
|
|
|
&& Is_block (child) && Is_young (child)) {
|
|
|
|
if(Tag_val(child) == Infix_tag) child -= Infix_offset_val(child);
|
|
|
|
if(Hd_val (child) != 0) return 0; /* Value not copied to major heap */
|
2015-12-29 15:13:08 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2003-12-31 06:20:40 -08:00
|
|
|
/* Finish the work that was put off by [caml_oldify_one].
|
|
|
|
Note that [caml_oldify_one] itself is called by oldify_mopup, so we
|
2002-01-18 07:13:26 -08:00
|
|
|
have to be careful to remove the first entry from the list before
|
|
|
|
oldifying its fields. */
|
2003-12-31 06:20:40 -08:00
|
|
|
void caml_oldify_mopup (void)
|
2002-01-18 07:13:26 -08:00
|
|
|
{
|
|
|
|
value v, new_v, f;
|
|
|
|
mlsize_t i;
|
2013-12-25 11:15:39 -08:00
|
|
|
struct caml_ephe_ref_elt *re;
|
2020-07-06 09:23:07 -07:00
|
|
|
int redo;
|
|
|
|
|
|
|
|
again:
|
|
|
|
redo = 0;
|
2002-01-18 07:13:26 -08:00
|
|
|
|
2002-02-05 09:11:33 -08:00
|
|
|
while (oldify_todo_list != 0){
|
2002-01-18 07:13:26 -08:00
|
|
|
v = oldify_todo_list; /* Get the head. */
|
2017-03-10 08:29:21 -08:00
|
|
|
CAMLassert (Hd_val (v) == 0); /* It must be forwarded. */
|
2002-01-18 07:13:26 -08:00
|
|
|
new_v = Field (v, 0); /* Follow forward pointer. */
|
|
|
|
oldify_todo_list = Field (new_v, 1); /* Remove from list. */
|
|
|
|
|
|
|
|
f = Field (new_v, 0);
|
|
|
|
if (Is_block (f) && Is_young (f)){
|
2003-12-31 06:20:40 -08:00
|
|
|
caml_oldify_one (f, &Field (new_v, 0));
|
2002-01-18 07:13:26 -08:00
|
|
|
}
|
|
|
|
for (i = 1; i < Wosize_val (new_v); i++){
|
|
|
|
f = Field (v, i);
|
|
|
|
if (Is_block (f) && Is_young (f)){
|
2003-12-31 06:20:40 -08:00
|
|
|
caml_oldify_one (f, &Field (new_v, i));
|
2002-01-18 07:13:26 -08:00
|
|
|
}else{
|
|
|
|
Field (new_v, i) = f;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-12-25 11:15:39 -08:00
|
|
|
|
|
|
|
/* Oldify the data in the minor heap of alive ephemeron
|
|
|
|
During minor collection keys outside the minor heap are considered alive */
|
2019-08-01 23:28:09 -07:00
|
|
|
for (re = Caml_state->ephe_ref_table->base;
|
|
|
|
re < Caml_state->ephe_ref_table->ptr; re++){
|
2013-12-25 11:15:39 -08:00
|
|
|
/* look only at ephemeron with data in the minor heap */
|
|
|
|
if (re->offset == 1){
|
2020-07-06 09:23:07 -07:00
|
|
|
value *data = &Field(re->ephe,1), v = *data;
|
|
|
|
if (v != caml_ephe_none && Is_block (v) && Is_young (v)){
|
|
|
|
mlsize_t offs = Tag_val(v) == Infix_tag ? Infix_offset_val(v) : 0;
|
|
|
|
v -= offs;
|
|
|
|
if (Hd_val (v) == 0){ /* Value copied to major heap */
|
|
|
|
*data = Field (v, 0) + offs;
|
2013-12-25 11:15:39 -08:00
|
|
|
} else {
|
2015-12-29 15:13:08 -08:00
|
|
|
if (ephe_check_alive_data(re)){
|
|
|
|
caml_oldify_one(*data,data);
|
|
|
|
redo = 1; /* oldify_todo_list can still be 0 */
|
2013-12-25 11:15:39 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-06 09:23:07 -07:00
|
|
|
if (redo) goto again;
|
2002-01-18 07:13:26 -08:00
|
|
|
}
|
|
|
|
|
2000-01-07 08:51:58 -08:00
|
|
|
/* Make sure the minor heap is empty by performing a minor collection
|
|
|
|
if needed.
|
|
|
|
*/
|
2003-12-31 06:20:40 -08:00
|
|
|
void caml_empty_minor_heap (void)
|
1995-05-04 03:15:53 -07:00
|
|
|
{
|
|
|
|
value **r;
|
2016-04-28 07:00:18 -07:00
|
|
|
struct caml_custom_elt *elt;
|
2015-07-17 07:31:05 -07:00
|
|
|
uintnat prev_alloc_words;
|
2013-12-25 11:15:39 -08:00
|
|
|
struct caml_ephe_ref_elt *re;
|
2000-01-07 08:51:58 -08:00
|
|
|
|
2019-06-05 23:39:26 -07:00
|
|
|
if (Caml_state->young_ptr != Caml_state->young_alloc_end){
|
2019-09-18 04:21:24 -07:00
|
|
|
CAMLassert_young_header(*(header_t*)Caml_state->young_ptr);
|
2015-07-17 07:31:05 -07:00
|
|
|
if (caml_minor_gc_begin_hook != NULL) (*caml_minor_gc_begin_hook) ();
|
|
|
|
prev_alloc_words = caml_allocated_words;
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->in_minor_collection = 1;
|
2017-02-27 08:32:44 -08:00
|
|
|
caml_gc_message (0x02, "<");
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_BEGIN(EV_MINOR_LOCAL_ROOTS);
|
2004-01-01 08:42:43 -08:00
|
|
|
caml_oldify_local_roots();
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR_LOCAL_ROOTS);
|
|
|
|
CAML_EV_BEGIN(EV_MINOR_REF_TABLES);
|
2019-08-01 23:28:09 -07:00
|
|
|
for (r = Caml_state->ref_table->base;
|
|
|
|
r < Caml_state->ref_table->ptr; r++) {
|
2003-12-31 06:20:40 -08:00
|
|
|
caml_oldify_one (**r, *r);
|
2002-06-05 05:11:23 -07:00
|
|
|
}
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR_REF_TABLES);
|
|
|
|
CAML_EV_BEGIN(EV_MINOR_COPY);
|
2003-12-31 06:20:40 -08:00
|
|
|
caml_oldify_mopup ();
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR_COPY);
|
2013-12-25 11:15:39 -08:00
|
|
|
/* Update the ephemerons */
|
2019-08-01 23:28:09 -07:00
|
|
|
for (re = Caml_state->ephe_ref_table->base;
|
|
|
|
re < Caml_state->ephe_ref_table->ptr; re++){
|
2016-02-29 04:17:45 -08:00
|
|
|
if(re->offset < Wosize_val(re->ephe)){
|
|
|
|
/* If it is not the case, the ephemeron has been truncated */
|
2020-07-06 09:23:07 -07:00
|
|
|
value *key = &Field(re->ephe,re->offset), v = *key;
|
|
|
|
if (v != caml_ephe_none && Is_block (v) && Is_young (v)){
|
|
|
|
mlsize_t offs = Tag_val (v) == Infix_tag ? Infix_offset_val (v) : 0;
|
|
|
|
v -= offs;
|
|
|
|
if (Hd_val (v) == 0){ /* Value copied to major heap */
|
|
|
|
*key = Field (v, 0) + offs;
|
2016-02-29 04:17:45 -08:00
|
|
|
}else{ /* Value not copied so it's dead */
|
2017-03-10 08:29:21 -08:00
|
|
|
CAMLassert(!ephe_check_alive_data(re));
|
2016-02-29 04:17:45 -08:00
|
|
|
*key = caml_ephe_none;
|
|
|
|
Field(re->ephe,1) = caml_ephe_none;
|
|
|
|
}
|
2007-05-04 07:05:13 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-07-07 00:14:06 -07:00
|
|
|
/* Update the OCaml finalise_last values */
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_BEGIN(EV_MINOR_UPDATE_WEAK);
|
2016-07-07 00:14:06 -07:00
|
|
|
caml_final_update_minor_roots();
|
2019-09-04 05:36:23 -07:00
|
|
|
/* Trigger memprofs callbacks for blocks in the minor heap. */
|
|
|
|
caml_memprof_minor_update();
|
2016-02-29 04:17:45 -08:00
|
|
|
/* Run custom block finalisation of dead minor values */
|
2019-08-01 23:28:09 -07:00
|
|
|
for (elt = Caml_state->custom_table->base;
|
|
|
|
elt < Caml_state->custom_table->ptr; elt++){
|
2016-04-28 07:00:18 -07:00
|
|
|
value v = elt->block;
|
|
|
|
if (Hd_val (v) == 0){
|
|
|
|
/* Block was copied to the major heap: adjust GC speed numbers. */
|
|
|
|
caml_adjust_gc_speed(elt->mem, elt->max);
|
|
|
|
}else{
|
|
|
|
/* Block will be freed: call finalization function, if any. */
|
|
|
|
void (*final_fun)(value) = Custom_ops_val(v)->finalize;
|
|
|
|
if (final_fun != NULL) final_fun(v);
|
2015-08-16 14:23:47 -07:00
|
|
|
}
|
|
|
|
}
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR_UPDATE_WEAK);
|
|
|
|
CAML_EV_BEGIN(EV_MINOR_FINALIZED);
|
2019-06-17 11:07:49 -07:00
|
|
|
Caml_state->stat_minor_words +=
|
|
|
|
Caml_state->young_alloc_end - Caml_state->young_ptr;
|
|
|
|
caml_gc_clock +=
|
|
|
|
(double) (Caml_state->young_alloc_end - Caml_state->young_ptr)
|
|
|
|
/ Caml_state->minor_heap_wsz;
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->young_ptr = Caml_state->young_alloc_end;
|
2019-08-01 23:28:09 -07:00
|
|
|
clear_table ((struct generic_table *) Caml_state->ref_table);
|
|
|
|
clear_table ((struct generic_table *) Caml_state->ephe_ref_table);
|
|
|
|
clear_table ((struct generic_table *) Caml_state->custom_table);
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->extra_heap_resources_minor = 0;
|
2017-02-27 08:32:44 -08:00
|
|
|
caml_gc_message (0x02, ">");
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->in_minor_collection = 0;
|
2015-11-20 08:54:26 -08:00
|
|
|
caml_final_empty_young ();
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR_FINALIZED);
|
2019-06-06 22:00:47 -07:00
|
|
|
Caml_state->stat_promoted_words += caml_allocated_words - prev_alloc_words;
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_COUNTER (EV_C_MINOR_PROMOTED,
|
|
|
|
caml_allocated_words - prev_alloc_words);
|
2019-06-06 22:00:47 -07:00
|
|
|
++ Caml_state->stat_minor_collections;
|
2019-05-09 08:39:35 -07:00
|
|
|
caml_memprof_renew_minor_sample();
|
2015-07-17 07:31:05 -07:00
|
|
|
if (caml_minor_gc_end_hook != NULL) (*caml_minor_gc_end_hook) ();
|
|
|
|
}else{
|
2016-07-06 09:34:12 -07:00
|
|
|
/* The minor heap is empty nothing to do. */
|
2015-07-17 07:31:05 -07:00
|
|
|
caml_final_empty_young ();
|
2000-01-07 08:51:58 -08:00
|
|
|
}
|
2000-03-17 05:22:36 -08:00
|
|
|
#ifdef DEBUG
|
2000-04-03 01:34:22 -07:00
|
|
|
{
|
|
|
|
value *p;
|
2019-06-17 11:07:49 -07:00
|
|
|
for (p = Caml_state->young_alloc_start; p < Caml_state->young_alloc_end;
|
|
|
|
++p) {
|
2000-04-03 01:34:22 -07:00
|
|
|
*p = Debug_free_minor;
|
|
|
|
}
|
|
|
|
}
|
2000-03-17 05:22:36 -08:00
|
|
|
#endif
|
2000-01-07 08:51:58 -08:00
|
|
|
}
|
|
|
|
|
2015-11-20 08:54:26 -08:00
|
|
|
#ifdef CAML_INSTR
|
|
|
|
extern uintnat caml_instr_alloc_jump;
|
2019-11-15 04:52:35 -08:00
|
|
|
#endif /*CAML_INSTR*/
|
2015-11-20 08:54:26 -08:00
|
|
|
|
|
|
|
/* Do a minor collection or a slice of major collection, call finalisation
|
2000-01-07 08:51:58 -08:00
|
|
|
functions, etc.
|
2015-11-20 08:54:26 -08:00
|
|
|
Leave enough room in the minor heap to allocate at least one object.
|
2019-05-23 04:32:22 -07:00
|
|
|
Guaranteed not to call any OCaml callback.
|
2000-01-07 08:51:58 -08:00
|
|
|
*/
|
2020-09-04 07:53:29 -07:00
|
|
|
void caml_gc_dispatch (void)
|
2000-01-07 08:51:58 -08:00
|
|
|
{
|
2019-06-05 23:39:26 -07:00
|
|
|
value *trigger = Caml_state->young_trigger; /* save old value of trigger */
|
2019-11-15 04:52:35 -08:00
|
|
|
|
|
|
|
CAML_EVENTLOG_DO({
|
|
|
|
CAML_EV_COUNTER(EV_C_ALLOC_JUMP, caml_instr_alloc_jump);
|
|
|
|
caml_instr_alloc_jump = 0;
|
|
|
|
});
|
2002-02-05 09:11:33 -08:00
|
|
|
|
2019-06-17 11:07:49 -07:00
|
|
|
if (trigger == Caml_state->young_alloc_start
|
|
|
|
|| Caml_state->requested_minor_gc) {
|
2015-11-20 08:54:26 -08:00
|
|
|
/* The minor heap is full, we must do a minor collection. */
|
|
|
|
/* reset the pointers first because the end hooks might allocate */
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_BEGIN(EV_MINOR);
|
2019-06-09 10:37:42 -07:00
|
|
|
Caml_state->requested_minor_gc = 0;
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->young_trigger = Caml_state->young_alloc_mid;
|
2019-05-09 08:39:35 -07:00
|
|
|
caml_update_young_limit();
|
2015-11-20 08:54:26 -08:00
|
|
|
caml_empty_minor_heap ();
|
2015-12-04 05:40:37 -08:00
|
|
|
/* The minor heap is empty, we can start a major collection. */
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MINOR);
|
|
|
|
if (caml_gc_phase == Phase_idle)
|
|
|
|
{
|
|
|
|
CAML_EV_BEGIN(EV_MAJOR);
|
|
|
|
caml_major_collection_slice (-1);
|
|
|
|
CAML_EV_END(EV_MAJOR);
|
|
|
|
}
|
2015-11-20 08:54:26 -08:00
|
|
|
}
|
2019-06-17 11:07:49 -07:00
|
|
|
if (trigger != Caml_state->young_alloc_start
|
|
|
|
|| Caml_state->requested_major_slice) {
|
2015-11-20 08:54:26 -08:00
|
|
|
/* The minor heap is half-full, do a major GC slice. */
|
2019-06-09 10:37:42 -07:00
|
|
|
Caml_state->requested_major_slice = 0;
|
2019-06-05 23:39:26 -07:00
|
|
|
Caml_state->young_trigger = Caml_state->young_alloc_start;
|
2019-05-09 08:39:35 -07:00
|
|
|
caml_update_young_limit();
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_BEGIN(EV_MAJOR);
|
2015-11-20 08:54:26 -08:00
|
|
|
caml_major_collection_slice (-1);
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_END(EV_MAJOR);
|
2015-11-20 08:54:26 -08:00
|
|
|
}
|
|
|
|
}
|
2000-01-07 08:51:58 -08:00
|
|
|
|
2020-01-07 04:30:26 -08:00
|
|
|
/* Called by young allocations when [Caml_state->young_ptr] reaches
|
2019-10-03 07:27:32 -07:00
|
|
|
[Caml_state->young_limit]. We may have to either call memprof or
|
|
|
|
the gc. */
|
2020-01-07 04:30:26 -08:00
|
|
|
void caml_alloc_small_dispatch (intnat wosize, int flags,
|
|
|
|
int nallocs, unsigned char* encoded_alloc_lens)
|
2019-05-09 08:39:35 -07:00
|
|
|
{
|
2019-10-09 04:04:50 -07:00
|
|
|
intnat whsize = Whsize_wosize (wosize);
|
|
|
|
|
|
|
|
/* First, we un-do the allocation performed in [Alloc_small] */
|
|
|
|
Caml_state->young_ptr += whsize;
|
|
|
|
|
|
|
|
while(1) {
|
|
|
|
/* We might be here because of an async callback / urgent GC
|
|
|
|
request. Take the opportunity to do what has been requested. */
|
2019-10-14 05:19:11 -07:00
|
|
|
if (flags & CAML_FROM_CAML)
|
In long-running C code, force examining all callbacks at the next safe
point following every minor collection or major slice.
Also run signal handlers first.
Indeed, in some cases, caml_something_to_do is not reliable (spotted
by @jhjourdan):
* We could get into caml_process_pending_actions when
caml_something_to_do is seen as set but not caml_pending_signals,
making us miss the signal.
* If there are two different callbacks (say, a signal and a finaliser)
arriving at the same time, then we set caml_something_to_do to 0
when starting the first one while the second one is still waiting.
We may want to run the second one if the first one is taking long.
In the latter case, the additional fix is to favour signals, which
have a lower latency requirement, whereas the latency of finalisers
keeps the same order of magnitude, and memprof callbacks are served on
a best-effort basis.
2019-10-10 15:12:06 -07:00
|
|
|
/* In the case of allocations performed from OCaml, execute
|
|
|
|
asynchronous callbacks. */
|
2019-10-14 05:19:11 -07:00
|
|
|
caml_raise_if_exception(caml_do_pending_actions_exn ());
|
In long-running C code, force examining all callbacks at the next safe
point following every minor collection or major slice.
Also run signal handlers first.
Indeed, in some cases, caml_something_to_do is not reliable (spotted
by @jhjourdan):
* We could get into caml_process_pending_actions when
caml_something_to_do is seen as set but not caml_pending_signals,
making us miss the signal.
* If there are two different callbacks (say, a signal and a finaliser)
arriving at the same time, then we set caml_something_to_do to 0
when starting the first one while the second one is still waiting.
We may want to run the second one if the first one is taking long.
In the latter case, the additional fix is to favour signals, which
have a lower latency requirement, whereas the latency of finalisers
keeps the same order of magnitude, and memprof callbacks are served on
a best-effort basis.
2019-10-10 15:12:06 -07:00
|
|
|
else {
|
2019-10-14 05:19:11 -07:00
|
|
|
caml_check_urgent_gc (Val_unit);
|
In long-running C code, force examining all callbacks at the next safe
point following every minor collection or major slice.
Also run signal handlers first.
Indeed, in some cases, caml_something_to_do is not reliable (spotted
by @jhjourdan):
* We could get into caml_process_pending_actions when
caml_something_to_do is seen as set but not caml_pending_signals,
making us miss the signal.
* If there are two different callbacks (say, a signal and a finaliser)
arriving at the same time, then we set caml_something_to_do to 0
when starting the first one while the second one is still waiting.
We may want to run the second one if the first one is taking long.
In the latter case, the additional fix is to favour signals, which
have a lower latency requirement, whereas the latency of finalisers
keeps the same order of magnitude, and memprof callbacks are served on
a best-effort basis.
2019-10-10 15:12:06 -07:00
|
|
|
/* In the case of long-running C code that regularly polls with
|
|
|
|
caml_process_pending_actions, force a query of all callbacks
|
|
|
|
at every minor collection or major slice. */
|
|
|
|
caml_something_to_do = 1;
|
|
|
|
}
|
2019-10-09 04:04:50 -07:00
|
|
|
|
|
|
|
/* Now, there might be enough room in the minor heap to do our
|
|
|
|
allocation. */
|
|
|
|
if (Caml_state->young_ptr - whsize >= Caml_state->young_trigger)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* If not, then empty the minor heap, and check again for async
|
|
|
|
callbacks. */
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_COUNTER (EV_C_FORCE_MINOR_ALLOC_SMALL, 1);
|
2019-05-09 08:39:35 -07:00
|
|
|
caml_gc_dispatch ();
|
|
|
|
}
|
2019-10-09 04:04:50 -07:00
|
|
|
|
|
|
|
/* Re-do the allocation: we now have enough space in the minor heap. */
|
|
|
|
Caml_state->young_ptr -= whsize;
|
|
|
|
|
|
|
|
/* Check if the allocated block has been sampled by memprof. */
|
2019-06-03 07:13:15 -07:00
|
|
|
if(Caml_state->young_ptr < caml_memprof_young_trigger){
|
2019-05-23 04:32:22 -07:00
|
|
|
if(flags & CAML_DO_TRACK) {
|
2020-01-07 04:30:26 -08:00
|
|
|
caml_memprof_track_young(wosize, flags & CAML_FROM_CAML,
|
|
|
|
nallocs, encoded_alloc_lens);
|
2019-05-09 08:39:35 -07:00
|
|
|
/* Until the allocation actually takes place, the heap is in an invalid
|
|
|
|
state (see comments in [caml_memprof_track_young]). Hence, very little
|
|
|
|
heap operations are allowed before the actual allocation.
|
|
|
|
|
2019-06-03 07:13:15 -07:00
|
|
|
Moreover, [Caml_state->young_ptr] should not be modified before the
|
2019-05-09 08:39:35 -07:00
|
|
|
allocation, because its value has been used as the pointer to
|
|
|
|
the sampled block.
|
|
|
|
*/
|
|
|
|
} else caml_memprof_renew_minor_sample();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-09 11:32:16 -07:00
|
|
|
/* Exported for backward compatibility with Lablgtk: do a minor
|
|
|
|
collection to ensure that the minor heap is empty.
|
2015-11-20 08:54:26 -08:00
|
|
|
*/
|
|
|
|
CAMLexport void caml_minor_collection (void)
|
|
|
|
{
|
2019-06-09 10:37:42 -07:00
|
|
|
Caml_state->requested_minor_gc = 1;
|
2015-11-20 08:54:26 -08:00
|
|
|
caml_gc_dispatch ();
|
1995-05-04 03:15:53 -07:00
|
|
|
}
|
|
|
|
|
2019-10-09 11:18:44 -07:00
|
|
|
/* If a minor collection or a major slice has been requested, perform it
   now via [caml_gc_dispatch].  [extra_root] is registered as a GC root
   for the duration of the collection and returned (possibly relocated),
   so callers can write [v = caml_check_urgent_gc (v)]. */
CAMLexport value caml_check_urgent_gc (value extra_root)
{
  if (Caml_state->requested_major_slice || Caml_state->requested_minor_gc){
    CAMLparam1 (extra_root);
    caml_gc_dispatch();
    CAMLdrop;
  }
  return extra_root;
}
|
|
|
|
|
2014-04-02 04:46:19 -07:00
|
|
|
static void realloc_generic_table
|
|
|
|
(struct generic_table *tbl, asize_t element_size,
|
2019-11-15 04:52:35 -08:00
|
|
|
ev_gc_counter ev_counter_name,
|
|
|
|
char *msg_threshold, char *msg_growing, char *msg_error)
|
2014-04-02 04:46:19 -07:00
|
|
|
{
|
2017-03-31 09:20:36 -07:00
|
|
|
CAMLassert (tbl->ptr == tbl->limit);
|
|
|
|
CAMLassert (tbl->limit <= tbl->end);
|
|
|
|
CAMLassert (tbl->limit >= tbl->threshold);
|
1995-05-04 03:15:53 -07:00
|
|
|
|
2007-05-04 07:05:13 -07:00
|
|
|
if (tbl->base == NULL){
|
2019-06-05 23:39:26 -07:00
|
|
|
alloc_generic_table (tbl, Caml_state->minor_heap_wsz / 8, 256,
|
2014-04-02 04:46:19 -07:00
|
|
|
element_size);
|
2007-05-04 07:05:13 -07:00
|
|
|
}else if (tbl->limit == tbl->threshold){
|
2019-11-15 04:52:35 -08:00
|
|
|
CAML_EV_COUNTER (ev_counter_name, 1);
|
2014-04-02 04:46:19 -07:00
|
|
|
caml_gc_message (0x08, msg_threshold, 0);
|
2007-05-04 07:05:13 -07:00
|
|
|
tbl->limit = tbl->end;
|
2015-11-20 08:54:26 -08:00
|
|
|
caml_request_minor_gc ();
|
|
|
|
}else{
|
1995-05-04 03:15:53 -07:00
|
|
|
asize_t sz;
|
2007-05-04 07:05:13 -07:00
|
|
|
asize_t cur_ptr = tbl->ptr - tbl->base;
|
2019-06-09 10:37:42 -07:00
|
|
|
CAMLassert (Caml_state->requested_minor_gc);
|
1995-12-22 08:48:17 -08:00
|
|
|
|
2007-05-04 07:05:13 -07:00
|
|
|
tbl->size *= 2;
|
2014-04-02 04:46:19 -07:00
|
|
|
sz = (tbl->size + tbl->reserve) * element_size;
|
|
|
|
caml_gc_message (0x08, msg_growing, (intnat) sz/1024);
|
2014-05-28 16:11:47 -07:00
|
|
|
tbl->base = caml_stat_resize_noexc (tbl->base, sz);
|
2007-05-04 07:05:13 -07:00
|
|
|
if (tbl->base == NULL){
|
2018-05-17 23:28:19 -07:00
|
|
|
caml_fatal_error ("%s", msg_error);
|
2003-12-29 14:15:02 -08:00
|
|
|
}
|
2016-02-29 04:17:45 -08:00
|
|
|
tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
|
|
|
|
tbl->threshold = tbl->base + tbl->size * element_size;
|
2007-05-04 07:05:13 -07:00
|
|
|
tbl->ptr = tbl->base + cur_ptr;
|
|
|
|
tbl->limit = tbl->end;
|
1995-05-04 03:15:53 -07:00
|
|
|
}
|
|
|
|
}
|
2014-04-02 04:46:19 -07:00
|
|
|
|
|
|
|
/* Resize the ref_table when it has become full; sizing and growth
   policy are delegated to [realloc_generic_table]. */
void caml_realloc_ref_table (struct caml_ref_table *tbl)
{
  realloc_generic_table
    ((struct generic_table *) tbl, sizeof (value *),
     EV_C_REQUEST_MINOR_REALLOC_REF_TABLE,
     "ref_table threshold crossed\n",
     "Growing ref_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
     "ref_table overflow");
}
|
2013-12-25 11:15:39 -08:00
|
|
|
|
|
|
|
/* Resize the ephemeron-reference table when it has become full;
   sizing and growth policy are delegated to [realloc_generic_table]. */
void caml_realloc_ephe_ref_table (struct caml_ephe_ref_table *tbl)
{
  realloc_generic_table
    ((struct generic_table *) tbl, sizeof (struct caml_ephe_ref_elt),
     EV_C_REQUEST_MINOR_REALLOC_EPHE_REF_TABLE,
     "ephe_ref_table threshold crossed\n",
     "Growing ephe_ref_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
     "ephe_ref_table overflow");
}
|
2016-04-28 07:00:18 -07:00
|
|
|
|
|
|
|
/* Resize the custom-block table when it has become full; sizing and
   growth policy are delegated to [realloc_generic_table]. */
void caml_realloc_custom_table (struct caml_custom_table *tbl)
{
  realloc_generic_table
    ((struct generic_table *) tbl, sizeof (struct caml_custom_elt),
     EV_C_REQUEST_MINOR_REALLOC_CUSTOM_TABLE,
     "custom_table threshold crossed\n",
     "Growing custom_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
     "custom_table overflow");
}
|