Diff between version 1.1 (1999/12/03 07:39:09) and version 1.2 (2000/04/10 08:31:31)
Line 87:
  struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
  ...
  # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
          /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a  */
          /* multiple of HBLKSIZE.                               */
+         /* The incremental collector actually likes a larger   */
+         /* size, since it wants to push all marked dirty objs  */
+         /* before marking anything new.  Currently we let it   */
+         /* grow dynamically.                                   */
  # endif
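The added comment records the policy behind the small initial size: the incremental collector wants room to push every marked dirty object before marking anything new, so the mark stack is grown on demand rather than preallocated large. A minimal sketch of that grow-on-demand pattern follows; the names and the doubling policy are invented for illustration and are not necessarily what mark.c itself does.

    #include <stdlib.h>

    typedef struct { char *mse_start; char *mse_end; } mse;  /* simplified entry */

    static mse   *mark_stack;
    static size_t mark_stack_size = 512;   /* entries; illustrative value */
    static int    mark_stack_too_small;

    /* Called between collections: if the last cycle flagged the stack  */
    /* as too small, replace it with one twice the size.                */
    static void maybe_grow_mark_stack(void)
    {
        if (mark_stack_too_small) {
            mse *bigger = malloc(2 * mark_stack_size * sizeof(mse));

            if (bigger == NULL) return;     /* keep the old, smaller stack */
            free(mark_stack);               /* entries are dead between cycles */
            mark_stack = bigger;
            mark_stack_size *= 2;
            mark_stack_too_small = 0;
        }
    }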
Line 254 (version 1.1) / Line 258 (version 1.2):
  /*
  ...
  ptr_t cold_gc_frame;
  ...
      case MS_PUSH_RESCUERS:
          if (GC_mark_stack_top
-             >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+             >= GC_mark_stack + GC_mark_stack_size
+                - INITIAL_MARK_STACK_SIZE/2) {
+             /* Go ahead and mark, even though that might cause us to */
+             /* see more marked dirty objects later on.  Avoid this   */
+             /* in the future.                                        */
+             GC_mark_stack_too_small = TRUE;
              GC_mark_from_mark_stack();
              return(FALSE);
          } else {
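The rewritten test changes when the collector gives up pushing rescuers and starts draining the stack: version 1.1 drained once a quarter of the initial capacity was occupied, while 1.2 drains only when fewer than INITIAL_MARK_STACK_SIZE/2 free entries remain in the current (possibly already grown) stack, and sets GC_mark_stack_too_small so the stack is enlarged for the next cycle. A simplified restatement of the two predicates; the mse type, the function names, and the concrete size are invented for the illustration.

    #include <stddef.h>

    #define INITIAL_MARK_STACK_SIZE 512    /* entries; illustrative value */

    typedef struct { char *mse_start; char *mse_end; } mse;  /* simplified entry */

    /* 1.1: drain once a quarter of the *initial* capacity is occupied, */
    /* no matter how much room the (possibly grown) stack really has.   */
    static int should_drain_1_1(mse *top, mse *base)
    {
        return top >= base + INITIAL_MARK_STACK_SIZE/4;
    }

    /* 1.2: drain only when fewer than INITIAL_MARK_STACK_SIZE/2 free   */
    /* entries remain, and note the near-overflow so the stack can be   */
    /* grown before the next collection.                                */
    static int should_drain_1_2(mse *top, mse *base, size_t cur_size,
                                int *too_small)
    {
        if (top >= base + cur_size - INITIAL_MARK_STACK_SIZE/2) {
            *too_small = 1;
            return 1;
        }
        return 0;
    }

The effect is that draining early, which makes the collector revisit marked dirty objects, happens only as a last resort once the stack genuinely runs out of headroom.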
  ...
  # endif
  word p;
  {
+ #   ifdef NURSERY
+       if (0 != GC_push_proc) {
+           GC_push_proc(p);
+           return;
+       }
+ #   endif
      GC_PUSH_ONE_STACK(p, 0);
  }
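The NURSERY block adds an interception point to GC_push_one: when GC_push_proc is non-zero, each pushed pointer is handed to it instead of going through GC_PUSH_ONE_STACK, which lets a generational ("nursery") collector see candidate pointers first. Below is a hypothetical client side of the hook; only GC_push_proc itself appears in the diff, while the installer and the nursery routine are sketches.

    typedef unsigned long word;

    /* The hook itself; 0 means "use the normal marking path".          */
    void (*GC_push_proc)(word) = 0;

    /* A nursery collector's interceptor: sees every pointer that would */
    /* otherwise go through GC_PUSH_ONE_STACK.                          */
    static void nursery_note_pointer(word p)
    {
        (void)p;   /* record p in a remembered set (details omitted) */
    }

    /* Invented installer: not part of the diff.                        */
    void GC_set_push_proc(void (*proc)(word))
    {
        GC_push_proc = proc;
    }

A client would install the hook with GC_set_push_proc(nursery_note_pointer) before marking begins, and pass 0 to restore the default path.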
Line 1115 (version 1.1) / Line 1130 (version 1.2):
  struct hblk *h;
  ...
  struct hblk * GC_push_next_marked_dirty(h)
  struct hblk *h;
  {
-     register hdr * hhdr = HDR(h);
+     register hdr * hhdr;

      if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
      for (;;) {
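The only change here drops the initializer from hhdr's declaration, presumably because the value was dead: the loop advances h and refetches the block header on each iteration, so computing HDR(h) at declaration time did no useful work. A self-contained schematic of that loop shape; the stub types and functions are invented so the sketch compiles on its own.

    struct hblk { char hb_body[4096]; };
    typedef struct { int hb_marked_dirty; } hdr;

    static hdr the_hdr;
    static hdr *HDR(struct hblk *h) { (void)h; return &the_hdr; }
    static struct hblk *GC_next_block(struct hblk *h) { (void)h; return 0; }

    struct hblk *push_next_marked_dirty_sketch(struct hblk *h)
    {
        hdr *hhdr;                    /* no initializer: assigned in the loop */

        for (;;) {
            h = GC_next_block(h);     /* h changes on every iteration, so an */
            if (h == 0) return 0;     /* up-front HDR(h) value would be dead */
            hhdr = HDR(h);
            if (hhdr->hb_marked_dirty) return h;
        }
    }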