version 1.6, 2002/07/24 08:00:06 (previously version 1.3, 2000/12/01 09:26:10)

/* ...
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
      /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                 */
# ifdef __GNUC__
  __inline__
# endif
  static GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
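
  /* (Used by GC_allochblk_nth below to decide whether splitting a   */
  /* large free block is safe, i.e. whether enough large-block space */
  /* would remain to cover any previous request for large blocks.)   */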

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
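/* (Under USE_MUNMAP the per-list free byte counts are not maintained, */
/* so both macros intentionally expand to nothing.)                    */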

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    /* ... */
}
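
/* For illustration only: one plausible shape for the elided body,   */
/* assuming hypothetical constants UNIQUE_THRESHOLD (block counts up */
/* to this each get their own list), HUGE_THRESHOLD (counts at least */
/* this share the last list) and FL_COMPRESSION (bin width in        */
/* between).  This is a sketch, not necessarily the exact source:    */
#if 0
int example_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
           + UNIQUE_THRESHOLD;
}
#endif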

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)
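    /* A block whose hb_map is the invalid map is free; PHDR and NHDR */
    /* fetch the headers of its predecessor and successor on the      */
    /* doubly linked free list (hb_prev/hb_next).                     */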

/* ... */

void GC_print_hblkfreelist()
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        /* No total here: GC_free_bytes is not maintained with USE_MUNMAP. */
        if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        /* ... */

/* ... */

void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
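    /* Here n is the index of the free list hhdr currently lives on, */
    /* or FL_UNKNOWN, in which case the index is recomputed from the */
    /* block size below.                                             */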
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#     ifdef USE_MUNMAP
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
#     endif
      GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
      GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
      hdr * phdr;
      GET_HDR(hhdr -> hb_prev, phdr);
      phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
      hdr * nhdr;
      GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
      GET_HDR(hhdr -> hb_next, nhdr);
      nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/* ... */

#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
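    /* h is now the head of free list index; the previous head */
    /* (second) is relinked behind it.                          */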
    if (0 != second) {
        /* ... */

/* ... */

    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);

/* ... */

int index;      /* Index of free list */

/* ... */

      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */

/* ... */

struct hblk * GC_allochblk_nth();

/*
 * ...
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collector, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
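
/* Illustration (hypothetical caller, not part of this file): since the */
/* collector no longer clears blocks here, a client needing zeroed      */
/* memory must clear them itself.  A minimal sketch, reusing BZERO and  */
/* OBJ_SZ_TO_BLOCKS as used elsewhere in this file:                     */
#if 0
struct hblk * example_allochblk_cleared(sz, kind)
word sz;
int kind;
{
    struct hblk * h = GC_allochblk(sz, kind, 0 /* flags */);

    if (h != 0) {
        /* Clear the entire allocated region. */
        BZERO(h, OBJ_SZ_TO_BLOCKS(sz) * HBLKSIZE);
    }
    return h;
}
#endif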

/* ... */

            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#             ifdef USE_MUNMAP
                continue;
#             else
                /* If we have enough large blocks left to cover any */
                /* previous request for large blocks, we go ahead   */
                /* and split.  Assuming a steady state, that should */
                /* be safe.  It means that we can use the full      */
                /* heap if we allocate only small objects.          */
                if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                    continue;
                }
                /* If we are deallocating lots of memory from */
                /* finalizers, fail and collect sooner rather */
                /* than later.                                */
                if (GC_finalizer_mem_freed > (GC_heapsize >> 4)) {
                    continue;
                }
#             endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* ... */

/* ... */

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                        (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
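              /* lasthbp now points past any black listed prefix of */
              /* the block; the usable size shrinks accordingly.    */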

/* ... */

                  && orig_avail - size_needed
                     > (signed_word)BL_LIMIT) {
            /* Punt, since anything else risks unreasonable heap growth. */
            if (++GC_large_alloc_warn_suppressed
                >= GC_large_alloc_warn_interval) {
              WARN("Repeated allocation of very large block "
                   "(appr. size %ld):\n"
                   "\tMay lead to memory leak and poor performance.\n",
                   size_needed);
              GC_large_alloc_warn_suppressed = 0;
            }
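            /* (The counter rate-limits the warning to every          */
            /* GC_large_alloc_warn_interval-th oversized allocation.) */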
            size_avail = orig_avail;
          } else if (size_avail == 0 && size_needed == HBLKSIZE
                     && IS_MAPPED(hhdr)) {

/* ... */

                if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                  (void) setup_header(
                            hhdr,
                            BYTES_TO_WORDS(HBLKSIZE),
                            PTRFREE, 0);  /* Can't fail. */
                  if (GC_debugging_started) {
                    BZERO(h, HBLKSIZE);
                  }
                }
              }
            }

/* ... */

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks. */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

/* ... */

        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }

/* ... */

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
    GC_remove_protection(hbp, divHBLKSZ(size_needed),
                         (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    {
        /* ... */

/* ... */

signed_word size;

/* ... */

    if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
    }

    GC_ASSERT(IS_MAPPED(hhdr));