--- version 1.4, 2001/04/20 07:39:18
+++ version 1.6, 2002/07/24 08:00:06
Line 47 (v1.4) / Line 47 (v1.6):

 GC_bool GC_use_entire_heap = 0;
 struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

 #ifndef USE_MUNMAP

 word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
     /* Number of free bytes on each list. */

 /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
 /* > GC_max_large_allocd_bytes? */
-GC_bool GC_enough_large_bytes_left(bytes,n)
+# ifdef __GNUC__
+__inline__
+# endif
+static GC_bool GC_enough_large_bytes_left(bytes,n)
 word bytes;
 int n;
 {
Line 86 (v1.4) / Line 90 (v1.6):

 word blocks_needed;

 }
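The body of GC_enough_large_bytes_left (declared in the Line 47 hunk above) is not part of this diff; only its declaration changed, becoming static and, under gcc, __inline__. Going purely by the comment preceding the declaration, one plausible body, a sketch only and assuming the usual gc_priv.h definitions of word, GC_bool, TRUE/FALSE, N_HBLK_FLS, GC_free_bytes and GC_max_large_allocd_bytes, would be:

    /* Sketch, not the actual 1.6 body: does bytes plus the free bytes  */
    /* on lists n .. N_HBLK_FLS exceed GC_max_large_allocd_bytes?       */
    static GC_bool GC_enough_large_bytes_left(word bytes, int n)
    {
        int i;

        for (i = n; i <= N_HBLK_FLS; ++i) {
            bytes += GC_free_bytes[i];
            if (bytes > GC_max_large_allocd_bytes) return(TRUE);
        }
        return(FALSE);
    }

The allocator consults it before splitting a large free block (see the call in the hunk below); a FALSE answer makes it skip the block and keep searching.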

 # define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
 # define PHDR(hhdr) HDR(hhdr -> hb_prev)
 # define NHDR(hhdr) HDR(hhdr -> hb_next)
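These macros are the free-list plumbing used by the code below: HBLK_IS_FREE tests whether a block header describes a block sitting on one of the GC_hblkfreelist lists (its block map is GC_invalid_map), and PHDR/NHDR fetch the headers of a free block's predecessor and successor on that doubly linked list. Purely as an illustration of how they combine with GC_free_bytes, here is a hypothetical consistency check, a sketch that assumes it lives in allchblk.c with USE_MUNMAP undefined and that hb_sz of a free block holds its size in bytes:

    /* Hypothetical debugging aid, present in neither version: verify   */
    /* that free list i really contains GC_free_bytes[i] bytes.         */
    static void GC_check_free_bytes(int i)
    {
        struct hblk * h = GC_hblkfreelist[i];
        hdr * hhdr;
        word total = 0;

        while (h != 0) {
            hhdr = HDR(h);
            GC_ASSERT(HBLK_IS_FREE(hhdr));
            total += hhdr -> hb_sz;      /* free block size in bytes */
            h = hhdr -> hb_next;
        }
        GC_ASSERT(total == GC_free_bytes[i]);
    }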
 if (!GC_use_entire_heap
     && size_avail != size_needed
     && USED_HEAP_SIZE >= GC_requested_heapsize
-    && !GC_incremental && GC_should_collect()) {
+    && !TRUE_INCREMENTAL && GC_should_collect()) {
 # ifdef USE_MUNMAP
     continue;
 # else
-    /* If we enough large blocks left to cover any */
+    /* If we have enough large blocks left to cover any */
     /* previous request for large blocks, we go ahead */
     /* and split.  Assuming a steady state, that should */
     /* be safe.  It means that we can use the full */

     if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
       continue;
     }
+    /* If we are deallocating lots of memory from */
+    /* finalizers, fail and collect sooner rather */
+    /* than later. */
+    if (GC_finalizer_mem_freed > (GC_heapsize >> 4)) {
+      continue;
+    }
 # endif /* !USE_MUNMAP */
 }
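Two behavioural changes sit in this hunk. First, the guard that prefers triggering a collection over splitting a large free block now tests TRUE_INCREMENTAL instead of GC_incremental. TRUE_INCREMENTAL is not defined in this file; as far as I recall it is a gc_priv.h macro of roughly this shape, which is an assumption to be checked against the matching gc_priv.h:

    /* Assumed definition, not shown in this diff: "really" incremental */
    /* means incremental mode with a finite time slice, as opposed to   */
    /* incremental mode enabled only for generational behaviour.        */
    # define TRUE_INCREMENTAL \
            (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)

Second, 1.6 adds an early out when finalizers have recently released a lot of memory: if GC_finalizer_mem_freed exceeds GC_heapsize >> 4, i.e. one sixteenth of the heap, the allocator declines to split and lets a collection happen sooner, as the new comment says.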
 /* If the next heap block is obviously better, go on. */

 while ((ptr_t)lasthbp <= search_end
        && (thishbp = GC_is_black_listed(lasthbp,
-                                        (word)eff_size_needed))) {
+                                        (word)eff_size_needed))
+           != 0) {
     lasthbp = thishbp;
 }
 size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;

        && orig_avail - size_needed
           > (signed_word)BL_LIMIT) {
     /* Punt, since anything else risks unreasonable heap growth. */
-    if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
-      WARN("Needed to allocate blacklisted block at 0x%lx\n",
-           (word)hbp);
+    if (++GC_large_alloc_warn_suppressed
+        >= GC_large_alloc_warn_interval) {
+      WARN("Repeated allocation of very large block "
+           "(appr. size %ld):\n"
+           "\tMay lead to memory leak and poor performance.\n",
+           size_needed);
+      GC_large_alloc_warn_suppressed = 0;
     }
     size_avail = orig_avail;
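The blacklist punt warning changes form here: 1.4 printed the address of the block and could only be silenced wholesale via the GC_NO_BLACKLIST_WARNING environment variable, while 1.6 prints the approximate size and throttles itself, warning only once per GC_large_alloc_warn_interval repeats. Assuming the matching misc.c of this release reads a GC_LARGE_ALLOC_WARN_INTERVAL environment variable to set that interval (not shown in this diff, so treat both the variable name and the behaviour as assumptions), a client that deliberately makes many huge allocations could quiet the noise like this:

    /* Hypothetical client program; the environment variable name and   */
    /* the allocation size are illustrative only.                       */
    #include <stdlib.h>
    #include "gc.h"

    int main(void)
    {
        void *p;

        setenv("GC_LARGE_ALLOC_WARN_INTERVAL", "50", 1);  /* warn every 50th time */
        GC_INIT();
        p = GC_MALLOC(8 * 1024 * 1024);   /* large, blacklist-prone request */
        return p == 0;
    }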
 } else if (size_avail == 0 && size_needed == HBLKSIZE

 if (0 == hbp) return 0;

-    /* Notify virtual dirty bit implementation that we are about to write. */
-    GC_write_hint(hbp);

     /* Add it to map of valid blocks */
     if (!GC_install_counts(hbp, (word)size_needed)) return(0);
         /* This leaks memory under very rare conditions. */

         GC_remove_counts(hbp, (word)size_needed);
         return(0); /* ditto */
     }

+    /* Notify virtual dirty bit implementation that we are about to write. */
+    /* Ensure that pointerfree objects are not protected if it's avoidable. */
+    GC_remove_protection(hbp, divHBLKSZ(size_needed),
+                         (hhdr -> hb_descr == 0) /* pointer-free */);

     /* We just successfully allocated a block.  Restart count of */
     /* consecutive failures. */
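Version 1.4 told the virtual-dirty-bit machinery about the upcoming write with GC_write_hint(hbp) immediately after choosing a block (the lines removed further up); 1.6 waits until the header is in place and instead calls GC_remove_protection, passing the block count and whether the block is pointer-free (hb_descr == 0), so that under mprotect-style dirty tracking pointer-free blocks need not be write-protected at all. The client-visible effect, illustrated below under the assumption that incremental mode with page protection is in use, is that large atomic (pointer-free) objects can be written without taking GC write faults:

    /* Illustration only: GC_MALLOC_ATOMIC objects carry no pointers,   */
    /* so per the change above their pages should not need protection.  */
    #include "gc.h"

    int main(void)
    {
        char *buf;

        GC_INIT();
        GC_enable_incremental();                /* dirty-bit tracking in effect */
        buf = (char *)GC_MALLOC_ATOMIC(1 << 20);  /* 1 MB pointer-free buffer */
        if (buf != 0) buf[0] = 'x';
        return 0;
    }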
Line 773 (v1.4) / Line 789 (v1.6):

     signed_word size;

     if (HBLK_IS_FREE(hhdr)) {
         GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                    (unsigned long) hbp);
+        ABORT("Duplicate large block deallocation");
     }
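In the large-block deallocation path (GC_freehblk, by the look of it) the check for freeing an already-free block is strengthened: 1.4 only printed the message, 1.6 aborts. The easiest way to trip it is an explicit double free of an object big enough to occupy its own heap block(s); 64 KB is assumed here to be comfortably over that threshold:

    /* Illustration only: with 1.6 the second GC_FREE should abort with */
    /* "Duplicate large block deallocation" instead of just printing.   */
    #include "gc.h"

    int main(void)
    {
        void *p;

        GC_INIT();
        p = GC_MALLOC(64 * 1024);
        GC_FREE(p);
        GC_FREE(p);     /* duplicate deallocation of a large block */
        return 0;
    }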

     GC_ASSERT(IS_MAPPED(hhdr));