version 1.3, 2001/04/20 07:39:19
|
|
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */
|
|
#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate a reclaim list for the given object kind.   */
/* Returns TRUE on success.                              */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((word)(MAXOBJSZ+1) * sizeof(struct hblk *));

    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}
|
|
/* Allocate a large block of size lw words.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
ptr_t GC_alloc_large(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    struct hblk * h;
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    ptr_t result;

    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lw, k, flags);
#   ifdef USE_MUNMAP
        if (0 == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lw, k, flags);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lw, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        word total_words = BYTES_TO_WORDS(n_blocks * HBLKSIZE);
                /* Total words actually allocated, after rounding up   */
                /* to whole heap blocks.                               */
        if (n_blocks > 1) {
            GC_large_allocd_bytes += n_blocks * HBLKSIZE;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = (ptr_t)(h -> hb_body);
        GC_words_wasted += total_words - lw;
    }
    return result;
}
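
/* Worked example (illustrative only; assumes 4-byte words, 4096-byte  */
/* heap blocks, and ignores header bytes): a request of lw = 2000      */
/* words is 8000 bytes, so n_blocks = 2 and the block pair holds 2048  */
/* words; the 48-word difference is what is added to GC_words_wasted   */
/* above.                                                               */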
|
|
|
|
|
/* Allocate a large block of size lw words.  Clear if appropriate.     */
ptr_t GC_alloc_large_and_clear(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    ptr_t result = GC_alloc_large(lw, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}
|
|
|
/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = MIN_WORDS;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                GC_init_inner();
                return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        lw = ROUNDED_UP_WORDS(lb);
        op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
    }
    GC_words_allocd += lw;

out:
    return op;
}
|
|
|
/* Allocate a composite object of size lb bytes.  The caller           */
/* guarantees that pointers past the first page are not relevant.      */
/* Caller holds allocation lock.                                       */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register word lw;
    ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
    GC_words_allocd += lw;
    return op;
}
|
|
ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        DISABLE_SIGNALS();
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
        ENABLE_SIGNALS();
    } else {
        word lw;
        word n_blocks;
        GC_bool init;

        lw = ROUNDED_UP_WORDS(lb);
        n_blocks = OBJ_SZ_TO_BLOCKS(lw);
        init = GC_obj_kinds[k].ok_init;
        DISABLE_SIGNALS();
        LOCK();
        result = (ptr_t)GC_alloc_large(lw, k, 0);
        if (0 != result) {
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            } else {
#               ifdef THREADS
                    /* Clear any memory that might be used for GC descriptors */
                    /* before we release the lock.                            */
                    ((word *)result)[0] = 0;
                    ((word *)result)[1] = 0;
                    ((word *)result)[lw-1] = 0;
                    ((word *)result)[lw-2] = 0;
#               endif
            }
        }
        GC_words_allocd += lw;
        UNLOCK();
        ENABLE_SIGNALS();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make GC_clear_stack a tail call, hoping to get more of the stack. */

/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;
|
|
    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;
|
|
    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}
|
|
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
  {
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialization can call malloc before we're ready for it.
       * It's not clear that this is enough to help matters; the thread
       * implementation may well call malloc at other inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return((GC_PTR)REDIRECT_MALLOC(lb));
  }
|
|
# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
  {
    return((GC_PTR)REDIRECT_MALLOC(n*lb));
  }
# endif /* REDIRECT_MALLOC */
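
/* Build-time sketch (illustrative, not from this file): the           */
/* REDIRECT_MALLOC definitions above are only compiled in when the     */
/* collector is built with something like                              */
/*      cc -c -DREDIRECT_MALLOC=GC_malloc malloc.c                     */
/* so that ordinary malloc/calloc calls in the client land in the      */
/* collector.                                                          */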
|
|
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
            case STUBBORN:
                return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
            case AUNCOLLECTABLE:
                return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}
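
/* Usage sketch (illustrative only; not compiled): the kind argument   */
/* selects which of the allocators above services the request.         */
#if 0
  void kind_example(void)
  {
    GC_PTR p = GC_generic_or_special_malloc(24, NORMAL);  /* traced     */
    GC_PTR q = GC_generic_or_special_malloc(24, PTRFREE); /* not traced */
  }
#endif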
|
|
|
|
|
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes       */
register word orig_sz;   /* Original sz in bytes        */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HDR_BYTES+HBLKSIZE-1) & (~HBLKMASK);
        sz -= HDR_BYTES;
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by allochblk. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this        */
                /* gives the client warning of imminent disaster.      */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
                GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
            GC_free(p);
#       endif
        return(result);
    }
}
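
/* Usage sketch (illustrative only; not compiled): the returned        */
/* pointer must replace the old one, since the object may move.        */
#if 0
  void grow_example(void)
  {
    GC_PTR buf = GC_malloc(100);

    buf = GC_realloc(buf, 200);  /* copied to a new 200-byte object */
    if (buf == 0) ABORT("GC_realloc failed");
  }
#endif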
|
|
|
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
    return(GC_realloc(p, lb));
  }
# endif /* REDIRECT_MALLOC */
|
|
|
/* Explicitly deallocate an object p.   */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.    */
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(SOLARIS_THREADS) || defined(LINUX_THREADS) \
         || defined(__MINGW32__)) /* Should this be MSWIN32 in general? */
        /* For Solaris, we have to redirect malloc calls during        */
        /* initialization.  For the others, this seems to happen       */
        /* implicitly.                                                 */
        /* Don't try to deallocate that memory.                        */
        if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((sz <= MAXOBJSZ), 1)) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
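
/* Usage sketch (illustrative only; not compiled): explicit            */
/* deallocation is never required, but immediately returns small       */
/* objects to their kind's free list instead of waiting for a          */
/* collection.                                                         */
#if 0
  void free_example(void)
  {
    GC_PTR tmp = GC_malloc_atomic(64);

    /* ... use tmp ... */
    GC_free(tmp);       /* tmp must not be dereferenced after this */
  }
#endif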
|
|
|
/* Explicitly deallocate an object p when we already hold lock.        */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                          */
#ifdef THREADS
void GC_free_inner(GC_PTR p)
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
    }
}
#endif /* THREADS */
|
|
# ifdef REDIRECT_MALLOC
# ifdef __STDC__