diff --git a/mozilla/js/src/jsarray.c b/mozilla/js/src/jsarray.c index aa22f63c44e..8d0feafe2ad 100644 --- a/mozilla/js/src/jsarray.c +++ b/mozilla/js/src/jsarray.c @@ -2111,6 +2111,6 @@ js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector) JS_POP_TEMP_ROOT(cx, &tvr); /* Set/clear newborn root, in case we lost it. */ - cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj; + cx->weakRoots.newborn[GCX_OBJECT] = obj; return obj; } diff --git a/mozilla/js/src/jscntxt.h b/mozilla/js/src/jscntxt.h index edf9f87eac7..fa3a47ccaf8 100644 --- a/mozilla/js/src/jscntxt.h +++ b/mozilla/js/src/jscntxt.h @@ -171,6 +171,7 @@ struct JSRuntime { JSContextCallback cxCallback; /* Garbage collector state, used by jsgc.c. */ + JSGCChunkInfo *gcChunkList; JSGCArenaList gcArenaList[GC_NUM_FREELISTS]; JSDHashTable gcRootsHash; JSDHashTable *gcLocksHash; @@ -202,9 +203,9 @@ struct JSRuntime { JSGCThingCallback gcThingCallback; void *gcThingCallbackClosure; uint32 gcMallocBytes; - JSGCArena *gcUnscannedArenaStackTop; + JSGCArenaInfo *gcUntracedArenaStackTop; #ifdef DEBUG - size_t gcUnscannedBagSize; + size_t gcTraceLaterCount; #endif /* diff --git a/mozilla/js/src/jsfun.c b/mozilla/js/src/jsfun.c index 5901d41a888..9bcdc918d85 100644 --- a/mozilla/js/src/jsfun.c +++ b/mozilla/js/src/jsfun.c @@ -1173,8 +1173,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, * root until then to protect pval in case it is figuratively * up in the air, with no strong refs protecting it. */ - cx->weakRoots.newborn[GCX_OBJECT] = - (JSGCThing *)JSVAL_TO_GCTHING(pval); + cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(pval); parentProto = JSVAL_TO_OBJECT(pval); } } diff --git a/mozilla/js/src/jsgc.c b/mozilla/js/src/jsgc.c index 67a5beadf0e..cf68084c3ee 100644 --- a/mozilla/js/src/jsgc.c +++ b/mozilla/js/src/jsgc.c @@ -79,151 +79,263 @@ #endif /* - * GC arena sizing depends on amortizing arena overhead using a large number - * of things per arena, and on the thing/flags ratio of 8:1 on most platforms. - * - * On 64-bit platforms, we would have half as many things per arena because - * pointers are twice as big, so we double the bytes for things per arena. - * This preserves the 1024 byte flags sub-arena size, which relates to the - * GC_PAGE_SIZE (see below for why). + * Deduce if mmap or similar is available. */ -#if JS_BYTES_PER_WORD == 8 -# define GC_THINGS_SHIFT 14 /* 16KB for things on Alpha, etc. */ -#else -# define GC_THINGS_SHIFT 13 /* 8KB for things on most platforms */ +#ifndef JS_GC_USE_MMAP +# if defined(XP_WIN) +# define JS_GC_USE_MMAP 1 +# elif defined(XP_UNIX) || defined(XP_BEOS) +# include +# if defined(_POSIX_MAPPED_FILES) && _POSIX_MAPPED_FILES > 0 +# define JS_GC_USE_MMAP 1 +# endif +# endif +#endif + +#ifndef JS_GC_USE_MMAP +# define JS_GC_USE_MMAP 0 +#endif + +#if JS_GC_USE_MMAP +# if defined(XP_WIN) +# include +# elif defined(XP_UNIX) || defined(XP_BEOS) +# include +# endif #endif -#define GC_THINGS_SIZE JS_BIT(GC_THINGS_SHIFT) -#define GC_FLAGS_SIZE (GC_THINGS_SIZE / sizeof(JSGCThing)) /* * A GC arena contains one flag byte for each thing in its heap, and supports * O(1) lookup of a flag given its thing's address. * - * To implement this, we take advantage of the thing/flags numerology: given - * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K - * allocation for things+flags there are always 8 consecutive 1K-pages each - * aligned on 1K boundary. 
We use these pages to allocate things and the - * remaining 1K of space before and after the aligned pages to store flags. - * If we are really lucky and things+flags starts on a 1K boundary, then - * flags would consist of a single 1K chunk that comes after 8K of things. - * Otherwise there are 2 chunks of flags, one before and one after things. + * To implement this, we allocate things of the same size from a GC arena + * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The + * following picture shows arena's layout: * - * To be able to find the flag byte for a particular thing, we put a - * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that - * page's offset from the beginning of things+flags allocation and we allocate - * things after this record. Thus for each thing |thing_address & ~1023| - * gives the address of a JSGCPageInfo record from which we read page_offset. - * Due to page alignment - * (page_offset & ~1023) + (thing_address & 1023) - * gives thing_offset from the beginning of 8K paged things. We then divide - * thing_offset by sizeof(JSGCThing) to get thing_index. + * +------------------------------+--------------------+---------------+ + * | allocation area for GC thing | flags of GC things | JSGCArenaInfo | + * +------------------------------+--------------------+---------------+ * - * Now |page_address - page_offset| is things+flags arena_address and - * (page_offset & 1023) is the offset of the first page from the start of - * things+flags area. Thus if - * thing_index < (page_offset & 1023) - * then - * allocation_start_address + thing_index < address_of_the_first_page - * and we use - * allocation_start_address + thing_index - * as the address to store thing's flags. If - * thing_index >= (page_offset & 1023), - * then we use the chunk of flags that comes after the pages with things - * and calculate the address for the flag byte as - * address_of_the_first_page + 8K + (thing_index - (page_offset & 1023)) - * which is just - * allocation_start_address + thing_index + 8K. + * For a GC thing of size thingSize the number of things that the arena can + * hold is given by: + * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1) * - * When we allocate things with size equal to sizeof(JSGCThing), the overhead - * of this scheme for 32 bit platforms is (8+8*(8+1))/(8+9K) or 0.87% - * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each - * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the - * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size - * and the overhead is close to 1/8 or 12.5%. - * FIXME: How can we avoid this overhead? + * The address of thing's flag is given by: + * flagByteAddress = + * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - + * (thingAddress & GC_ARENA_MASK) / thingSize + * where + * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) + * is the last byte of flags' area and + * (thingAddress & GC_ARENA_MASK) / thingSize + * is thing's index counting from arena's start. * - * Here's some ASCII art showing an arena: + * Things are allocated from the start of their area and flags are allocated + * from the end of their area. This avoids calculating the location of the + * boundary separating things and flags. * - * split or the first 1-K aligned address. 
- * | - * V - * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+ - * |fB| tp0 | tp1 | tp2 | tp3 | tp4 | tp5 | tp6 | tp7 | fA | - * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+ - * ^ ^ - * tI ---------+ | - * tJ -------------------------------------------+ + * JS_GC_USE_MMAP macros governs the allocation of aligned arenas. When the + * macro is true, a platform specific allocation code like POSIX mmap is used + * with no extra overhead. If the macro is false, the code uses malloc to + * allocate a chunk of + * GC_ARENA_SIZE * (js_gcArenasPerChunk + 1) + * bytes. The chunk contains at least js_gcArenasPerChunk aligned arenas so + * the overhead of this schema is approximately 1/js_gcArenasPerChunk. See + * NewGCChunk/DestroyGCChunk below for details. * - * - fB are the "before split" flags, fA are the "after split" flags - * - tp0-tp7 are the 8 thing pages - * - thing tI points into tp1, whose flags are below the split, in fB - * - thing tJ points into tp5, clearly above the split - * - * In general, one of the thing pages will have some of its things' flags on - * the low side of the split, and the rest of its things' flags on the high - * side. All the other pages have flags only below or only above. - * - * (If we need to implement card-marking for an incremental GC write barrier, - * we can replace word-sized offsetInArena in JSGCPageInfo by pair of - * uint8 card_mark and uint16 offsetInArena fields as the offset can not exceed - * GC_THINGS_SIZE. This would gives an extremely efficient write barrier: - * when mutating an object obj, just store a 1 byte at - * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.) + * Note that even when JS_GC_USE_MMAP is true the code still allocates arenas + * in chunks to minimize the overhead of mmap/munmap. */ -#define GC_PAGE_SHIFT 10 -#define GC_PAGE_MASK ((jsuword) JS_BITMASK(GC_PAGE_SHIFT)) -#define GC_PAGE_SIZE JS_BIT(GC_PAGE_SHIFT) -#define GC_PAGE_COUNT (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT)) - -typedef struct JSGCPageInfo { - jsuword offsetInArena; /* offset from the arena start */ - jsuword unscannedBitmap; /* bitset for fast search of marked - but not yet scanned GC things */ -} JSGCPageInfo; - -struct JSGCArena { - JSGCArenaList *list; /* allocation list for the arena */ - JSGCArena *prev; /* link field for allocation list */ - JSGCArena *prevUnscanned; /* link field for the list of arenas - with marked but not yet scanned - things */ - jsuword unscannedPages; /* bitset for fast search of pages - with marked but not yet scanned - things */ - uint8 base[1]; /* things+flags allocation area */ -}; - -#define GC_ARENA_SIZE \ - (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE) - -#define FIRST_THING_PAGE(a) \ - (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK) - -#define PAGE_TO_ARENA(pi) \ - ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena \ - - offsetof(JSGCArena, base))) - -#define PAGE_INDEX(pi) \ - ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT)) - -#define THING_TO_PAGE(thing) \ - ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK)) /* - * Given a thing size n, return the size of the gap from the page start before - * the first thing. We know that any n not a power of two packs from - * the end of the page leaving at least enough room for one JSGCPageInfo, but - * not for another thing, at the front of the page (JS_ASSERTs below insist - * on this). 
- * - * This works because all allocations are a multiple of sizeof(JSGCThing) == - * sizeof(JSGCPageInfo) in size. + * When mmap is available, use the minimal known CPU page size as the size of + * GC arena. Otherwise use 1K arenas to minimize the overhead of the aligned + * allocation. */ -#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n)) +#if JS_GC_USE_MMAP +# define GC_ARENA_SHIFT 12 +#else +# define GC_ARENA_SHIFT 10 +#endif + +#define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT)) +#define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT) + +struct JSGCArenaInfo { + /* + * Allocation list for the arena. + */ + JSGCArenaList *list; + + /* + * Pointer to the previous arena in a linked list. The arena can either + * belong to one of JSContext.gcArenaList lists or, when it does not have + * any allocated GC things, to the list of free arenas in the chunk with + * head stored in JSGCChunkInfo.lastFreeArena. + */ + JSGCArenaInfo *prev; + + /* + * A link field for the list of arenas with marked but not yet traced + * things. The field is encoded as arena's page to share the space with + * firstArena and arenaIndex fields. + */ + jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT; + + /* + * When firstArena is false, the index of arena in the chunk. When + * firstArena is true, the index of a free arena holding JSGCChunkInfo or + * NO_FREE_ARENAS if there are no free arenas in the chunk. + * + * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to + * access either of indexes. + */ + jsuword arenaIndex : GC_ARENA_SHIFT - 1; + + /* + * Flag indicating if the arena is the first in the chunk. + */ + jsuword firstArena : 1; + + /* + * Bitset for fast search of marked but not yet traced things. + */ + jsuword untracedThings; +}; + +/* + * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small + * as possible. The code does not rely on this check so if on a particular + * platform this does not compile, then, as a workaround, comment the assert + * out and submit a bug report. + */ +JS_STATIC_ASSERT(sizeof(JSGCArenaInfo) == 4 * sizeof(jsuword)); + +#define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1) + +/* + * All chunks that have at least one free arena are put on the doubly-linked + * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains + * the head of the chunk's free arena list together with the link fields for + * gcChunkList. + * + * The structure is stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX + * macro gives the index of this arena. When all arenas in the chunk are used, + * it is removed from the list and the index is set to NO_FREE_ARENAS + * indicating that the chunk is not on gcChunkList and has no JSGCChunkInfo + * available. + */ +struct JSGCChunkInfo { + JSGCChunkInfo **prevp; + JSGCChunkInfo *next; + JSGCArenaInfo *lastFreeArena; + uint32 numFreeArenas; +}; + +/* + * Even when mmap is available, its overhead may be too big so the final + * decision to use it is done at runtime. + */ +#if JS_GC_USE_MMAP +static uint32 js_gcArenasPerChunk = 0; +static JSBool js_gcUseMmap = JS_FALSE; +#else +# define js_gcArenasPerChunk 31 +#endif + +/* + * Macros to convert between JSGCArenaInfo, the start address of the arena and + * arena's page defined as (start address) >> GC_ARENA_SHIFT. 
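+ *
+ * A purely illustrative example of these conversions and of the thing/flag
+ * macros further below (the numbers assume GC_ARENA_SHIFT == 12, i.e.
+ * 4096-byte arenas, and a 32-bit build where sizeof(JSGCArenaInfo) is
+ * 4 * sizeof(jsuword) == 16): ARENA_INFO_OFFSET is 4080, so an arena
+ * starting at 0x20001000 keeps its JSGCArenaInfo at 0x20001FF0 and its
+ * page is 0x20001. For 16-byte things such an arena holds
+ * (4096 - 16) / (16 + 1) == 240 things, and the thing at offset 32
+ * (index 2) has its flag byte at 0x20001FF0 - 1 - 2 == 0x20001FED.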
+ */ +#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) + +#define IS_ARENA_INFO_ADDRESS(arena) \ + (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET) + +#define ARENA_START_TO_INFO(arenaStart) \ + (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \ + (JSGCArenaInfo *) ((arenaStart) + ARENA_INFO_OFFSET)) + +#define ARENA_INFO_TO_START(arena) \ + (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \ + (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK) + +#define ARENA_PAGE_TO_INFO(arenaPage) \ + (JS_ASSERT(arenaPage != 0), \ + JS_ASSERT(((arenaPage) >> (JS_BITS_PER_WORD - GC_ARENA_SHIFT)) == 0), \ + ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT)) + +#define ARENA_INFO_TO_PAGE(arena) \ + (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \ + ((jsuword) (arena) >> GC_ARENA_SHIFT)) + +#define GET_ARENA_INFO(chunk, index) \ + (JS_ASSERT((index) < js_gcArenasPerChunk), \ + ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT))) + +/* + * Macros to access/modify information about the chunk of GC arenas. + */ +#define GET_ARENA_CHUNK(arena, index) \ + (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \ + ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT)) + +#define GET_ARENA_INDEX(arena) \ + ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex) + +#define GET_CHUNK_INFO_INDEX(chunk) \ + ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex) + +#define SET_CHUNK_INFO_INDEX(chunk, index) \ + (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \ + (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index))) + +#define GET_CHUNK_INFO(chunk, infoIndex) \ + (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \ + JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \ + (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT))) + +#define CHUNK_INFO_TO_INDEX(ci) \ + GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci)) + +/* + * Macros for GC-thing operations. + */ +#define THINGS_PER_ARENA(thingSize) \ + ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U)) + +#define THING_TO_ARENA(thing) \ + ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \ + 1 - sizeof(JSGCArenaInfo))) + +#define THING_TO_INDEX(thing, thingSize) \ + ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize)) + +#define THING_FLAGS_END(arena) ((uint8 *)(arena)) + +#define THING_FLAGP(arena, thingIndex) \ + (JS_ASSERT((jsuword) (thingIndex) \ + < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \ + (uint8 *)(arena) - 1 - (thingIndex)) + +#define THING_TO_FLAGP(thing, thingSize) \ + THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize)) + +#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp) + +#define FLAGP_TO_INDEX(flagp) \ + (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \ + (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK))) + +#define FLAGP_TO_THING(flagp, thingSize) \ + (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \ + (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \ + (void *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \ + (thingSize) * FLAGP_TO_INDEX(flagp))) #ifdef JS_THREADSAFE /* - * The maximum number of things to put to the local free list by taking + * The maximum number of things to put on the local free list by taking * several things from the global free list or from the tail of the last * allocated arena to amortize the cost of rt->gcLock. 
* @@ -233,8 +345,6 @@ struct JSGCArena { #endif -JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo)); -JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE); JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval)); JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString)); @@ -414,63 +524,217 @@ ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info, # define METER(x) ((void) 0) #endif -static JSBool -NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList) +/* + * For chunks allocated via over-sized malloc, get a pointer to store the gap + * between the malloc's result and the first arena in the chunk. + */ +static uint32 * +GetMallocedChunkGapPtr(jsuword chunk) { - JSGCArena *a; - jsuword offset; - JSGCPageInfo *pi; + JS_ASSERT((chunk & GC_ARENA_MASK) == 0); - /* Check if we are allowed and can allocate a new arena. */ - if (rt->gcBytes >= rt->gcMaxBytes) - return JS_FALSE; - a = (JSGCArena *)malloc(GC_ARENA_SIZE); - if (!a) - return JS_FALSE; + /* Use the memory after the chunk, see NewGCChunk for details. */ + return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT)); +} - /* Initialize the JSGCPageInfo records at the start of every thing page. */ - offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK; - JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a)); - do { - pi = (JSGCPageInfo *) (a->base + offset); - pi->offsetInArena = offset; - pi->unscannedBitmap = 0; - offset += GC_PAGE_SIZE; - } while (offset < GC_THINGS_SIZE); +static jsuword +NewGCChunk() +{ + void *p; + jsuword chunk; - METER(++arenaList->stats.narenas); - METER(arenaList->stats.maxarenas - = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas)); +#if JS_GC_USE_MMAP + if (js_gcUseMmap) { +# if defined(XP_WIN) + p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + return (jsuword) p; +# elif defined(XP_UNIX) || defined(XP_BEOS) + p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + return (p == MAP_FAILED) ? 0 : (jsuword) p; +# else +# error "Not implemented" +# endif + } +#endif - a->list = arenaList; - a->prev = arenaList->last; - a->prevUnscanned = NULL; - a->unscannedPages = 0; - arenaList->last = a; - arenaList->lastLimit = 0; - rt->gcBytes += GC_ARENA_SIZE; - return JS_TRUE; + /* + * Implement the chunk allocation using over sized malloc if mmap cannot + * be used. FIXME bug 396007: the code should use posix_memalign when it + * is available. + * + * Since malloc allocates pointers aligned on the word boundary, to get + * js_gcArenasPerChunk aligned arenas, we need to malloc only + * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t) + * bytes. But since we stores the gap between the malloced pointer and the + * first arena in the chunk after the chunk, we need to ask for + * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) + * bytes to ensure that we always have room to store the gap. 
+ */ + p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); + if (!p) + return 0; + chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK; + *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p); + return chunk; } static void -DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap) +DestroyGCChunk(jsuword chunk) { - JSGCArena *a; + JS_ASSERT((chunk & GC_ARENA_MASK) == 0); +#if JS_GC_USE_MMAP + if (js_gcUseMmap) { +# if defined(XP_WIN) + VirtualFree((void *) chunk, 0, MEM_RELEASE); +# elif defined(XP_UNIX) || defined(XP_BEOS) + munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT); +# else +# error "Not implemented" +# endif + return; + } +#endif + + /* See comments in NewGCChunk. */ + JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE); + free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); +} + +static void +AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci) +{ + ci->prevp = &rt->gcChunkList; + ci->next = rt->gcChunkList; + if (rt->gcChunkList) { + JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList); + rt->gcChunkList->prevp = &ci->next; + } + rt->gcChunkList = ci; +} + +static void +RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci) +{ + *ci->prevp = ci->next; + if (ci->next) { + JS_ASSERT(ci->next->prevp == &ci->next); + ci->next->prevp = ci->prevp; + } +} + +static JSGCArenaInfo * +NewGCArena(JSRuntime *rt) +{ + jsuword chunk; + JSGCChunkInfo *ci; + uint32 i; + JSGCArenaInfo *a, *aprev; + + if (js_gcArenasPerChunk == 1) { + chunk = NewGCChunk(); + return (chunk == 0) ? NULL : ARENA_START_TO_INFO(chunk); + } + + ci = rt->gcChunkList; + if (!ci) { + chunk = NewGCChunk(); + if (chunk == 0) + return NULL; + JS_ASSERT((chunk & GC_ARENA_MASK) == 0); + a = GET_ARENA_INFO(chunk, 0); + a->firstArena = JS_TRUE; + a->arenaIndex = 0; + aprev = NULL; + i = 0; + do { + a->prev = aprev; + aprev = a; + ++i; + a = GET_ARENA_INFO(chunk, i); + a->firstArena = JS_FALSE; + a->arenaIndex = i; + } while (i != js_gcArenasPerChunk - 1); + ci = GET_CHUNK_INFO(chunk, 0); + ci->lastFreeArena = aprev; + ci->numFreeArenas = js_gcArenasPerChunk - 1; + AddChunkToList(rt, ci); + } else { + JS_ASSERT(ci->prevp == &rt->gcChunkList); + a = ci->lastFreeArena; + aprev = a->prev; + if (!aprev) { + JS_ASSERT(ci->numFreeArenas == 1); + JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci); + RemoveChunkFromList(rt, ci); + chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a)); + SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS); + } else { + JS_ASSERT(ci->numFreeArenas >= 2); + JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci); + ci->lastFreeArena = aprev; + ci->numFreeArenas--; + } + } + + return a; +} + +static void +DestroyGCArena(JSRuntime *rt, JSGCArenaInfo *a) +{ + uint32 arenaIndex; + jsuword chunk; + uint32 chunkInfoIndex; + JSGCChunkInfo *ci; - a = *ap; - JS_ASSERT(a); - JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE); - rt->gcBytes -= GC_ARENA_SIZE; METER(rt->gcStats.afree++); - METER(--arenaList->stats.narenas); - if (a == arenaList->last) - arenaList->lastLimit = (uint16)(a->prev ? 
GC_THINGS_SIZE : 0); - *ap = a->prev; + + if (js_gcArenasPerChunk == 1) { + DestroyGCChunk(ARENA_INFO_TO_START(a)); + return; + } #ifdef DEBUG - memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE); + { + jsuword firstArena, arenaIndex; + + firstArena = a->firstArena; + arenaIndex = a->arenaIndex; + memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN, + GC_ARENA_SIZE); + a->firstArena = firstArena; + a->arenaIndex = arenaIndex; + } #endif - free(a); + + arenaIndex = GET_ARENA_INDEX(a); + chunk = GET_ARENA_CHUNK(a, arenaIndex); + chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk); + if (chunkInfoIndex == NO_FREE_ARENAS) { + chunkInfoIndex = arenaIndex; + SET_CHUNK_INFO_INDEX(chunk, arenaIndex); + ci = GET_CHUNK_INFO(chunk, chunkInfoIndex); + a->prev = NULL; + ci->lastFreeArena = a; + ci->numFreeArenas = 1; + AddChunkToList(rt, ci); + } else { + JS_ASSERT(chunkInfoIndex != arenaIndex); + ci = GET_CHUNK_INFO(chunk, chunkInfoIndex); + JS_ASSERT(ci->numFreeArenas != 0); + JS_ASSERT(ci->lastFreeArena); + JS_ASSERT(a != ci->lastFreeArena); + if (ci->numFreeArenas == js_gcArenasPerChunk - 1) { + RemoveChunkFromList(rt, ci); + DestroyGCChunk(chunk); + } else { + ++ci->numFreeArenas; + a->prev = ci->lastFreeArena; + ci->lastFreeArena = a; + } + } } static void @@ -484,7 +748,7 @@ InitGCArenaLists(JSRuntime *rt) thingSize = GC_FREELIST_NBYTES(i); JS_ASSERT((size_t)(uint16)thingSize == thingSize); arenaList->last = NULL; - arenaList->lastLimit = 0; + arenaList->lastCount = THINGS_PER_ARENA(thingSize); arenaList->thingSize = (uint16)thingSize; arenaList->freeList = NULL; METER(memset(&arenaList->stats, 0, sizeof arenaList->stats)); @@ -496,40 +760,41 @@ FinishGCArenaLists(JSRuntime *rt) { uintN i; JSGCArenaList *arenaList; + JSGCArenaInfo *a, *aprev; for (i = 0; i < GC_NUM_FREELISTS; i++) { arenaList = &rt->gcArenaList[i]; - while (arenaList->last) - DestroyGCArena(rt, arenaList, &arenaList->last); + + for (a = arenaList->last; a; a = aprev) { + aprev = a->prev; + DestroyGCArena(rt, a); + } + arenaList->last = NULL; + arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize); arenaList->freeList = NULL; + METER(arenaList->stats.narenas = 0); } + rt->gcBytes = 0; + JS_ASSERT(rt->gcChunkList == 0); } JS_FRIEND_API(uint8 *) js_GetGCThingFlags(void *thing) { - JSGCPageInfo *pi; - jsuword offsetInArena, thingIndex; + JSGCArenaInfo *a; + uint32 index; - pi = THING_TO_PAGE(thing); - offsetInArena = pi->offsetInArena; - JS_ASSERT(offsetInArena < GC_THINGS_SIZE); - thingIndex = ((offsetInArena & ~GC_PAGE_MASK) | - ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing); - JS_ASSERT(thingIndex < GC_PAGE_SIZE); - if (thingIndex >= (offsetInArena & GC_PAGE_MASK)) - thingIndex += GC_THINGS_SIZE; - return (uint8 *)pi - offsetInArena + thingIndex; + a = THING_TO_ARENA(thing); + index = THING_TO_INDEX(thing, a->list->thingSize); + return THING_FLAGP(a, index); } JSRuntime* js_GetGCStringRuntime(JSString *str) { - JSGCPageInfo *pi; JSGCArenaList *list; - pi = THING_TO_PAGE(str); - list = PAGE_TO_ARENA(pi)->list; + list = THING_TO_ARENA(str)->list; JS_ASSERT(list->thingSize == sizeof(JSGCThing)); JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0); @@ -623,11 +888,59 @@ typedef struct JSGCRootHashEntry { /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */ #define GC_ROOTS_SIZE 256 -#define GC_FINALIZE_LEN 1024 + +/* + * For a CPU with extremely large pages using them for GC things wastes + * too much memory. 
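+ *
+ * Illustrative arithmetic (assuming GC_ARENA_SHIFT == 12): the limit
+ * below is JS_BIT(18 - 12) == 64 arenas, so a CPU page size of, say,
+ * 4 KiB or 64 KiB stays within the limit and js_InitGC keeps using
+ * page-multiple mmap chunks, while anything larger than 256 KiB makes
+ * it fall back to malloced chunks of 7 arenas.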
+ */ +#define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT) + +JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS); JSBool js_InitGC(JSRuntime *rt, uint32 maxbytes) { +#if JS_GC_USE_MMAP + if (js_gcArenasPerChunk == 0) { + size_t cpuPageSize, arenasPerPage; +# if defined(XP_WIN) + SYSTEM_INFO si; + + GetSystemInfo(&si); + cpuPageSize = si.dwPageSize; + +# elif defined(XP_UNIX) || defined(XP_BEOS) + cpuPageSize = (size_t) sysconf(_SC_PAGESIZE); +# else +# error "Not implemented" +# endif + /* cpuPageSize is a power of 2. */ + JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0); + arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT; +#ifdef DEBUG + if (arenasPerPage == 0) { + fprintf(stderr, +"JS engine warning: the size of the CPU page, %u bytes, is too low to use\n" +"paged allocation for the garbage collector. Please report this.\n", + (unsigned) cpuPageSize); + } +#endif + if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) { + /* + * Use at least 4 GC arenas per paged allocation chunk to minimize + * the overhead of mmap/VirtualAlloc. + */ + js_gcUseMmap = JS_TRUE; + js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4); + } else { + js_gcUseMmap = JS_FALSE; + js_gcArenasPerChunk = 7; + } + } +#endif + JS_ASSERT(1 <= js_gcArenasPerChunk && + js_gcArenasPerChunk <= NO_FREE_ARENAS); + InitGCArenaLists(rt); if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL, sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) { @@ -650,6 +963,7 @@ JS_FRIEND_API(void) js_DumpGCStats(JSRuntime *rt, FILE *fp) { uintN i; + size_t thingsPerArena; size_t totalThings, totalMaxThings, totalBytes; size_t sumArenas, sumTotalArenas; size_t sumFreeSize, sumTotalFreeSize; @@ -673,6 +987,7 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp) i, UL(GC_FREELIST_NBYTES(i))); continue; } + thingsPerArena = THINGS_PER_ARENA(list->thingSize); fprintf(fp, "ARENA LIST %u (thing size %lu):\n", i, UL(GC_FREELIST_NBYTES(i))); fprintf(fp, " arenas: %lu\n", UL(stats->narenas)); @@ -683,20 +998,18 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp) fprintf(fp, " free list density: %.1f%%\n", stats->narenas == 0 ? 0.0 - : (100.0 * list->thingSize * (jsdouble)stats->freelen / - (GC_THINGS_SIZE * (jsdouble)stats->narenas))); + : 100.0 * stats->freelen / (thingsPerArena * stats->narenas)); fprintf(fp, " average free list density: %.1f%%\n", stats->totalarenas == 0 ? 0.0 - : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen / - (GC_THINGS_SIZE * (jsdouble)stats->totalarenas))); + : 100.0 * stats->totalfreelen / + (thingsPerArena * stats->totalarenas)); fprintf(fp, " recycles: %lu\n", UL(stats->recycle)); fprintf(fp, " recycle/alloc ratio: %.2f\n", - (jsdouble)stats->recycle / - (jsdouble)(stats->totalnew - stats->recycle)); + (double) stats->recycle / (stats->totalnew - stats->recycle)); totalThings += stats->nthings; totalMaxThings += stats->maxthings; - totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings; + totalBytes += list->thingSize * stats->nthings; sumArenas += stats->narenas; sumTotalArenas += stats->totalarenas; sumFreeSize += list->thingSize * stats->freelen; @@ -715,11 +1028,11 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp) fprintf(fp, " total free list density: %.1f%%\n", sumArenas == 0 ? 0.0 - : 100.0 * sumFreeSize / (GC_THINGS_SIZE * (jsdouble)sumArenas)); + : 100.0 * sumFreeSize / (sumArenas << GC_ARENA_SHIFT)); fprintf(fp, " average free list density: %.1f%%\n", sumTotalFreeSize == 0 ? 
0.0 - : 100.0 * sumTotalFreeSize / (GC_THINGS_SIZE * sumTotalArenas)); + : 100.0 * sumTotalFreeSize / (sumTotalArenas << GC_ARENA_SHIFT)); fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry)); fprintf(fp, " allocation failures: %lu\n", ULSTAT(fail)); fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn)); @@ -729,9 +1042,9 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp) fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth)); fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth)); fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth)); - fprintf(fp, " delayed scan bag adds: %lu\n", ULSTAT(unscanned)); + fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced)); #ifdef DEBUG - fprintf(fp, " max delayed scan bag size: %lu\n", ULSTAT(maxunscanned)); + fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced)); #endif fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel)); fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke)); @@ -1031,10 +1344,10 @@ js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes) uintN flindex; JSBool doGC; JSGCThing *thing; - uint8 *flagp, *firstPage; + uint8 *flagp; JSGCArenaList *arenaList; - jsuword offset; - JSGCArena *a; + JSGCArenaInfo *a; + uintN thingsLimit; JSLocalRootStack *lrs; #ifdef JS_THREADSAFE JSBool gcLocked; @@ -1145,67 +1458,71 @@ js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes) break; } - /* Allocate from the tail of last arena or from new arena if we can. */ - if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) || - NewGCArena(rt, arenaList)) { - - offset = arenaList->lastLimit; - if ((offset & GC_PAGE_MASK) == 0) { - /* - * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary. - */ - offset += PAGE_THING_GAP(nbytes); - } - JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE); - arenaList->lastLimit = (uint16)(offset + nbytes); + /* + * Try to allocate things from the last arena. If it is fully used, + * check if we can allocate a new one and, if we cannot, consider + * doing a "last ditch" GC unless already tried. + */ + thingsLimit = THINGS_PER_ARENA(nbytes); + if (arenaList->lastCount != thingsLimit) { + JS_ASSERT(arenaList->lastCount < thingsLimit); a = arenaList->last; - firstPage = (uint8 *)FIRST_THING_PAGE(a); - thing = (JSGCThing *)(firstPage + offset); - flagp = a->base + offset / sizeof(JSGCThing); - if (flagp >= firstPage) - flagp += GC_THINGS_SIZE; - -#ifdef JS_THREADSAFE - /* - * Refill the local free list by taking free things from the last - * arena. Prefer to order free things by ascending address in the - * (unscientific) hope of better cache locality. 
- */ - if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex]) - break; - METER(nfree = 0); - lastptr = &flbase[flindex]; - maxFreeThings = MAX_THREAD_LOCAL_THINGS; - for (offset = arenaList->lastLimit; - offset != GC_THINGS_SIZE && maxFreeThings-- != 0; - offset += nbytes) { - if ((offset & GC_PAGE_MASK) == 0) - offset += PAGE_THING_GAP(nbytes); - JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE); - tmpflagp = a->base + offset / sizeof(JSGCThing); - if (tmpflagp >= firstPage) - tmpflagp += GC_THINGS_SIZE; - - tmpthing = (JSGCThing *)(firstPage + offset); - tmpthing->flagp = tmpflagp; - *tmpflagp = GCF_FINAL; /* signifying that thing is free */ - - *lastptr = tmpthing; - lastptr = &tmpthing->next; - METER(++nfree); + } else { + if (rt->gcBytes >= rt->gcMaxBytes || !(a = NewGCArena(rt))) { + if (doGC) + goto fail; + rt->gcPoke = JS_TRUE; + doGC = JS_TRUE; + continue; } - arenaList->lastLimit = (uint16)offset; - *lastptr = NULL; - METER(arenaList->stats.freelen += nfree); -#endif - break; + + rt->gcBytes += GC_ARENA_SIZE; + METER(++arenaList->stats.narenas); + METER(arenaList->stats.maxarenas + = JS_MAX(arenaList->stats.maxarenas, + arenaList->stats.narenas)); + + a->list = arenaList; + a->prev = arenaList->last; + a->prevUntracedPage = 0; + a->untracedThings = 0; + arenaList->last = a; + arenaList->lastCount = 0; } - /* Consider doing a "last ditch" GC unless already tried. */ - if (doGC) - goto fail; - rt->gcPoke = JS_TRUE; - doGC = JS_TRUE; + flagp = THING_FLAGP(a, arenaList->lastCount); + thing = (JSGCThing *) FLAGP_TO_THING(flagp, nbytes); + arenaList->lastCount++; + +#ifdef JS_THREADSAFE + /* + * Refill the local free list by taking free things from the last + * arena. Prefer to order free things by ascending address in the + * (unscientific) hope of better cache locality. + */ + if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex]) + break; + METER(nfree = 0); + lastptr = &flbase[flindex]; + maxFreeThings = thingsLimit - arenaList->lastCount; + if (maxFreeThings > MAX_THREAD_LOCAL_THINGS) + maxFreeThings = MAX_THREAD_LOCAL_THINGS; + METER(arenaList->stats.freelen += maxFreeThings); + while (maxFreeThings != 0) { + --maxFreeThings; + + tmpflagp = THING_FLAGP(a, arenaList->lastCount); + tmpthing = (JSGCThing *) FLAGP_TO_THING(tmpflagp, nbytes); + arenaList->lastCount++; + tmpthing->flagp = tmpflagp; + *tmpflagp = GCF_FINAL; /* signifying that thing is free */ + + *lastptr = tmpthing; + lastptr = &tmpthing->next; + } + *lastptr = NULL; +#endif + break; } /* We successfully allocated the thing. */ @@ -1473,234 +1790,171 @@ JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind) } /* - * Avoid using PAGE_THING_GAP inside this macro to optimize the - * thingsPerUnscannedChunk calculation when thingSize is a power of two. + * Number of things covered by a single bit of JSGCArenaInfo.untracedThings. 
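+ *
+ * For illustration (assuming 4096-byte arenas, a 16-byte JSGCArenaInfo
+ * and a 32-bit word): an arena of 16-byte things holds 240 things, so
+ * THINGS_PER_UNTRACED_BIT(16) == JS_HOWMANY(240, 32) == 8 and each bit
+ * of untracedThings stands for a run of 8 consecutive things.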
*/ -#define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap) \ - JS_BEGIN_MACRO \ - if (0 == ((thingSize) & ((thingSize) - 1))) { \ - pageGap = (thingSize); \ - thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize)) \ - + JS_BITS_PER_WORD - 1) \ - >> JS_BITS_PER_WORD_LOG2; \ - } else { \ - pageGap = GC_PAGE_SIZE % (thingSize); \ - thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize), \ - JS_BITS_PER_WORD); \ - } \ - JS_END_MACRO +#define THINGS_PER_UNTRACED_BIT(thingSize) \ + JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD) static void -AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp) +DelayTracingChildren(JSRuntime *rt, uint8 *flagp) { - JSGCPageInfo *pi; - JSGCArena *arena; - size_t thingSize; - size_t thingsPerUnscannedChunk; - size_t pageGap; - size_t chunkIndex; + JSGCArenaInfo *a; + uint32 untracedBitIndex; jsuword bit; - /* Things from delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */ + /* + * Things with children to be traced later are marked with + * GCF_MARK | GCF_FINAL flags. + */ JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK); *flagp |= GCF_FINAL; - METER(rt->gcStats.unscanned++); + METER(rt->gcStats.untraced++); #ifdef DEBUG - ++rt->gcUnscannedBagSize; - METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned) - rt->gcStats.maxunscanned = rt->gcUnscannedBagSize); + ++rt->gcTraceLaterCount; + METER(if (rt->gcTraceLaterCount > rt->gcStats.maxuntraced) + rt->gcStats.maxuntraced = rt->gcTraceLaterCount); #endif - pi = THING_TO_PAGE(thing); - arena = PAGE_TO_ARENA(pi); - thingSize = arena->list->thingSize; - GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap); - chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) / - (thingSize * thingsPerUnscannedChunk); - JS_ASSERT(chunkIndex < JS_BITS_PER_WORD); - bit = (jsuword)1 << chunkIndex; - if (pi->unscannedBitmap != 0) { - JS_ASSERT(rt->gcUnscannedArenaStackTop); - if (thingsPerUnscannedChunk != 1) { - if (pi->unscannedBitmap & bit) { - /* Chunk already contains things to scan later. */ - return; - } - } else { - /* - * The chunk must not contain things to scan later if there is - * only one thing per chunk. - */ - JS_ASSERT(!(pi->unscannedBitmap & bit)); + a = FLAGP_TO_ARENA(flagp); + untracedBitIndex = FLAGP_TO_INDEX(flagp) / + THINGS_PER_UNTRACED_BIT(a->list->thingSize); + JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD); + bit = (jsuword)1 << untracedBitIndex; + if (a->untracedThings != 0) { + JS_ASSERT(rt->gcUntracedArenaStackTop); + if (a->untracedThings & bit) { + /* bit already covers things with children to trace later. */ + return; } - pi->unscannedBitmap |= bit; - JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi))); + a->untracedThings |= bit; } else { /* - * The thing is the first unscanned thing in the page, set the bit - * corresponding to this page arena->unscannedPages. + * The thing is the first thing with not yet traced children in the + * whole arena, so push the arena on the stack of arenas with things + * to be traced later unless the arena has already been pushed. We + * detect that through checking prevUntracedPage as the field is 0 + * only for not yet pushed arenas. To ensure that + * prevUntracedPage != 0 + * even when the stack contains one element, we make prevUntracedPage + * for the arena at the bottom to point to itself. + * + * See comments in TraceDelayedChildren. 
*/ - pi->unscannedBitmap = bit; - JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD); - bit = (jsuword)1 << PAGE_INDEX(pi); - JS_ASSERT(!(arena->unscannedPages & bit)); - if (arena->unscannedPages != 0) { - arena->unscannedPages |= bit; - JS_ASSERT(arena->prevUnscanned); - JS_ASSERT(rt->gcUnscannedArenaStackTop); - } else { - /* - * The thing is the first unscanned thing in the whole arena, push - * the arena on the stack of unscanned arenas unless the arena - * has already been pushed. We detect that through prevUnscanned - * field which is NULL only for not yet pushed arenas. To ensure - * that prevUnscanned != NULL even when the stack contains one - * element, we make prevUnscanned for the arena at the bottom - * to point to itself. - * - * See comments in ScanDelayedChildren. - */ - arena->unscannedPages = bit; - if (!arena->prevUnscanned) { - if (!rt->gcUnscannedArenaStackTop) { - /* Stack was empty, mark the arena as bottom element. */ - arena->prevUnscanned = arena; - } else { - JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned); - arena->prevUnscanned = rt->gcUnscannedArenaStackTop; - } - rt->gcUnscannedArenaStackTop = arena; + a->untracedThings = bit; + if (a->prevUntracedPage == 0) { + if (!rt->gcUntracedArenaStackTop) { + /* Stack was empty, mark the arena as the bottom element. */ + a->prevUntracedPage = ARENA_INFO_TO_PAGE(a); + } else { + JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0); + a->prevUntracedPage = + ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop); } - } - } - JS_ASSERT(rt->gcUnscannedArenaStackTop); + rt->gcUntracedArenaStackTop = a; + } + } + JS_ASSERT(rt->gcUntracedArenaStackTop); } static void -ScanDelayedChildren(JSTracer *trc) +TraceDelayedChildren(JSTracer *trc) { JSRuntime *rt; - JSGCArena *arena; - size_t thingSize; - size_t thingsPerUnscannedChunk; - size_t pageGap; - size_t pageIndex; - JSGCPageInfo *pi; - size_t chunkIndex; - size_t thingOffset, thingLimit; + JSGCArenaInfo *a, *aprev; + uint32 thingSize; + uint32 thingsPerUntracedBit; + uint32 untracedBitIndex, thingIndex, indexLimit, endIndex; JSGCThing *thing; uint8 *flagp; - JSGCArena *prevArena; rt = trc->context->runtime; - arena = rt->gcUnscannedArenaStackTop; - if (!arena) { - JS_ASSERT(rt->gcUnscannedBagSize == 0); + a = rt->gcUntracedArenaStackTop; + if (!a) { + JS_ASSERT(rt->gcTraceLaterCount == 0); return; } - init_size: - thingSize = arena->list->thingSize; - GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap); for (;;) { /* - * The following assert verifies that the current arena belongs to - * the unscan stack since AddThingToUnscannedBag ensures that even - * for stack's bottom prevUnscanned != NULL but rather points to self. + * The following assert verifies that the current arena belongs to the + * untraced stack, since DelayTracingChildren ensures that even for + * stack's bottom prevUntracedPage != 0 but rather points to itself. 
*/ - JS_ASSERT(arena->prevUnscanned); - JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned); - while (arena->unscannedPages != 0) { - pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages); - JS_ASSERT(pageIndex < GC_PAGE_COUNT); - pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) + - pageIndex * GC_PAGE_SIZE); - JS_ASSERT(pi->unscannedBitmap); - chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap); - pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex); - if (pi->unscannedBitmap == 0) - arena->unscannedPages &= ~((jsuword)1 << pageIndex); - thingOffset = (pageGap - + chunkIndex * thingsPerUnscannedChunk * thingSize); - JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo)); - thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize; - if (thingsPerUnscannedChunk != 1) { - /* - * thingLimit can go beyond the last allocated thing for the - * last chunk as the real limit can be inside the chunk. - */ - if (arena->list->last == arena && - arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE + - thingLimit)) { - thingLimit = (arena->list->lastLimit - - pageIndex * GC_PAGE_SIZE); - } else if (thingLimit > GC_PAGE_SIZE) { - thingLimit = GC_PAGE_SIZE; - } - JS_ASSERT(thingLimit > thingOffset); - } - JS_ASSERT(arena->list->last != arena || - arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE + - thingLimit)); - JS_ASSERT(thingLimit <= GC_PAGE_SIZE); + JS_ASSERT(a->prevUntracedPage != 0); + JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0); + thingSize = a->list->thingSize; + indexLimit = (a == a->list->last) + ? a->list->lastCount + : THINGS_PER_ARENA(thingSize); + thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize); - for (; thingOffset != thingLimit; thingOffset += thingSize) { + /* + * We can not use do-while loop here as a->untracedThings can be zero + * before the loop as a leftover from the previous iterations. See + * comments after the loop. + */ + while (a->untracedThings != 0) { + untracedBitIndex = JS_FLOOR_LOG2W(a->untracedThings); + a->untracedThings &= ~((jsuword)1 << untracedBitIndex); + thingIndex = untracedBitIndex * thingsPerUntracedBit; + endIndex = thingIndex + thingsPerUntracedBit; + + /* + * endIndex can go beyond the last allocated thing as the real + * limit can be "inside" the bit. + */ + if (endIndex > indexLimit) + endIndex = indexLimit; + JS_ASSERT(thingIndex < indexLimit); + + do { /* - * XXX: inline js_GetGCThingFlags() to use already available - * pi. + * Skip free or already traced things that share the bit + * with untraced ones. */ - thing = (JSGCThing *)((jsuword)pi + thingOffset); - flagp = js_GetGCThingFlags(thing); - if (thingsPerUnscannedChunk != 1) { - /* - * Skip free or already scanned things that share the chunk - * with unscanned ones. - */ - if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL)) - continue; - } - JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL)) - == (GCF_MARK|GCF_FINAL)); + flagp = THING_FLAGP(a, thingIndex); + if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL)) + continue; *flagp &= ~GCF_FINAL; #ifdef DEBUG - JS_ASSERT(rt->gcUnscannedBagSize != 0); - --rt->gcUnscannedBagSize; + JS_ASSERT(rt->gcTraceLaterCount != 0); + --rt->gcTraceLaterCount; #endif + thing = FLAGP_TO_THING(flagp, thingSize); JS_TraceChildren(trc, thing, GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK]); - } + } while (++thingIndex != endIndex); } + /* - * We finished scanning of the arena but we can only pop it from - * the stack if the arena is the stack's top. 
+ * We finished tracing of all things in the the arena but we can only + * pop it from the stack if the arena is the stack's top. * - * When JS_TraceChildren from the above calls JS_Trace that in turn - * on low C stack calls AddThingToUnscannedBag and the latter pushes - * new arenas to the unscanned stack, we have to skip popping of this - * arena until it becomes the top of the stack again. + * When JS_TraceChildren from the above calls JS_CallTracer that in + * turn on low C stack calls DelayTracingChildren and the latter + * pushes new arenas to the untraced stack, we have to skip popping + * of this arena until it becomes the top of the stack again. */ - if (arena == rt->gcUnscannedArenaStackTop) { - prevArena = arena->prevUnscanned; - arena->prevUnscanned = NULL; - if (arena == prevArena) { + if (a == rt->gcUntracedArenaStackTop) { + aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage); + a->prevUntracedPage = 0; + if (a == aprev) { /* - * prevUnscanned points to itself and we reached the bottom - * of the stack. + * prevUntracedPage points to itself and we reached the + * bottom of the stack. */ break; } - rt->gcUnscannedArenaStackTop = arena = prevArena; + rt->gcUntracedArenaStackTop = a = aprev; } else { - arena = rt->gcUnscannedArenaStackTop; + a = rt->gcUntracedArenaStackTop; } - if (arena->list->thingSize != thingSize) - goto init_size; } - JS_ASSERT(rt->gcUnscannedArenaStackTop); - JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned); - rt->gcUnscannedArenaStackTop = NULL; - JS_ASSERT(rt->gcUnscannedBagSize == 0); + JS_ASSERT(rt->gcUntracedArenaStackTop); + JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0); + rt->gcUntracedArenaStackTop = NULL; + JS_ASSERT(rt->gcTraceLaterCount == 0); } JS_PUBLIC_API(void) @@ -1724,8 +1978,42 @@ JS_CallTracer(JSTracer *trc, void *thing, uint32 kind) JS_ASSERT(rt->gcMarkingTracer == trc); JS_ASSERT(rt->gcLevel > 0); + /* + * Optimize for string and double as their size is known and their tracing + * is not recursive. + */ + switch (kind) { + case JSTRACE_DOUBLE: + flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing)); + JS_ASSERT((*flagp & GCF_FINAL) == 0); + JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind); + if (rt->gcThingCallback) + rt->gcThingCallback(thing, *flagp, rt->gcThingCallbackClosure); + + *flagp |= GCF_MARK; + goto out; + + case JSTRACE_STRING: + for (;;) { + flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing)); + JS_ASSERT((*flagp & GCF_FINAL) == 0); + JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind); + if (rt->gcThingCallback) + rt->gcThingCallback(thing, *flagp, rt->gcThingCallbackClosure); + + if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) { + *flagp |= GCF_MARK; + goto out; + } + if (*flagp & GCF_MARK) + goto out; + *flagp |= GCF_MARK; + thing = JSSTRDEP_BASE((JSString *) thing); + } + /* NOTREACHED */ + } + flagp = js_GetGCThingFlags(thing); - JS_ASSERT(*flagp != GCF_FINAL); JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind); if (rt->gcThingCallback) @@ -1733,8 +2021,13 @@ JS_CallTracer(JSTracer *trc, void *thing, uint32 kind) if (*flagp & GCF_MARK) goto out; - *flagp |= GCF_MARK; + /* + * We check for non-final flag only if mark is unset as + * DelayTracingChildren uses the flag. See comments in the function. 
+ */ + JS_ASSERT(*flagp != GCF_FINAL); + *flagp |= GCF_MARK; if (!cx->insideGCMarkCallback) { /* * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always @@ -1748,28 +2041,28 @@ JS_CallTracer(JSTracer *trc, void *thing, uint32 kind) # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy)) #endif if (RECURSION_TOO_DEEP()) - AddThingToUnscannedBag(rt, thing, flagp); + DelayTracingChildren(rt, flagp); else JS_TraceChildren(trc, thing, kind); } else { /* * For API compatibility we allow for the callback to assume that - * after it calls JS_Trace or JS_MarkGCThing for the last time, the - * callback can start to finalize its own objects that are only - * referenced by unmarked GC things. + * after it calls JS_MarkGCThing for the last time, the callback can + * start to finalize its own objects that are only referenced by + * unmarked GC things. * * Since we do not know which call from inside the callback is the - * last, we ensure that the unscanned bag is always empty when we - * return to the callback and all marked things are scanned. + * last, we ensure that children of all marked things are traced and + * call TraceDelayedChildren(trc) after tracing the thing. * - * We do not check for the stack size here and uncondinally call - * JS_TraceChildren. Otherwise with low C stack the thing would be - * pushed to the bag just to be feed again to JS_TraceChildren from - * inside ScanDelayedChildren. + * As TraceDelayedChildren unconditionally invokes JS_TraceChildren + * for the things with untraced children, calling DelayTracingChildren + * is useless here. Hence we always trace thing's children even with a + * low native stack. */ cx->insideGCMarkCallback = JS_FALSE; JS_TraceChildren(trc, thing, kind); - ScanDelayedChildren(trc); + TraceDelayedChildren(trc); cx->insideGCMarkCallback = JS_TRUE; } @@ -1816,18 +2109,20 @@ gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, jsuword thing = (jsuword) JSVAL_TO_GCTHING(v); uintN i; JSGCArenaList *arenaList; - JSGCArena *a; + uint32 thingSize; + JSGCArenaInfo *a; size_t limit; for (i = 0; i < GC_NUM_FREELISTS; i++) { arenaList = &trc->context->runtime->gcArenaList[i]; - limit = arenaList->lastLimit; + thingSize = arenaList->thingSize; + limit = (size_t) arenaList->lastCount * thingSize; for (a = arenaList->last; a; a = a->prev) { - if (thing - FIRST_THING_PAGE(a) < limit) { + if (thing - ARENA_INFO_TO_START(a) < limit) { root_points_to_gcArenaList = JS_TRUE; break; } - limit = GC_THINGS_SIZE; + limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize; } } if (!root_points_to_gcArenaList && rhe->name) { @@ -2109,9 +2404,9 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) JSBool keepAtoms; uintN i, type; JSTracer trc; - size_t nbytes, limit, offset; - JSGCArena *a, **ap; - uint8 flags, *flagp, *firstPage; + uint32 thingSize, indexLimit; + JSGCArenaInfo *a, **ap; + uint8 flags, *flagp; JSGCThing *thing, *freeList; JSGCArenaList *arenaList; GCFinalizeOp finalizer; @@ -2309,8 +2604,8 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) restart: rt->gcNumber++; - JS_ASSERT(!rt->gcUnscannedArenaStackTop); - JS_ASSERT(rt->gcUnscannedBagSize == 0); + JS_ASSERT(!rt->gcUntracedArenaStackTop); + JS_ASSERT(rt->gcTraceLaterCount == 0); /* * Mark phase. @@ -2325,7 +2620,7 @@ restart: * Mark children of things that caused too deep recursion during the above * tracing. 
*/ - ScanDelayedChildren(&trc); + TraceDelayedChildren(&trc); JS_ASSERT(!cx->insideGCMarkCallback); if (rt->gcCallback) { @@ -2334,7 +2629,7 @@ restart: JS_ASSERT(cx->insideGCMarkCallback); cx->insideGCMarkCallback = JS_FALSE; } - JS_ASSERT(rt->gcUnscannedBagSize == 0); + JS_ASSERT(rt->gcTraceLaterCount == 0); rt->gcMarkingTracer = NULL; @@ -2382,22 +2677,18 @@ restart: : i == GC_FREELIST_INDEX(sizeof(JSObject)) ? 0 : i]; - nbytes = arenaList->thingSize; - limit = arenaList->lastLimit; - for (a = arenaList->last; a; a = a->prev) { - JS_ASSERT(!a->prevUnscanned); - JS_ASSERT(a->unscannedPages == 0); - firstPage = (uint8 *) FIRST_THING_PAGE(a); - for (offset = 0; offset != limit; offset += nbytes) { - if ((offset & GC_PAGE_MASK) == 0) { - JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))-> - unscannedBitmap == 0); - offset += PAGE_THING_GAP(nbytes); - } - JS_ASSERT(offset < limit); - flagp = a->base + offset / sizeof(JSGCThing); - if (flagp >= firstPage) - flagp += GC_THINGS_SIZE; + a = arenaList->last; + if (!a) + continue; + + thingSize = arenaList->thingSize; + indexLimit = THINGS_PER_ARENA(thingSize); + JS_ASSERT(arenaList->lastCount > 0); + flagp = THING_FLAGP(a, arenaList->lastCount - 1); + for (;;) { + JS_ASSERT(a->prevUntracedPage == 0); + JS_ASSERT(a->untracedThings == 0); + do { flags = *flagp; if (flags & GCF_MARK) { *flagp &= ~GCF_MARK; @@ -2406,7 +2697,7 @@ restart: type = flags & GCF_TYPEMASK; finalizer = gc_finalizers[type]; if (finalizer) { - thing = (JSGCThing *)(firstPage + offset); + thing = (JSGCThing *) FLAGP_TO_THING(flagp, thingSize); *flagp = (uint8)(flags | GCF_FINAL); if (type >= GCX_EXTERNAL_STRING) js_PurgeDeflatedStringCache(rt, (JSString *)thing); @@ -2416,8 +2707,11 @@ restart: /* Set flags to GCF_FINAL, signifying that thing is free. */ *flagp = GCF_FINAL; } - } - limit = GC_THINGS_SIZE; + } while (++flagp != THING_FLAGS_END(a)); + a = a->prev; + if (!a) + break; + flagp = THING_FLAGP(a, indexLimit - 1); } } @@ -2442,47 +2736,45 @@ restart: for (i = 0; i < GC_NUM_FREELISTS; i++) { arenaList = &rt->gcArenaList[i]; ap = &arenaList->last; - a = *ap; - if (!a) + if (!(a = *ap)) continue; allClear = JS_TRUE; arenaList->freeList = NULL; freeList = NULL; + thingSize = arenaList->thingSize; + indexLimit = THINGS_PER_ARENA(thingSize); + JS_ASSERT(arenaList->lastCount > 0); + flagp = THING_FLAGP(a, arenaList->lastCount - 1); METER(arenaList->stats.nthings = 0); METER(arenaList->stats.freelen = 0); - - nbytes = GC_FREELIST_NBYTES(i); - limit = arenaList->lastLimit; - do { + for (;;) { METER(size_t nfree = 0); - firstPage = (uint8 *) FIRST_THING_PAGE(a); - for (offset = 0; offset != limit; offset += nbytes) { - if ((offset & GC_PAGE_MASK) == 0) - offset += PAGE_THING_GAP(nbytes); - JS_ASSERT(offset < limit); - flagp = a->base + offset / sizeof(JSGCThing); - if (flagp >= firstPage) - flagp += GC_THINGS_SIZE; - + do { if (*flagp != GCF_FINAL) { allClear = JS_FALSE; METER(++arenaList->stats.nthings); } else { - thing = (JSGCThing *)(firstPage + offset); + thing = (JSGCThing *) FLAGP_TO_THING(flagp, thingSize); thing->flagp = flagp; thing->next = freeList; freeList = thing; METER(++nfree); } - } + } while (++flagp != THING_FLAGS_END(a)); + if (allClear) { /* - * Forget just assembled free list head for the arena - * and destroy the arena itself. + * Forget just assembled free list head for the arena and + * destroy the arena itself. 
*/ freeList = arenaList->freeList; - DestroyGCArena(rt, arenaList, ap); + if (a == arenaList->last) + arenaList->lastCount = indexLimit; + *ap = a->prev; + JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE); + rt->gcBytes -= GC_ARENA_SIZE; + DestroyGCArena(rt, a); } else { allClear = JS_TRUE; arenaList->freeList = freeList; @@ -2491,8 +2783,10 @@ restart: METER(arenaList->stats.totalfreelen += nfree); METER(++arenaList->stats.totalarenas); } - limit = GC_THINGS_SIZE; - } while ((a = *ap) != NULL); + if (!(a = *ap)) + break; + flagp = THING_FLAGP(a, indexLimit - 1); + } } if (rt->gcCallback) diff --git a/mozilla/js/src/jsgc.h b/mozilla/js/src/jsgc.h index bad22b8ae60..a620cb2fb61 100644 --- a/mozilla/js/src/jsgc.h +++ b/mozilla/js/src/jsgc.h @@ -254,10 +254,11 @@ typedef struct JSGCStats { uint32 maxdepth; /* maximum mark tail recursion depth */ uint32 cdepth; /* mark recursion depth of C functions */ uint32 maxcdepth; /* maximum mark recursion depth of C functions */ - uint32 unscanned; /* mark C stack overflows or number of times - GC things were put in unscanned bag */ + uint32 untraced; /* number of times tracing of GC thing's children were + delayed due to a low C stack */ #ifdef DEBUG - uint32 maxunscanned; /* maximum size of unscanned bag */ + uint32 maxuntraced;/* maximum number of things with children to trace + later */ #endif uint32 maxlevel; /* maximum GC nesting (indirect recursion) level */ uint32 poke; /* number of potentially useful GC calls */ @@ -276,8 +277,9 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp); #endif /* JS_GCMETER */ -typedef struct JSGCArena JSGCArena; +typedef struct JSGCArenaInfo JSGCArenaInfo; typedef struct JSGCArenaList JSGCArenaList; +typedef struct JSGCChunkInfo JSGCChunkInfo; #ifdef JS_GCMETER typedef struct JSGCArenaStats JSGCArenaStats; @@ -298,25 +300,26 @@ struct JSGCArenaStats { #endif struct JSGCArenaList { - JSGCArena *last; /* last allocated GC arena */ - uint16 lastLimit; /* end offset of allocated so far things in - the last arena */ - uint16 thingSize; /* size of things to allocate on this list */ - JSGCThing *freeList; /* list of free GC things */ + JSGCArenaInfo *last; /* last allocated GC arena */ + uint16 lastCount; /* number of allocated things in the last + arena */ + uint16 thingSize; /* size of things to allocate on this list + */ + JSGCThing *freeList; /* list of free GC things */ #ifdef JS_GCMETER - JSGCArenaStats stats; + JSGCArenaStats stats; #endif }; struct JSWeakRoots { /* Most recently created things by type, members of the GC's root set. */ - JSGCThing *newborn[GCX_NTYPES]; + void *newborn[GCX_NTYPES]; /* Atom root for the last-looked-up atom on this context. */ - jsval lastAtom; + jsval lastAtom; /* Root for the result of the most recent js_InternalInvoke call. 
*/ - jsval lastInternalResult; + jsval lastInternalResult; }; JS_STATIC_ASSERT(JSVAL_NULL == 0); diff --git a/mozilla/js/src/jsinterp.c b/mozilla/js/src/jsinterp.c index 8bc2d75db79..cff9d4b670b 100644 --- a/mozilla/js/src/jsinterp.c +++ b/mozilla/js/src/jsinterp.c @@ -5291,8 +5291,7 @@ interrupt: JS_ASSERT(sp - fp->spbase >= 1); lval = FETCH_OPND(-1); JS_ASSERT(JSVAL_IS_OBJECT(lval)); - cx->weakRoots.newborn[GCX_OBJECT] = - (JSGCThing *)JSVAL_TO_GCTHING(lval); + cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(lval); END_CASE(JSOP_ENDINIT) BEGIN_CASE(JSOP_INITPROP) diff --git a/mozilla/js/src/jsobj.c b/mozilla/js/src/jsobj.c index 60533375222..3d30058eeb0 100644 --- a/mozilla/js/src/jsobj.c +++ b/mozilla/js/src/jsobj.c @@ -2540,7 +2540,7 @@ js_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent) out: JS_POP_TEMP_ROOT(cx, &tvr); - cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj; + cx->weakRoots.newborn[GCX_OBJECT] = obj; return obj; bad: @@ -4405,8 +4405,7 @@ js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id, * instance that delegates to this object, or just query the * prototype for its class. */ - cx->weakRoots.newborn[GCX_OBJECT] = - (JSGCThing *)JSVAL_TO_GCTHING(v); + cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(v); } } *protop = JSVAL_IS_OBJECT(v) ? JSVAL_TO_OBJECT(v) : NULL;