/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: set ts=8 sw=4 et tw=78: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is Mozilla Communicator client code, released * March 31, 1998. * * The Initial Developer of the Original Code is * Netscape Communications Corporation. * Portions created by the Initial Developer are Copyright (C) 1998 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either of the GNU General Public License Version 2 or later (the "GPL"), * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** *//* * JS Mark-and-Sweep Garbage Collector. * * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see * jsgc.h). 
It allocates from a special GC arena pool with each arena allocated * using malloc. It uses an ideally parallel array of flag bytes to hold the * mark bit, finalizer type index, etc. * * XXX swizzle page to freelist for better locality of reference */#include<stdlib.h> /* for free */#include<math.h>#include<string.h> /* for memset used when DEBUG */#include"jstypes.h"#include"jsstdint.h"#include"jsutil.h" /* Added by JSIFY */#include"jshash.h" /* Added by JSIFY */#include"jsbit.h"#include"jsclist.h"#include"jsprf.h"#include"jsapi.h"#include"jsatom.h"#include"jscntxt.h"#include"jsversion.h"#include"jsdbgapi.h"#include"jsexn.h"#include"jsfun.h"#include"jsgc.h"#include"jsgcchunk.h"#include"jsinterp.h"#include"jsiter.h"#include"jslock.h"#include"jsnum.h"#include"jsobj.h"#include"jsparse.h"#include"jsscope.h"#include"jsscript.h"#include"jsstaticcheck.h"#include"jsstr.h"#include"jstask.h"#include"jstracer.h"#if JS_HAS_XML_SUPPORT#include"jsxml.h"#endif#ifdef INCLUDE_MOZILLA_DTRACE#include"jsdtracef.h"#endif#include"jscntxtinlines.h"#include"jsobjinlines.h"/* * Include the headers for mmap. */#if defined(XP_WIN)# include <windows.h>#endif#if defined(XP_UNIX) || defined(XP_BEOS)# include <unistd.h># include <sys/mman.h>#endif/* On Mac OS X MAP_ANONYMOUS is not defined. */#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)# define MAP_ANONYMOUS MAP_ANON#endif#if !defined(MAP_ANONYMOUS)# define MAP_ANONYMOUS 0#endifusingnamespacejs;/* * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and * JSTRACE_STRING. */JS_STATIC_ASSERT(JSTRACE_OBJECT==0);JS_STATIC_ASSERT(JSTRACE_DOUBLE==1);JS_STATIC_ASSERT(JSTRACE_STRING==2);JS_STATIC_ASSERT(JSTRACE_XML==3);/* * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml * trace kind when JS_HAS_XML_SUPPORT is false. */JS_STATIC_ASSERT(JSTRACE_STRING+1==JSTRACE_XML);/* * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS. 
*/JS_STATIC_ASSERT(JSVAL_NULL==0);/* * Check consistency of external string constants from JSFinalizeGCThingKind. */JS_STATIC_ASSERT(FINALIZE_EXTERNAL_STRING_LAST-FINALIZE_EXTERNAL_STRING0==JS_EXTERNAL_STRING_LIMIT-1);JS_STATIC_ASSERT(sizeof(JSStackHeader)>=2*sizeof(jsval));/* * GC memory is allocated in chunks. The size of each chunk is GC_CHUNK_SIZE. * The chunk contains an array of GC arenas holding GC things, an array of * the mark bitmaps for each arena, an array of JSGCArenaInfo arena * descriptors, an array of JSGCMarkingDelay descriptors, the JSGCChunkInfo * chunk descriptor and a bitmap indicating free arenas in the chunk. The * following picture demonstrates the layout: * * +--------+--------------+-------+--------+------------+-----------------+ * | arenas | mark bitmaps | infos | delays | chunk info | free arena bits | * +--------+--------------+-------+--------+------------+-----------------+ * * To ensure fast O(1) lookup of mark bits and arena descriptors each chunk is * allocated on GC_CHUNK_SIZE boundary. This way a simple mask and shift * operation gives an arena index into the mark and JSGCArenaInfo arrays. * * All chunks that have at least one free arena are put on the doubly-linked * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains * the head of the chunk's free arena list together with the link fields for * gcChunkList. * * A GC arena contains GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary * and holds things of the same size and kind. The size of each thing in the * arena must be divisible by GC_CELL_SIZE, the minimal allocation unit, and * the size of the mark bitmap is fixed and is independent of the thing's * size with one bit per each GC_CELL_SIZE bytes. For thing sizes that exceed * GC_CELL_SIZE this implies that we waste space in the mark bitmap. The * advantage is that we can find the mark bit for the thing using just * integer shifts avoiding an expensive integer division. 
We trade some space * for speed here. * * The number of arenas in the chunk is given by GC_ARENAS_PER_CHUNK. We find * that number as follows. Suppose chunk contains n arenas. Together with the * word-aligned free arena bitmap and JSGCChunkInfo they should fit into the * chunk. Hence GC_ARENAS_PER_CHUNK or n_max is the maximum value of n for * which the following holds: * * n*s + ceil(n/B) <= M (1) * * where "/" denotes normal real division, * ceil(r) gives the least integer not smaller than the number r, * s is the number of words in the GC arena, arena's mark bitmap, * JSGCArenaInfo and JSGCMarkingDelay or GC_ARENA_ALL_WORDS. * B is number of bits per word or B == JS_BITS_PER_WORD * M is the number of words in the chunk without JSGCChunkInfo or * M == (GC_CHUNK_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword). * * We rewrite the inequality as * * n*B*s/B + ceil(n/B) <= M, * ceil(n*B*s/B + n/B) <= M, * ceil(n*(B*s + 1)/B) <= M (2) * * We define a helper function e(n, s, B), * * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1. * * It gives: * * n*(B*s + 1)/B + e(n, s, B) <= M, * n + e*B/(B*s + 1) <= M*B/(B*s + 1) * * We apply the floor function to both sides of the last equation, where * floor(r) gives the biggest integer not greater than r. As a consequence we * have: * * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)), * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)), * n <= floor(M*B/(B*s + 1)), (3) * * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1. * Thus any n that satisfies the original constraint (1) or its equivalent (2), * must also satisfy (3). That is, we got an upper estimate for the maximum * value of n. Lets show that this upper estimate, * * floor(M*B/(B*s + 1)), (4) * * also satisfies (1) and, as such, gives the required maximum value. * Substituting it into (2) gives: * * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X) * * where X == (B*s + 1)/B > 1. 
But then floor(M/X)*X <= M/X*X == M and * * ceil(floor(M/X)*X) <= ceil(M) == M. * * Thus the value of (4) gives the maximum n satisfying (1). * * For the final result we observe that in (4) * * M*B == (GC_CHUNK_SIZE - sizeof(JSGCChunkInfo)) / sizeof(jsuword) * * JS_BITS_PER_WORD * == (GC_CHUNK_SIZE - sizeof(JSGCChunkInfo)) * JS_BITS_PER_BYTE * * since GC_CHUNK_SIZE and sizeof(JSGCChunkInfo) are at least word-aligned. */staticconstjsuwordGC_ARENA_SHIFT=12;staticconstjsuwordGC_ARENA_MASK=JS_BITMASK(GC_ARENA_SHIFT);staticconstjsuwordGC_ARENA_SIZE=JS_BIT(GC_ARENA_SHIFT);staticconstjsuwordGC_MAX_CHUNK_AGE=3;constsize_tGC_CELL_SHIFT=3;constsize_tGC_CELL_SIZE=size_t(1)<<GC_CELL_SHIFT;constsize_tGC_CELL_MASK=GC_CELL_SIZE-1;constsize_tBITS_PER_GC_CELL=GC_CELL_SIZE*JS_BITS_PER_BYTE;constsize_tGC_CELLS_PER_ARENA=size_t(1)<<(GC_ARENA_SHIFT-GC_CELL_SHIFT);constsize_tGC_MARK_BITMAP_SIZE=GC_CELLS_PER_ARENA/JS_BITS_PER_BYTE;constsize_tGC_MARK_BITMAP_WORDS=GC_CELLS_PER_ARENA/JS_BITS_PER_WORD;JS_STATIC_ASSERT(sizeof(jsbitmap)==sizeof(jsuword));JS_STATIC_ASSERT(sizeof(JSString)%GC_CELL_SIZE==0);JS_STATIC_ASSERT(sizeof(JSObject)%GC_CELL_SIZE==0);JS_STATIC_ASSERT(sizeof(JSFunction)%GC_CELL_SIZE==0);#ifdef JSXMLJS_STATIC_ASSERT(sizeof(JSXML)%GC_CELL_SIZE==0);#endifJS_STATIC_ASSERT(GC_CELL_SIZE==sizeof(jsdouble));constsize_tDOUBLES_PER_ARENA=GC_CELLS_PER_ARENA;structJSGCArenaInfo{/* * Allocation list for the arena or NULL if the arena holds double values. */JSGCArenaList*list;/* * Pointer to the previous arena in a linked list. The arena can either * belong to one of JSContext.gcArenaList lists or, when it does not have * any allocated GC things, to the list of free arenas in the chunk with * head stored in JSGCChunkInfo.lastFreeArena. */JSGCArena*prev;JSGCThing*freeList;/* The arena has marked doubles. */boolhasMarkedDoubles;staticinlineJSGCArenaInfo*fromGCThing(void*thing);};/* See comments before ThingsPerUnmarkedBit below. 
*/structJSGCMarkingDelay{JSGCArena*link;jsuwordunmarkedChildren;};structJSGCArena{uint8data[GC_ARENA_SIZE];voidcheckAddress()const{JS_ASSERT(!(reinterpret_cast<jsuword>(this)&GC_ARENA_MASK));}jsuwordtoPageStart()const{checkAddress();returnreinterpret_cast<jsuword>(this);}staticinlineJSGCArena*fromGCThing(void*thing);staticinlineJSGCArena*fromChunkAndIndex(jsuwordchunk,size_tindex);jsuwordgetChunk(){returntoPageStart()&~GC_CHUNK_MASK;}jsuwordgetIndex(){return(toPageStart()&GC_CHUNK_MASK)>>GC_ARENA_SHIFT;}inlineJSGCArenaInfo*getInfo();inlineJSGCMarkingDelay*getMarkingDelay();inlinejsbitmap*getMarkBitmap();inlinevoidclearMarkBitmap();};structJSGCChunkInfo{JSGCChunkInfo**prevp;JSGCChunkInfo*next;size_tnumFreeArenas;size_tgcChunkAge;inlinevoidinit(JSRuntime*rt);inlinejsbitmap*getFreeArenaBitmap();voidaddToList(JSRuntime*rt){prevp=&rt->gcChunkList;next=rt->gcChunkList;if(rt->gcChunkList){JS_ASSERT(rt->gcChunkList->prevp==&rt->gcChunkList);rt->gcChunkList->prevp=&next;}rt->gcChunkList=this;}voidremoveFromList(JSRuntime*rt){*prevp=next;if(next){JS_ASSERT(next->prevp==&next);next->prevp=prevp;}}inlinejsuwordgetChunk();staticinlineJSGCChunkInfo*fromChunk(jsuwordchunk);};/* Check that all chunk arrays at least word-aligned. */JS_STATIC_ASSERT(sizeof(JSGCArena)==GC_ARENA_SIZE);JS_STATIC_ASSERT(GC_MARK_BITMAP_WORDS%sizeof(jsuword)==0);JS_STATIC_ASSERT(sizeof(JSGCArenaInfo)%sizeof(jsuword)==0);JS_STATIC_ASSERT(sizeof(JSGCMarkingDelay)%sizeof(jsuword)==0);constsize_tGC_ARENA_ALL_WORDS=(GC_ARENA_SIZE+GC_MARK_BITMAP_SIZE+sizeof(JSGCArenaInfo)+sizeof(JSGCMarkingDelay))/sizeof(jsuword);/* The value according (4) above. */constsize_tGC_ARENAS_PER_CHUNK=(GC_CHUNK_SIZE-sizeof(JSGCChunkInfo))*JS_BITS_PER_BYTE/(JS_BITS_PER_WORD*GC_ARENA_ALL_WORDS+1);constsize_tGC_FREE_ARENA_BITMAP_WORDS=(GC_ARENAS_PER_CHUNK+JS_BITS_PER_WORD-1)/JS_BITS_PER_WORD;constsize_tGC_FREE_ARENA_BITMAP_SIZE=GC_FREE_ARENA_BITMAP_WORDS*sizeof(jsuword);/* Check that GC_ARENAS_PER_CHUNK indeed maximises (1). 
*/JS_STATIC_ASSERT(GC_ARENAS_PER_CHUNK*GC_ARENA_ALL_WORDS+GC_FREE_ARENA_BITMAP_WORDS<=(GC_CHUNK_SIZE-sizeof(JSGCChunkInfo))/sizeof(jsuword));JS_STATIC_ASSERT((GC_ARENAS_PER_CHUNK+1)*GC_ARENA_ALL_WORDS+(GC_ARENAS_PER_CHUNK+1+JS_BITS_PER_WORD-1)/JS_BITS_PER_WORD>(GC_CHUNK_SIZE-sizeof(JSGCChunkInfo))/sizeof(jsuword));constsize_tGC_MARK_BITMAP_ARRAY_OFFSET=GC_ARENAS_PER_CHUNK<<GC_ARENA_SHIFT;constsize_tGC_ARENA_INFO_ARRAY_OFFSET=GC_MARK_BITMAP_ARRAY_OFFSET+GC_MARK_BITMAP_SIZE*GC_ARENAS_PER_CHUNK;constsize_tGC_MARKING_DELAY_ARRAY_OFFSET=GC_ARENA_INFO_ARRAY_OFFSET+sizeof(JSGCArenaInfo)*GC_ARENAS_PER_CHUNK;constsize_tGC_CHUNK_INFO_OFFSET=GC_CHUNK_SIZE-GC_FREE_ARENA_BITMAP_SIZE-sizeof(JSGCChunkInfo);inlinejsuwordJSGCChunkInfo::getChunk(){jsuwordaddr=reinterpret_cast<jsuword>(this);JS_ASSERT((addr&GC_CHUNK_MASK)==GC_CHUNK_INFO_OFFSET);jsuwordchunk=addr&~GC_CHUNK_MASK;returnchunk;}/* static */inlineJSGCChunkInfo*JSGCChunkInfo::fromChunk(jsuwordchunk){JS_ASSERT(!(chunk&GC_CHUNK_MASK));jsuwordaddr=chunk|GC_CHUNK_INFO_OFFSET;returnreinterpret_cast<JSGCChunkInfo*>(addr);}inlinejsbitmap*JSGCChunkInfo::getFreeArenaBitmap(){jsuwordaddr=reinterpret_cast<jsuword>(this);returnreinterpret_cast<jsbitmap*>(addr+sizeof(JSGCChunkInfo));}inlinevoidJSGCChunkInfo::init(JSRuntime*rt){numFreeArenas=GC_ARENAS_PER_CHUNK;jsbitmap*freeArenas=getFreeArenaBitmap();/* * For simplicity we set all bits to 1 including the high bits in the * last word that corresponds to non-existing arenas. This is fine since * the arena scans the bitmap words from lowest to highest bits and the * allocation checks numFreeArenas before doing the search. 
*/memset(freeArenas,0xFF,GC_FREE_ARENA_BITMAP_SIZE);addToList(rt);}inlinevoidCheckValidGCThingPtr(void*thing){#ifdef DEBUGJS_ASSERT(!JSString::isStatic(thing));jsuwordaddr=reinterpret_cast<jsuword>(thing);JS_ASSERT(!(addr&GC_CELL_MASK));JS_ASSERT((addr&GC_CHUNK_MASK)<GC_MARK_BITMAP_ARRAY_OFFSET);#endif}/* static */inlineJSGCArenaInfo*JSGCArenaInfo::fromGCThing(void*thing){CheckValidGCThingPtr(thing);jsuwordaddr=reinterpret_cast<jsuword>(thing);jsuwordchunk=addr&~GC_CHUNK_MASK;JSGCArenaInfo*array=reinterpret_cast<JSGCArenaInfo*>(chunk|GC_ARENA_INFO_ARRAY_OFFSET);size_tarenaIndex=(addr&GC_CHUNK_MASK)>>GC_ARENA_SHIFT;returnarray+arenaIndex;}/* static */inlineJSGCArena*JSGCArena::fromGCThing(void*thing){CheckValidGCThingPtr(thing);jsuwordaddr=reinterpret_cast<jsuword>(thing);returnreinterpret_cast<JSGCArena*>(addr&~GC_ARENA_MASK);}/* static */inlineJSGCArena*JSGCArena::fromChunkAndIndex(jsuwordchunk,size_tindex){JS_ASSERT(chunk);JS_ASSERT(!(chunk&GC_CHUNK_MASK));JS_ASSERT(index<GC_ARENAS_PER_CHUNK);returnreinterpret_cast<JSGCArena*>(chunk|(index<<GC_ARENA_SHIFT));}inlineJSGCArenaInfo*JSGCArena::getInfo(){jsuwordchunk=getChunk();jsuwordindex=getIndex();jsuwordoffset=GC_ARENA_INFO_ARRAY_OFFSET+index*sizeof(JSGCArenaInfo);returnreinterpret_cast<JSGCArenaInfo*>(chunk|offset);}inlineJSGCMarkingDelay*JSGCArena::getMarkingDelay(){jsuwordchunk=getChunk();jsuwordindex=getIndex();jsuwordoffset=GC_MARKING_DELAY_ARRAY_OFFSET+index*sizeof(JSGCMarkingDelay);returnreinterpret_cast<JSGCMarkingDelay*>(chunk|offset);}inlinejsbitmap*JSGCArena::getMarkBitmap(){jsuwordchunk=getChunk();jsuwordindex=getIndex();jsuwordoffset=GC_MARK_BITMAP_ARRAY_OFFSET+index*GC_MARK_BITMAP_SIZE;returnreinterpret_cast<jsbitmap*>(chunk|offset);}inlinevoidJSGCArena::clearMarkBitmap(){PodZero(getMarkBitmap(),GC_MARK_BITMAP_WORDS);}/* * Helpers for GC-thing operations. 
*/inlinejsbitmap*GetGCThingMarkBit(void*thing,size_t&bitIndex){CheckValidGCThingPtr(thing);jsuwordaddr=reinterpret_cast<jsuword>(thing);jsuwordchunk=addr&~GC_CHUNK_MASK;bitIndex=(addr&GC_CHUNK_MASK)>>GC_CELL_SHIFT;returnreinterpret_cast<jsbitmap*>(chunk|GC_MARK_BITMAP_ARRAY_OFFSET);}inlineboolIsMarkedGCThing(void*thing){size_tindex;jsbitmap*markBitmap=GetGCThingMarkBit(thing,index);return!!JS_TEST_BIT(markBitmap,index);}inlineboolMarkIfUnmarkedGCThing(void*thing){size_tindex;jsbitmap*markBitmap=GetGCThingMarkBit(thing,index);if(JS_TEST_BIT(markBitmap,index))returnfalse;JS_SET_BIT(markBitmap,index);returntrue;}inlinesize_tThingsPerArena(size_tthingSize){JS_ASSERT(!(thingSize&GC_CELL_MASK));JS_ASSERT(thingSize<=GC_ARENA_SIZE);returnGC_ARENA_SIZE/thingSize;}/* Can only be called if thing belongs to an arena where a->list is not null. */inlinesize_tGCThingToArenaIndex(void*thing){CheckValidGCThingPtr(thing);jsuwordaddr=reinterpret_cast<jsuword>(thing);jsuwordoffsetInArena=addr&GC_ARENA_MASK;JSGCArenaInfo*a=JSGCArenaInfo::fromGCThing(thing);JS_ASSERT(a->list);JS_ASSERT(offsetInArena%a->list->thingSize==0);returnoffsetInArena/a->list->thingSize;}/* Can only be applicable to arena where a->list is not null. */inlineuint8*GCArenaIndexToThing(JSGCArena*a,JSGCArenaInfo*ainfo,size_tindex){JS_ASSERT(a->getInfo()==ainfo);/* * We use "<=" and not "<" in the assert so index can mean the limit. * For the same reason we use "+", not "|" when finding the thing address * as the limit address can start at the next arena. */JS_ASSERT(index<=ThingsPerArena(ainfo->list->thingSize));jsuwordoffsetInArena=index*ainfo->list->thingSize;returnreinterpret_cast<uint8*>(a->toPageStart()+offsetInArena);}/* * The private JSGCThing struct, which describes a JSRuntime.gcFreeList element. 
*/structJSGCThing{JSGCThing*link;};staticinlineJSGCThing*MakeNewArenaFreeList(JSGCArena*a,size_tthingSize){jsuwordthingsStart=a->toPageStart();jsuwordlastThingMinAddr=thingsStart+GC_ARENA_SIZE-thingSize*2+1;jsuwordthingPtr=thingsStart;do{jsuwordnextPtr=thingPtr+thingSize;JS_ASSERT((nextPtr&GC_ARENA_MASK)+thingSize<=GC_ARENA_SIZE);JSGCThing*thing=reinterpret_cast<JSGCThing*>(thingPtr);thing->link=reinterpret_cast<JSGCThing*>(nextPtr);thingPtr=nextPtr;}while(thingPtr<lastThingMinAddr);JSGCThing*lastThing=reinterpret_cast<JSGCThing*>(thingPtr);lastThing->link=NULL;returnreinterpret_cast<JSGCThing*>(thingsStart);}#ifdef JS_GCMETER# define METER(x) ((void) (x))# define METER_IF(condition, x) ((void) ((condition) && (x)))#else# define METER(x) ((void) 0)# define METER_IF(condition, x) ((void) 0)#endif#define METER_UPDATE_MAX(maxLval, rval) \ METER_IF((maxLval) < (rval), (maxLval) = (rval))#ifdef MOZ_GCTIMERstaticjsrefcountnewChunkCount=0;staticjsrefcountdestroyChunkCount=0;#endifinlinevoid*GetGCChunk(JSRuntime*rt){void*p=AllocGCChunk();#ifdef MOZ_GCTIMERif(p)JS_ATOMIC_INCREMENT(&newChunkCount);#endifMETER_IF(p,rt->gcStats.nchunks++);METER_UPDATE_MAX(rt->gcStats.maxnchunks,rt->gcStats.nchunks);returnp;}inlinevoidReleaseGCChunk(JSRuntime*rt,void*p){JS_ASSERT(p);#ifdef MOZ_GCTIMERJS_ATOMIC_INCREMENT(&destroyChunkCount);#endifJS_ASSERT(rt->gcStats.nchunks!=0);METER(rt->gcStats.nchunks--);FreeGCChunk(p);}staticJSGCArena*NewGCArena(JSContext*cx){JSRuntime*rt=cx->runtime;if(!JS_THREAD_DATA(cx)->waiveGCQuota&&rt->gcBytes>=rt->gcMaxBytes){/* * FIXME bug 524051 We cannot run a last-ditch GC on trace for now, so * just pretend we are out of memory which will throw us off trace and * we will re-try this code path from the interpreter. 
*/if(!JS_ON_TRACE(cx))returnNULL;js_TriggerGC(cx,true);}JSGCChunkInfo*ci=rt->gcChunkList;jsuwordchunk;if(!ci){GCEmptyChunks*chunks=&rt->gcEmptyChunks;if(!chunks->empty()){ci=chunks->back();chunks->popBack();JS_ASSERT(ci);chunk=ci->getChunk();}else{void*chunkptr=GetGCChunk(rt);if(!chunkptr)returnNULL;chunk=reinterpret_cast<jsuword>(chunkptr);ci=JSGCChunkInfo::fromChunk(chunk);}ci->gcChunkAge=0;ci->init(rt);}else{chunk=ci->getChunk();}JS_ASSERT(ci->prevp==&rt->gcChunkList);JS_ASSERT(ci->numFreeArenas!=0);/* Scan the bitmap for the first non-zero bit. */jsbitmap*freeArenas=ci->getFreeArenaBitmap();size_tarenaIndex=0;while(!*freeArenas){arenaIndex+=JS_BITS_PER_WORD;freeArenas++;}size_tbit=CountTrailingZeros(*freeArenas);arenaIndex+=bit;JS_ASSERT(arenaIndex<GC_ARENAS_PER_CHUNK);JS_ASSERT(*freeArenas&(jsuword(1)<<bit));*freeArenas&=~(jsuword(1)<<bit);--ci->numFreeArenas;if(ci->numFreeArenas==0)ci->removeFromList(rt);rt->gcBytes+=GC_ARENA_SIZE;METER(rt->gcStats.nallarenas++);METER_UPDATE_MAX(rt->gcStats.maxnallarenas,rt->gcStats.nallarenas);returnJSGCArena::fromChunkAndIndex(chunk,arenaIndex);}namespacejs{structGCArenaReleaser{#ifdef DEBUGJSGCArena*emptyArenaList;#endifGCArenaReleaser(){#ifdef DEBUGemptyArenaList=NULL;#endif}/* * The method does not touches the arena or release its memory so code can * still refer into it. 
*/voidrelease(JSRuntime*rt,JSGCArena*a){METER(rt->gcStats.afree++);JS_ASSERT(rt->gcBytes>=GC_ARENA_SIZE);rt->gcBytes-=GC_ARENA_SIZE;JS_ASSERT(rt->gcStats.nallarenas!=0);METER(rt->gcStats.nallarenas--);jsuwordchunk=a->getChunk();JSGCChunkInfo*ci=JSGCChunkInfo::fromChunk(chunk);JS_ASSERT(ci->numFreeArenas<=GC_ARENAS_PER_CHUNK-1);if(ci->numFreeArenas==0)ci->addToList(rt);jsbitmap*freeArenas=ci->getFreeArenaBitmap();JS_ASSERT(!JS_TEST_BIT(freeArenas,a->getIndex()));JS_SET_BIT(freeArenas,a->getIndex());ci->numFreeArenas++;if(ci->numFreeArenas==GC_ARENAS_PER_CHUNK){ci->removeFromList(rt);ci->gcChunkAge=0;if(!rt->gcEmptyChunks.append(ci)){jsuwordchunk=ci->getChunk();ReleaseGCChunk(rt,(void*)chunk);}}#ifdef DEBUGa->getInfo()->prev=emptyArenaList;emptyArenaList=a;#endif}voidreleaseList(JSRuntime*rt,JSGCArena*arenaList){for(JSGCArena*prev;arenaList;arenaList=prev){/* * Read the prev link before we release as that modifies the field * in debug-only builds when assembling the empty arena list. */prev=arenaList->getInfo()->prev;release(rt,arenaList);}}voidfreeArenas(JSRuntime*rt){#ifdef DEBUGwhile(emptyArenaList){JSGCArena*next=emptyArenaList->getInfo()->prev;memset(emptyArenaList,JS_FREE_PATTERN,GC_ARENA_SIZE);emptyArenaList=next;}#endif}};}/* namespace js */staticinlinesize_tGetFinalizableThingSize(unsignedthingKind){JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT==8);staticconstuint8map[FINALIZE_LIMIT]={sizeof(JSObject),/* FINALIZE_OBJECT */sizeof(JSObject),/* FINALIZE_ITER */sizeof(JSFunction),/* FINALIZE_FUNCTION */#if JS_HAS_XML_SUPPORTsizeof(JSXML),/* FINALIZE_XML */#endifsizeof(JSString),/* FINALIZE_STRING */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING0 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING1 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING2 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING3 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING4 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING5 */sizeof(JSString),/* FINALIZE_EXTERNAL_STRING6 */sizeof(JSString),/* 
FINALIZE_EXTERNAL_STRING7 */};JS_ASSERT(thingKind<FINALIZE_LIMIT);returnmap[thingKind];}staticinlinesize_tGetFinalizableTraceKind(size_tthingKind){JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT==8);staticconstuint8map[FINALIZE_LIMIT]={JSTRACE_OBJECT,/* FINALIZE_OBJECT */JSTRACE_OBJECT,/* FINALIZE_ITER */JSTRACE_OBJECT,/* FINALIZE_FUNCTION */#if JS_HAS_XML_SUPPORT /* FINALIZE_XML */JSTRACE_XML,#endif /* FINALIZE_STRING */JSTRACE_STRING,JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING0 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING1 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING2 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING3 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING4 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING5 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING6 */JSTRACE_STRING,/* FINALIZE_EXTERNAL_STRING7 */};JS_ASSERT(thingKind<FINALIZE_LIMIT);returnmap[thingKind];}staticinlinesize_tGetFinalizableArenaTraceKind(JSGCArenaInfo*ainfo){JS_ASSERT(ainfo->list);returnGetFinalizableTraceKind(ainfo->list->thingKind);}staticinlinesize_tGetFinalizableThingTraceKind(void*thing){JSGCArenaInfo*ainfo=JSGCArenaInfo::fromGCThing(thing);returnGetFinalizableArenaTraceKind(ainfo);}staticvoidInitGCArenaLists(JSRuntime*rt){for(unsignedi=0;i!=FINALIZE_LIMIT;++i){JSGCArenaList*arenaList=&rt->gcArenaList[i];arenaList->head=NULL;arenaList->cursor=NULL;arenaList->thingKind=i;arenaList->thingSize=GetFinalizableThingSize(i);}rt->gcDoubleArenaList.head=NULL;rt->gcDoubleArenaList.cursor=NULL;}staticvoidFinishGCArenaLists(JSRuntime*rt){js::GCArenaReleaserarenaReleaser;for(unsignedi=0;i<FINALIZE_LIMIT;i++){JSGCArenaList*arenaList=&rt->gcArenaList[i];arenaReleaser.releaseList(rt,arenaList->head);arenaList->head=NULL;arenaList->cursor=NULL;}arenaReleaser.releaseList(rt,rt->gcDoubleArenaList.head);rt->gcDoubleArenaList.head=NULL;rt->gcDoubleArenaList.cursor=NULL;arenaReleaser.freeArenas(rt);JS_ASSERT(rt->gcChunkList==NULL);rt->gcBytes=0;}intNjs_GetExternalStringGCType(JSString*str){JS_STATIC_ASSERT(FINALIZE_STRING+1=
=FINALIZE_EXTERNAL_STRING0);JS_ASSERT(!JSString::isStatic(str));unsignedthingKind=JSGCArenaInfo::fromGCThing(str)->list->thingKind;JS_ASSERT(IsFinalizableStringKind(thingKind));returnintN(thingKind)-intN(FINALIZE_EXTERNAL_STRING0);}JS_FRIEND_API(uint32)js_GetGCThingTraceKind(void*thing){if(JSString::isStatic(thing))returnJSTRACE_STRING;JSGCArenaInfo*ainfo=JSGCArenaInfo::fromGCThing(thing);if(!ainfo->list)returnJSTRACE_DOUBLE;returnGetFinalizableArenaTraceKind(ainfo);}JSRuntime*js_GetGCStringRuntime(JSString*str){JSGCArenaList*list=JSGCArenaInfo::fromGCThing(str)->list;JS_ASSERT(list->thingSize==sizeof(JSString));unsignedi=list->thingKind;JS_ASSERT(i==FINALIZE_STRING||(FINALIZE_EXTERNAL_STRING0<=i&&i<FINALIZE_EXTERNAL_STRING0+JS_EXTERNAL_STRING_LIMIT));return(JSRuntime*)((uint8*)(list-i)-offsetof(JSRuntime,gcArenaList));}booljs_IsAboutToBeFinalized(void*thing){if(JSString::isStatic(thing))returnfalse;JSGCArenaInfo*ainfo=JSGCArenaInfo::fromGCThing(thing);if(!ainfo->list){/* * Check if arena has no marked doubles. In that case the bitmap with * the mark flags contains all garbage as it is initialized only when * marking the first double in the arena. */if(!ainfo->hasMarkedDoubles)returntrue;}return!IsMarkedGCThing(thing);}/* This is compatible with JSDHashEntryStub. */typedefstructJSGCRootHashEntry{JSDHashEntryHdrhdr;void*root;constchar*name;}JSGCRootHashEntry;/* * Initial size of the gcRootsHash and gcLocksHash tables (SWAG, small enough * to amortize). 
*/constuint32GC_ROOTS_SIZE=256;structJSGCLockHashEntry:publicJSDHashEntryHdr{constvoid*thing;uint32count;};JSBooljs_InitGC(JSRuntime*rt,uint32maxbytes){InitGCArenaLists(rt);if(!JS_DHashTableInit(&rt->gcRootsHash,JS_DHashGetStubOps(),NULL,sizeof(JSGCRootHashEntry),GC_ROOTS_SIZE)){rt->gcRootsHash.ops=NULL;returnfalse;}if(!JS_DHashTableInit(&rt->gcLocksHash,JS_DHashGetStubOps(),NULL,sizeof(JSGCLockHashEntry),GC_ROOTS_SIZE)){rt->gcLocksHash.ops=NULL;returnfalse;}/* * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes * for default backward API compatibility. */rt->gcMaxBytes=maxbytes;rt->setGCMaxMallocBytes(maxbytes);rt->gcEmptyArenaPoolLifespan=30000;/* * By default the trigger factor gets maximum possible value. This * means that GC will not be triggered by growth of GC memory (gcBytes). */rt->setGCTriggerFactor((uint32)-1);/* * The assigned value prevents GC from running when GC memory is too low * (during JS engine start). */rt->setGCLastBytes(8192);METER(PodZero(&rt->gcStats));returntrue;}#ifdef JS_GCMETERstaticvoidUpdateArenaStats(JSGCArenaStats*st,uint32nlivearenas,uint32nkilledArenas,uint32nthings){size_tnarenas;narenas=nlivearenas+nkilledArenas;JS_ASSERT(narenas>=st->livearenas);st->newarenas=narenas-st->livearenas;st->narenas=narenas;st->livearenas=nlivearenas;if(st->maxarenas<narenas)st->maxarenas=narenas;st->totalarenas+=narenas;st->nthings=nthings;if(st->maxthings<nthings)st->maxthings=nthings;st->totalthings+=nthings;}JS_FRIEND_API(void)js_DumpGCStats(JSRuntime*rt,FILE*fp){staticconstchar*constGC_ARENA_NAMES[]={"double","object","iter","function",#if JS_HAS_XML_SUPPORT"xml",#endif"string","external_string_0","external_string_1","external_string_2","external_string_3","external_string_4","external_string_5","external_string_6","external_string_7",};JS_STATIC_ASSERT(JS_ARRAY_LENGTH(GC_ARENA_NAMES)==FINALIZE_LIMIT+1);fprintf(fp,"\nGC allocation statistics:\n\n");#define UL(x) ((unsigned long)(x))#define ULSTAT(x) UL(rt->gcStats.x)#define 
PERCENT(x,y) (100.0 * (double) (x) / (double) (y))size_tsumArenas=0;size_tsumTotalArenas=0;size_tsumThings=0;size_tsumMaxThings=0;size_tsumThingSize=0;size_tsumTotalThingSize=0;size_tsumArenaCapacity=0;size_tsumTotalArenaCapacity=0;size_tsumAlloc=0;size_tsumLocalAlloc=0;size_tsumFail=0;size_tsumRetry=0;for(inti=-1;i<(int)FINALIZE_LIMIT;i++){size_tthingSize,thingsPerArena;JSGCArenaStats*st;if(i==-1){thingSize=sizeof(jsdouble);thingsPerArena=DOUBLES_PER_ARENA;st=&rt->gcStats.doubleArenaStats;}else{thingSize=rt->gcArenaList[i].thingSize;thingsPerArena=ThingsPerArena(thingSize);st=&rt->gcStats.arenaStats[i];}if(st->maxarenas==0)continue;fprintf(fp,"%s arenas (thing size %lu, %lu things per arena):",GC_ARENA_NAMES[i+1],UL(thingSize),UL(thingsPerArena));putc('\n',fp);fprintf(fp," arenas before GC: %lu\n",UL(st->narenas));fprintf(fp," new arenas before GC: %lu (%.1f%%)\n",UL(st->newarenas),PERCENT(st->newarenas,st->narenas));fprintf(fp," arenas after GC: %lu (%.1f%%)\n",UL(st->livearenas),PERCENT(st->livearenas,st->narenas));fprintf(fp," max arenas: %lu\n",UL(st->maxarenas));fprintf(fp," things: %lu\n",UL(st->nthings));fprintf(fp," GC cell utilization: %.1f%%\n",PERCENT(st->nthings,thingsPerArena*st->narenas));fprintf(fp," average cell utilization: %.1f%%\n",PERCENT(st->totalthings,thingsPerArena*st->totalarenas));fprintf(fp," max things: %lu\n",UL(st->maxthings));fprintf(fp," alloc attempts: %lu\n",UL(st->alloc));fprintf(fp," alloc without locks: %lu (%.1f%%)\n",UL(st->localalloc),PERCENT(st->localalloc,st->alloc));sumArenas+=st->narenas;sumTotalArenas+=st->totalarenas;sumThings+=st->nthings;sumMaxThings+=st->maxthings;sumThingSize+=thingSize*st->nthings;sumTotalThingSize+=size_t(thingSize*st->totalthings);sumArenaCapacity+=thingSize*thingsPerArena*st->narenas;sumTotalArenaCapacity+=thingSize*thingsPerArena*st->totalarenas;sumAlloc+=st->alloc;sumLocalAlloc+=st->localalloc;sumFail+=st->fail;sumRetry+=st->retry;putc('\n',fp);}fputs("Never used 
arenas:\n",fp);for(inti=-1;i<(int)FINALIZE_LIMIT;i++){size_tthingSize,thingsPerArena;JSGCArenaStats*st;if(i==-1){thingSize=sizeof(jsdouble);thingsPerArena=DOUBLES_PER_ARENA;st=&rt->gcStats.doubleArenaStats;}else{thingSize=rt->gcArenaList[i].thingSize;thingsPerArena=ThingsPerArena(thingSize);st=&rt->gcStats.arenaStats[i];}if(st->maxarenas!=0)continue;fprintf(fp,"%s (thing size %lu, %lu things per arena)\n",GC_ARENA_NAMES[i+1],UL(thingSize),UL(thingsPerArena));}fprintf(fp,"\nTOTAL STATS:\n");fprintf(fp," bytes allocated: %lu\n",UL(rt->gcBytes));fprintf(fp," total GC arenas: %lu\n",UL(sumArenas));fprintf(fp," total GC things: %lu\n",UL(sumThings));fprintf(fp," max total GC things: %lu\n",UL(sumMaxThings));fprintf(fp," GC cell utilization: %.1f%%\n",PERCENT(sumThingSize,sumArenaCapacity));fprintf(fp," average cell utilization: %.1f%%\n",PERCENT(sumTotalThingSize,sumTotalArenaCapacity));fprintf(fp,"allocation retries after GC: %lu\n",UL(sumRetry));fprintf(fp," alloc attempts: %lu\n",UL(sumAlloc));fprintf(fp," alloc without locks: %lu (%.1f%%)\n",UL(sumLocalAlloc),PERCENT(sumLocalAlloc,sumAlloc));fprintf(fp," allocation failures: %lu\n",UL(sumFail));fprintf(fp," things born locked: %lu\n",ULSTAT(lockborn));fprintf(fp," valid lock calls: %lu\n",ULSTAT(lock));fprintf(fp," valid unlock calls: %lu\n",ULSTAT(unlock));fprintf(fp," mark recursion depth: %lu\n",ULSTAT(depth));fprintf(fp," maximum mark recursion: %lu\n",ULSTAT(maxdepth));fprintf(fp," mark C recursion depth: %lu\n",ULSTAT(cdepth));fprintf(fp," maximum mark C recursion: %lu\n",ULSTAT(maxcdepth));fprintf(fp," delayed tracing calls: %lu\n",ULSTAT(unmarked));#ifdef DEBUGfprintf(fp," max trace later count: %lu\n",ULSTAT(maxunmarked));#endiffprintf(fp," maximum GC nesting level: %lu\n",ULSTAT(maxlevel));fprintf(fp,"potentially useful GC calls: %lu\n",ULSTAT(poke));fprintf(fp," thing arenas freed so far: %lu\n",ULSTAT(afree));fprintf(fp," stack segments scanned: %lu\n",ULSTAT(stackseg));fprintf(fp,"stack segment slots 
scanned: %lu\n",ULSTAT(segslots));fprintf(fp,"reachable closeable objects: %lu\n",ULSTAT(nclose));fprintf(fp," max reachable closeable: %lu\n",ULSTAT(maxnclose));fprintf(fp," scheduled close hooks: %lu\n",ULSTAT(closelater));fprintf(fp," max scheduled close hooks: %lu\n",ULSTAT(maxcloselater));#undef UL#undef ULSTAT#undef PERCENT}#endif#ifdef DEBUGstaticvoidCheckLeakedRoots(JSRuntime*rt);#endifvoidDestroyEmptyGCChunks(JSRuntime*rt,boolreleaseAll){size_tnewLength=0;GCEmptyChunks*chunks=&rt->gcEmptyChunks;JSGCChunkInfo**array=chunks->begin();for(JSGCChunkInfo**i=chunks->begin();i!=chunks->end();++i){JSGCChunkInfo*ci=*i;JS_ASSERT(ci->numFreeArenas==GC_ARENAS_PER_CHUNK);if(releaseAll||ci->gcChunkAge>GC_MAX_CHUNK_AGE){jsuwordchunk=ci->getChunk();ReleaseGCChunk(rt,(void*)chunk);}else{ci->gcChunkAge++;array[newLength++]=ci;}}rt->gcEmptyChunks.resize(newLength);}voidjs_FinishGC(JSRuntime*rt){#ifdef JS_ARENAMETERJS_DumpArenaStats(stdout);#endif#ifdef JS_GCMETERif(JS_WANT_GC_METER_PRINT)js_DumpGCStats(rt,stdout);#endifFinishGCArenaLists(rt);DestroyEmptyGCChunks(rt,true);JS_ASSERT(rt->gcEmptyChunks.length()==0);if(rt->gcRootsHash.ops){#ifdef DEBUGCheckLeakedRoots(rt);#endifJS_DHashTableFinish(&rt->gcRootsHash);rt->gcRootsHash.ops=NULL;}if(rt->gcLocksHash.ops){JS_DHashTableFinish(&rt->gcLocksHash);rt->gcLocksHash.ops=NULL;}}JSBooljs_AddRoot(JSContext*cx,void*rp,constchar*name){JSBoolok=js_AddRootRT(cx->runtime,rp,name);if(!ok)JS_ReportOutOfMemory(cx);returnok;}JSBooljs_AddRootRT(JSRuntime*rt,void*rp,constchar*name){JSBoolok;JSGCRootHashEntry*rhe;/* * Due to the long-standing, but now removed, use of rt->gcLock across the * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking * properly with a racing GC, without calling JS_AddRoot from a request. * We have to preserve API compatibility here, now that we avoid holding * rt->gcLock across the mark phase (including the root hashtable mark). 
 */
    AutoLockGC lock(rt);
    js_WaitForGC(rt);
    rhe = (JSGCRootHashEntry *)
          JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        /* JS_DHASH_ADD returns null only on out-of-memory. */
        ok = JS_FALSE;
    }
    return ok;
}

JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    AutoLockGC lock(rt);
    js_WaitForGC(rt);
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    rt->gcPoke = JS_TRUE;
    return JS_TRUE;
}

#ifdef DEBUG

/* Hash enumerator: warn about one leaked root and count it. */
static JSDHashOperator
js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
{
    uint32 *leakedroots = (uint32 *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *) hdr;

    (*leakedroots)++;
    fprintf(stderr,
            "JS engine warning: leaking GC root \'%s\' at %p\n",
            rhe->name ? (char *) rhe->name : "", rhe->root);

    return JS_DHASH_NEXT;
}

static void
CheckLeakedRoots(JSRuntime *rt)
{
    uint32 leakedroots = 0;

    /* Warn (but don't assert) debug builds of any remaining roots. */
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
                           &leakedroots);
    if (leakedroots > 0) {
        if (leakedroots == 1) {
            fprintf(stderr,
"JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
" This root may point to freed memory. Objects reachable\n"
" through it have not been finalized.\n",
                    (void *) rt);
        } else {
            fprintf(stderr,
"JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
" These roots may point to freed memory. Objects reachable\n"
" through them have not been finalized.\n",
                    (unsigned long) leakedroots, (void *) rt);
        }
    }
}

typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);
    void *data;
} NamedRootDumpArgs;

/* Hash enumerator: forward each named root to the user's dump callback. */
static JSDHashOperator
js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *) hdr;

    if (rhe->name)
        args->dump(rhe->name, rhe->root, args->data);
    return JS_DHASH_NEXT;
}

JS_BEGIN_EXTERN_C
void
js_DumpNamedRoots(JSRuntime *rt,
                  void (*dump)(const char *name, void *rp, void *data),
                  void *data)
{
    NamedRootDumpArgs args;

    args.dump = dump;
    args.data = data;
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
}
JS_END_EXTERN_C

#endif /* DEBUG */

typedef struct GCRootMapArgs {
    JSGCRootMapFun map;
    void *data;
} GCRootMapArgs;

/* Adapt a JSGCRootMapFun result to a JSDHashOperator for enumeration. */
static JSDHashOperator
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *) hdr;
    intN mapflags;
    int op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

    /* When the flag values coincide, the cast below is a no-op translation. */
#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                    \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                    \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator) mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return (JSDHashOperator) op;
}

uint32
js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
{
    GCRootMapArgs args = {map, data};
    AutoLockGC lock(rt);
    return JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
}

void
JSRuntime::setGCTriggerFactor(uint32 factor)
{
    JS_ASSERT(factor >= 100);

    gcTriggerFactor = factor;
    setGCLastBytes(gcLastBytes);
}

void
JSRuntime::setGCLastBytes(size_t lastBytes)
{
    gcLastBytes = lastBytes;

    /* Compute lastBytes * factor% in 64 bits, saturating on overflow. */
    uint64 triggerBytes = uint64(lastBytes) * uint64(gcTriggerFactor / 100);
    if (triggerBytes != size_t(triggerBytes))
        triggerBytes = size_t(-1);
    gcTriggerBytes = size_t(triggerBytes);
}

void
JSGCFreeLists::purge()
{
    /*
     * Return the free list back to the arena so the GC
finalization will not
     * run the finalizers over uninitialized bytes from free things.
     */
    for (JSGCThing **p = finalizables; p != JS_ARRAY_END(finalizables); ++p) {
        JSGCThing *freeListHead = *p;
        if (freeListHead) {
            JSGCArenaInfo *ainfo = JSGCArenaInfo::fromGCThing(freeListHead);
            JS_ASSERT(!ainfo->freeList);
            ainfo->freeList = freeListHead;
            *p = NULL;
        }
    }
    doubles = NULL;
}

/* Transfer all free lists to |another|, leaving this set empty. */
void
JSGCFreeLists::moveTo(JSGCFreeLists *another)
{
    *another = *this;
    doubles = NULL;
    PodArrayZero(finalizables);
    JS_ASSERT(isEmpty());
}

static inline bool
IsGCThresholdReached(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1)
        return true;
#endif

    /*
     * Since the initial value of the gcLastBytes parameter is not equal to
     * zero (see the js_InitGC function) the return value is false when
     * the gcBytes value is close to zero at the JS engine start.
     */
    return rt->isGCMallocLimitReached() || rt->gcBytes >= rt->gcTriggerBytes;
}

/*
 * The active free lists live in JSThreadData normally, or in the local root
 * stack while a local root scope is entered (see js_NewFinalizableGCThing).
 */
static inline JSGCFreeLists *
GetGCFreeLists(JSContext *cx)
{
    JSThreadData *td = JS_THREAD_DATA(cx);
    if (!td->localRootStack)
        return &td->gcFreeLists;
    JS_ASSERT(td->gcFreeLists.isEmpty());
    return &td->localRootStack->gcFreeLists;
}

/*
 * Replenish the free list for |thingKind|: take a partially-free arena from
 * the arena list cursor, allocate a new arena, or run a last-ditch GC.
 * Returns the new free list head or null on failure.
 */
static JSGCThing *
RefillFinalizableFreeList(JSContext *cx, unsigned thingKind)
{
    JS_ASSERT(!GetGCFreeLists(cx)->finalizables[thingKind]);
    JSRuntime *rt = cx->runtime;
    JSGCArenaList *arenaList;
    JSGCArena *a;

    {
        AutoLockGC lock(rt);
        JS_ASSERT(!rt->gcRunning);
        if (rt->gcRunning) {
            METER(rt->gcStats.finalfail++);
            return NULL;
        }

        bool canGC = !JS_ON_TRACE(cx) && !JS_THREAD_DATA(cx)->waiveGCQuota;
        bool doGC = canGC && IsGCThresholdReached(rt);
        arenaList = &rt->gcArenaList[thingKind];
        for (;;) {
            if (doGC) {
                /*
                 * Keep rt->gcLock across the call into js_GC so we don't
                 * starve and lose to racing threads who deplete the heap just
                 * after js_GC has replenished it (or has synchronized with a
                 * racing GC that collected a bunch of garbage). This unfair
                 * scheduling can happen on certain operating systems. For the
                 * gory details, see bug 162779.
                 */
                js_GC(cx, GC_LAST_DITCH);
                METER(cx->runtime->gcStats.arenaStats[thingKind].retry++);
                canGC = false;

                /*
                 * The JSGC_END callback can legitimately allocate new GC
                 * things and populate the free list. If that happens, just
                 * return that list head.
                 */
                JSGCThing *freeList = GetGCFreeLists(cx)->finalizables[thingKind];
                if (freeList)
                    return freeList;
            }

            /* Scan the arena list for an arena with a non-empty free list. */
            while ((a = arenaList->cursor) != NULL) {
                JSGCArenaInfo *ainfo = a->getInfo();
                arenaList->cursor = ainfo->prev;
                JSGCThing *freeList = ainfo->freeList;
                if (freeList) {
                    ainfo->freeList = NULL;
                    return freeList;
                }
            }

            a = NewGCArena(cx);
            if (a)
                break;
            if (!canGC) {
                METER(cx->runtime->gcStats.arenaStats[thingKind].fail++);
                return NULL;
            }
            doGC = true;
        }

        /*
         * Do only minimal initialization of the arena inside the GC lock. We
         * can do the rest outside the lock because no other threads will see
         * the arena until the GC is run.
         */
        JSGCArenaInfo *ainfo = a->getInfo();
        ainfo->list = arenaList;
        ainfo->prev = arenaList->head;
        ainfo->freeList = NULL;
        arenaList->head = a;
    }

    a->clearMarkBitmap();

    JSGCMarkingDelay *markingDelay = a->getMarkingDelay();
    markingDelay->link = NULL;
    markingDelay->unmarkedChildren = 0;

    return MakeNewArenaFreeList(a, arenaList->thingSize);
}

static inline void
CheckGCFreeListLink(JSGCThing *thing)
{
    /*
     * The GC things on the free lists come from one arena and the things on
     * the free list are linked in ascending address order.
     */
    JS_ASSERT_IF(thing->link,
                 JSGCArena::fromGCThing(thing) ==
                 JSGCArena::fromGCThing(thing->link));
    JS_ASSERT_IF(thing->link, thing < thing->link);
}

/*
 * Allocate a finalizable GC thing of the given kind, refilling the per-thread
 * free list when it is exhausted. Returns null after reporting OOM.
 */
void *
js_NewFinalizableGCThing(JSContext *cx, unsigned thingKind)
{
    JS_ASSERT(thingKind < FINALIZE_LIMIT);
#ifdef JS_THREADSAFE
    JS_ASSERT(cx->thread);
#endif

    /* Updates of metering counters here may not be thread-safe. */
    METER(cx->runtime->gcStats.arenaStats[thingKind].alloc++);

    /* Fast path: pop from the thread-local free list. */
    JSGCThing **freeListp =
        JS_THREAD_DATA(cx)->gcFreeLists.finalizables + thingKind;
    JSGCThing *thing = *freeListp;
    if (thing) {
        JS_ASSERT(!JS_THREAD_DATA(cx)->localRootStack);
        *freeListp = thing->link;
        cx->weakRoots.finalizableNewborns[thingKind] = thing;
        CheckGCFreeListLink(thing);
        METER(cx->runtime->gcStats.arenaStats[thingKind].localalloc++);
        return thing;
    }

    /*
     * To avoid for the local roots on each GC allocation when the local roots
     * are not active we move the GC free lists from JSThreadData to lrs in
     * JS_EnterLocalRootScope(). This way with inactive local roots we only
     * check for non-null lrs only when we exhaust the free list.
     */
    JSLocalRootStack *lrs = JS_THREAD_DATA(cx)->localRootStack;
    for (;;) {
        if (lrs) {
            freeListp = lrs->gcFreeLists.finalizables + thingKind;
            thing = *freeListp;
            if (thing) {
                *freeListp = thing->link;
                METER(cx->runtime->gcStats.arenaStats[thingKind].localalloc++);
                break;
            }
        }
        thing = RefillFinalizableFreeList(cx, thingKind);
        if (thing) {
            /*
             * See comments in RefillFinalizableFreeList about a possibility
             * of *freeListp == thing.
             */
            JS_ASSERT(!*freeListp || *freeListp == thing);
            *freeListp = thing->link;
            break;
        }
        js_ReportOutOfMemory(cx);
        return NULL;
    }

    CheckGCFreeListLink(thing);

    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context. A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /* Push failed: return the thing to the free list and bail. */
            JS_ASSERT(thing->link == *freeListp);
            *freeListp = thing;
            return NULL;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
 */
        cx->weakRoots.finalizableNewborns[thingKind] = thing;
    }

    return thing;
}

/*
 * Build a free list from the unmarked cells of a previously used double
 * arena, reading the arena's mark bitmap word by word.
 */
static JSGCThing *
TurnUsedArenaIntoDoubleList(JSGCArena *a)
{
    JSGCThing *head;
    JSGCThing **tailp = &head;
    jsuword thing = a->toPageStart();
    jsbitmap *markBitmap = a->getMarkBitmap();
    jsbitmap *lastMarkWord = markBitmap + GC_MARK_BITMAP_WORDS - 1;

    for (jsbitmap *m = markBitmap; m <= lastMarkWord; ++m) {
        JS_ASSERT(thing < a->toPageStart() + GC_ARENA_SIZE);
        JS_ASSERT((thing - a->toPageStart()) %
                  (JS_BITS_PER_WORD * sizeof(jsdouble)) == 0);

        jsbitmap bits = *m;
        if (bits == jsbitmap(-1)) {
            /* All cells covered by this word are marked; skip them. */
            thing += JS_BITS_PER_WORD * sizeof(jsdouble);
        } else {
            /*
             * We have some zero bits. Turn corresponding cells into a list
             * unrolling the loop for better performance.
             */
            const unsigned unroll = 4;
            const jsbitmap unrollMask = (jsbitmap(1) << unroll) - 1;
            JS_STATIC_ASSERT((JS_BITS_PER_WORD & unrollMask) == 0);

            for (unsigned n = 0; n != JS_BITS_PER_WORD; n += unroll) {
                jsbitmap bitsChunk = bits & unrollMask;
                bits >>= unroll;
                if (bitsChunk == unrollMask) {
                    thing += unroll * sizeof(jsdouble);
                } else {
#define DO_BIT(bit)                                                           \
                    if (!(bitsChunk & (jsbitmap(1) << (bit)))) {              \
                        JS_ASSERT(thing - a->toPageStart() <=                 \
                                  (DOUBLES_PER_ARENA - 1) * sizeof(jsdouble));\
                        JSGCThing *t = reinterpret_cast<JSGCThing *>(thing);  \
                        *tailp = t;                                           \
                        tailp = &t->link;                                     \
                    }                                                         \
                    thing += sizeof(jsdouble);
                    DO_BIT(0);
                    DO_BIT(1);
                    DO_BIT(2);
                    DO_BIT(3);
#undef DO_BIT
                }
            }
        }
    }
    *tailp = NULL;
    return head;
}

/*
 * Replenish the per-thread free list of jsdouble cells, reusing partially
 * full double arenas or allocating a fresh arena; runs a last-ditch GC when
 * the heap threshold is reached.
 */
static JSGCThing *
RefillDoubleFreeList(JSContext *cx)
{
    JS_ASSERT(!GetGCFreeLists(cx)->doubles);

    JSRuntime *rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);

    bool canGC = !JS_ON_TRACE(cx) && !JS_THREAD_DATA(cx)->waiveGCQuota;
    bool doGC = canGC && IsGCThresholdReached(rt);
    JSGCArena *a;
    for (;;) {
        if (doGC) {
            js_GC(cx, GC_LAST_DITCH);
            METER(rt->gcStats.doubleArenaStats.retry++);
            canGC = false;

            /* See comments in RefillFinalizableFreeList. */
            JSGCThing *freeList = GetGCFreeLists(cx)->doubles;
            if (freeList) {
                JS_UNLOCK_GC(rt);
                return freeList;
            }
        }

        /*
         * Loop until we find arena with some free doubles. We turn arenas
         * into free lists outside the lock to minimize contention between
         * threads.
         */
        while (!!(a = rt->gcDoubleArenaList.cursor)) {
            rt->gcDoubleArenaList.cursor = a->getInfo()->prev;
            JS_UNLOCK_GC(rt);
            JSGCThing *list = TurnUsedArenaIntoDoubleList(a);
            if (list)
                return list;
            JS_LOCK_GC(rt);
        }
        a = NewGCArena(cx);
        if (a)
            break;
        if (!canGC) {
            METER(rt->gcStats.doubleArenaStats.fail++);
            JS_UNLOCK_GC(rt);
            return NULL;
        }
        doGC = true;
    }

    /* Link the new arena under the lock; finish setup after unlocking. */
    JSGCArenaInfo *ainfo = a->getInfo();
    ainfo->list = NULL;
    ainfo->freeList = NULL;
    ainfo->prev = rt->gcDoubleArenaList.head;
    rt->gcDoubleArenaList.head = a;
    JS_UNLOCK_GC(rt);

    ainfo->hasMarkedDoubles = false;
    return MakeNewArenaFreeList(a, sizeof(jsdouble));
}

/* Allocate a GC'd jsdouble holding |d| and store it in the rooted |*vp|. */
JSBool
js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
{
    /* Updates of metering counters here are not thread-safe. */
    METER(cx->runtime->gcStats.doubleArenaStats.alloc++);

    JSGCThing **freeListp = &JS_THREAD_DATA(cx)->gcFreeLists.doubles;
    JSGCThing *thing = *freeListp;
    if (thing) {
        METER(cx->runtime->gcStats.doubleArenaStats.localalloc++);
        JS_ASSERT(!JS_THREAD_DATA(cx)->localRootStack);
        CheckGCFreeListLink(thing);
        *freeListp = thing->link;

        jsdouble *dp = reinterpret_cast<jsdouble *>(thing);
        *dp = d;
        *vp = DOUBLE_TO_JSVAL(dp);
        return true;
    }

    JSLocalRootStack *lrs = JS_THREAD_DATA(cx)->localRootStack;
    for (;;) {
        if (lrs) {
            freeListp = &lrs->gcFreeLists.doubles;
            thing = *freeListp;
            if (thing) {
                METER(cx->runtime->gcStats.doubleArenaStats.localalloc++);
                break;
            }
        }
        thing = RefillDoubleFreeList(cx);
        if (thing) {
            JS_ASSERT(!*freeListp || *freeListp == thing);
            break;
        }
        if (!JS_ON_TRACE(cx)) {
            /* Trace code handles this on its own. */
            js_ReportOutOfMemory(cx);
            METER(cx->runtime->gcStats.doubleArenaStats.fail++);
        }
        return false;
    }

    CheckGCFreeListLink(thing);
    *freeListp = thing->link;

    jsdouble *dp = reinterpret_cast<jsdouble *>(thing);
    *dp = d;
    *vp = DOUBLE_TO_JSVAL(dp);
    return !lrs || js_PushLocalRoot(cx, lrs, *vp) >= 0;
}

/* Allocate a double rooted only via cx->weakRoots.newbornDouble. */
jsdouble *
js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
{
    jsval v;
    if (!js_NewDoubleInRootedValue(cx, d, &v))
        return NULL;

    jsdouble *dp = JSVAL_TO_DOUBLE(v);
    cx->weakRoots.newbornDouble = dp;
    return dp;
}

/* Pin |thing| against GC; reference-counted via the lock hash table. */
JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    if (!thing)
        return true;

    AutoLockGC lock(rt);
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)
        JS_DHashTableOperate(&rt->gcLocksHash, thing, JS_DHASH_ADD);
    bool ok = !!lhe;
    if (ok) {
        if (!lhe->thing) {
            lhe->thing = thing;
            lhe->count = 1;
        } else {
            JS_ASSERT(lhe->count >= 1);
            lhe->count++;
        }
        METER(rt->gcStats.lock++);
    }
    return ok;
}

/* Drop one lock reference on |thing|, removing the entry at zero. */
void
js_UnlockGCThingRT(JSRuntime *rt, void *thing)
{
    if (!thing)
        return;

    AutoLockGC lock(rt);
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)
        JS_DHashTableOperate(&rt->gcLocksHash, thing, JS_DHASH_LOOKUP);
    if (JS_DHASH_ENTRY_IS_BUSY(lhe)) {
        rt->gcPoke = true;
        if (--lhe->count == 0)
            JS_DHashTableOperate(&rt->gcLocksHash, thing, JS_DHASH_REMOVE);
        METER(rt->gcStats.unlock++);
    }
}

JS_PUBLIC_API(void)
JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
{
    switch (kind) {
      case JSTRACE_OBJECT: {
        /* If obj has no map, it must be a newborn. */
        JSObject *obj = (JSObject *) thing;
        if (!obj->map)
            break;
        obj->map->ops->trace(trc, obj);
        break;
      }

      case JSTRACE_STRING: {
        JSString *str = (JSString *) thing;
        if (str->isDependent())
            JS_CALL_STRING_TRACER(trc, str->dependentBase(), "base");
        break;
      }

#if JS_HAS_XML_SUPPORT
      case JSTRACE_XML:
        js_TraceXML(trc, (JSXML *) thing);
        break;
#endif
    }
}

/*
 * When the native stack is low, the GC does not call JS_TraceChildren to mark
 * the reachable "children" of the thing. Rather the thing is put aside and
 * JS_TraceChildren is called later with more space on the C stack.
 *
 * To implement such delayed marking of the children with minimal overhead for
 * the normal case of sufficient native stack, the code uses two fields per
 * arena stored in JSGCMarkingDelay. The first field, JSGCMarkingDelay::link,
 * links all arenas with delayed things into a stack list with the pointer to
 * stack top in JSRuntime::gcUnmarkedArenaStackTop. DelayMarkingChildren adds
 * arenas to the stack as necessary while MarkDelayedChildren pops the arenas
 * from the stack until it empties.
 *
 * The second field, JSGCMarkingDelay::unmarkedChildren, is a bitmap that
 * tells for which things the GC should call JS_TraceChildren later. The
 * bitmap is a single word. As such it does not pinpoint the delayed things
 * in the arena but rather tells the intervals containing
 * ThingsPerUnmarkedBit(thingSize) things. Later the code in
 * MarkDelayedChildren discovers such intervals and calls JS_TraceChildren on
 * any marked thing in the interval. This implies that JS_TraceChildren can be
 * called many times for a single thing if the thing shares the same interval
 * with some delayed things. This should be fine as any GC graph
 * marking/traversing hooks must allow repeated calls during the same GC cycle.
 * In particular, xpcom cycle collector relies on this.
 *
 * Note that such repeated scanning may slow down the GC. In particular, it is
 * possible to construct an object graph where the GC calls JS_TraceChildren
 * ThingsPerUnmarkedBit(thingSize) for almost all things in the graph. We
 * tolerate this as the max value for ThingsPerUnmarkedBit(thingSize) is 4.
 * This is achieved for JSObject on 32 bit system as it is exactly JSObject
 * that has the smallest size among the GC things that can be delayed. On 32
 * bit CPU we have less than 128 objects per 4K GC arena so each bit in
 * unmarkedChildren covers 4 objects.
 */
inline unsigned
ThingsPerUnmarkedBit(unsigned thingSize)
{
    return JS_HOWMANY(ThingsPerArena(thingSize), JS_BITS_PER_WORD);
}

/*
 * Record that |thing|'s children must be traced later, setting the interval
 * bit in the arena's unmarkedChildren word and pushing the arena onto the
 * unmarked-arena stack when it is not there yet.
 */
static void
DelayMarkingChildren(JSRuntime *rt, void *thing)
{
    JS_ASSERT(IsMarkedGCThing(thing));
    METER(rt->gcStats.unmarked++);

    JSGCArena *a = JSGCArena::fromGCThing(thing);
    JSGCArenaInfo *ainfo = a->getInfo();
    JSGCMarkingDelay *markingDelay = a->getMarkingDelay();

    size_t thingArenaIndex = GCThingToArenaIndex(thing);
    size_t unmarkedBitIndex = thingArenaIndex /
                              ThingsPerUnmarkedBit(ainfo->list->thingSize);
    JS_ASSERT(unmarkedBitIndex < JS_BITS_PER_WORD);

    jsuword bit = jsuword(1) << unmarkedBitIndex;
    if (markingDelay->unmarkedChildren != 0) {
        JS_ASSERT(rt->gcUnmarkedArenaStackTop);
        if (markingDelay->unmarkedChildren & bit) {
            /* bit already covers things with children to mark later. */
            return;
        }
        markingDelay->unmarkedChildren |= bit;
    } else {
        /*
         * The thing is the first thing with not yet marked children in the
         * whole arena, so push the arena on the stack of arenas with things
         * to be marked later unless the arena has already been pushed. We
         * detect that through checking prevUnmarked as the field is 0
         * only for not yet pushed arenas. To ensure that
         * prevUnmarked != 0
         * even when the stack contains one element, we make prevUnmarked
         * for the arena at the bottom to point to itself.
         *
         * See comments in MarkDelayedChildren.
         */
        markingDelay->unmarkedChildren = bit;
        if (!markingDelay->link) {
            if (!rt->gcUnmarkedArenaStackTop) {
                /* Stack was empty, mark the arena as the bottom element. */
                markingDelay->link = a;
            } else {
                JS_ASSERT(rt->gcUnmarkedArenaStackTop->getMarkingDelay()->link);
                markingDelay->link = rt->gcUnmarkedArenaStackTop;
            }
            rt->gcUnmarkedArenaStackTop = a;
        }
        JS_ASSERT(rt->gcUnmarkedArenaStackTop);
    }
#ifdef DEBUG
    rt->gcMarkLaterCount += ThingsPerUnmarkedBit(ainfo->list->thingSize);
    METER_UPDATE_MAX(rt->gcStats.maxunmarked, rt->gcMarkLaterCount);
#endif
}

/*
 * Drain the unmarked-arena stack, tracing the children of every marked thing
 * in each interval flagged by DelayMarkingChildren. Tracing may itself delay
 * more marking, so arenas are re-examined until the stack empties.
 */
static void
MarkDelayedChildren(JSTracer *trc)
{
    JSRuntime *rt;
    JSGCArena *a, *aprev;
    unsigned thingSize, traceKind;
    unsigned thingsPerUnmarkedBit;
    unsigned unmarkedBitIndex, thingIndex, indexLimit, endIndex;

    rt = trc->context->runtime;
    a = rt->gcUnmarkedArenaStackTop;
    if (!a) {
        JS_ASSERT(rt->gcMarkLaterCount == 0);
        return;
    }

    for (;;) {
        /*
         * The following assert verifies that the current arena belongs to the
         * unmarked stack, since DelayMarkingChildren ensures that even for
         * the stack's bottom, prevUnmarked != 0 but rather points to
         * itself.
         */
        JSGCArenaInfo *ainfo = a->getInfo();
        JSGCMarkingDelay *markingDelay = a->getMarkingDelay();
        JS_ASSERT(markingDelay->link);
        JS_ASSERT(rt->gcUnmarkedArenaStackTop->getMarkingDelay()->link);
        thingSize = ainfo->list->thingSize;
        traceKind = GetFinalizableArenaTraceKind(ainfo);
        indexLimit = ThingsPerArena(thingSize);
        thingsPerUnmarkedBit = ThingsPerUnmarkedBit(thingSize);

        /*
         * We cannot use do-while loop here as a->unmarkedChildren can be zero
         * before the loop as a leftover from the previous iterations. See
         * comments after the loop.
         */
        while (markingDelay->unmarkedChildren != 0) {
            unmarkedBitIndex = JS_FLOOR_LOG2W(markingDelay->unmarkedChildren);
            markingDelay->unmarkedChildren &= ~(jsuword(1) << unmarkedBitIndex);
#ifdef DEBUG
            JS_ASSERT(rt->gcMarkLaterCount >= thingsPerUnmarkedBit);
            rt->gcMarkLaterCount -= thingsPerUnmarkedBit;
#endif
            thingIndex = unmarkedBitIndex * thingsPerUnmarkedBit;
            endIndex = thingIndex + thingsPerUnmarkedBit;

            /*
             * endIndex can go beyond the last allocated thing as the real
             * limit can be "inside" the bit.
             */
            if (endIndex > indexLimit)
                endIndex = indexLimit;
            uint8 *thing = GCArenaIndexToThing(a, ainfo, thingIndex);
            uint8 *end = GCArenaIndexToThing(a, ainfo, endIndex);
            do {
                JS_ASSERT(thing < end);
                if (IsMarkedGCThing(thing))
                    JS_TraceChildren(trc, thing, traceKind);
                thing += thingSize;
            } while (thing != end);
        }

        /*
         * We finished tracing of all things in the arena but we can only
         * pop it from the stack if the arena is the stack's top.
         *
         * When JS_TraceChildren from the above calls JS_CallTracer that in
         * turn on low C stack calls DelayMarkingChildren and the latter
         * pushes new arenas to the unmarked stack, we have to skip popping
         * of this arena until it becomes the top of the stack again.
         */
        if (a == rt->gcUnmarkedArenaStackTop) {
            aprev = markingDelay->link;
            markingDelay->link = NULL;
            if (a == aprev) {
                /*
                 * prevUnmarked points to itself and we reached the bottom of
                 * the stack.
                 */
                break;
            }
            rt->gcUnmarkedArenaStackTop = a = aprev;
        } else {
            a = rt->gcUnmarkedArenaStackTop;
        }
    }
    JS_ASSERT(rt->gcUnmarkedArenaStackTop);
    JS_ASSERT(!rt->gcUnmarkedArenaStackTop->getMarkingDelay()->link);
    rt->gcUnmarkedArenaStackTop = NULL;
    JS_ASSERT(rt->gcMarkLaterCount == 0);
}

/*
 * Tracer entry point: mark |thing| (or forward to a non-marking tracer's
 * callback) and trace its children, delaying them when the C stack is low.
 */
void
js_CallGCMarker(JSTracer *trc, void *thing, uint32 kind)
{
    JSContext *cx;
    JSRuntime *rt;

    JS_ASSERT(thing);
    JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
    JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);

    if (!IS_GC_MARKING_TRACER(trc)) {
        trc->callback(trc, thing, kind);
        goto out;
    }

    cx = trc->context;
    rt = cx->runtime;
    JS_ASSERT(rt->gcMarkingTracer == trc);
    JS_ASSERT(rt->gcLevel > 0);

    /*
     * Optimize for string and double as their size is known and their tracing
     * is not recursive.
     */
    switch (kind) {
      case JSTRACE_DOUBLE: {
        JSGCArenaInfo *ainfo = JSGCArenaInfo::fromGCThing(thing);
        JS_ASSERT(!ainfo->list);
        if (!ainfo->hasMarkedDoubles) {
            /* Lazily clear the bitmap on the first double marked here. */
            ainfo->hasMarkedDoubles = true;
            JSGCArena::fromGCThing(thing)->clearMarkBitmap();
        }
        MarkIfUnmarkedGCThing(thing);
        goto out;
      }

      case JSTRACE_STRING:
        /* Iterate down dependent-string bases instead of recursing. */
        for (;;) {
            if (JSString::isStatic(thing))
                goto out;
            JS_ASSERT(kind == GetFinalizableThingTraceKind(thing));
            if (!MarkIfUnmarkedGCThing(thing))
                goto out;
            if (!((JSString *) thing)->isDependent())
                goto out;
            thing = ((JSString *) thing)->dependentBase();
        }
        /* NOTREACHED */
    }

    JS_ASSERT(kind == GetFinalizableThingTraceKind(thing));
    if (!MarkIfUnmarkedGCThing(thing))
        goto out;

    if (!cx->insideGCMarkCallback) {
        /*
         * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
         * uses the non-recursive code that otherwise would be called only on
         * a low C stack condition.
         */
#ifdef JS_GC_ASSUME_LOW_C_STACK
# define RECURSION_TOO_DEEP() JS_TRUE
#else
        int stackDummy;
# define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
#endif
        if (RECURSION_TOO_DEEP())
            DelayMarkingChildren(rt, thing);
        else
            JS_TraceChildren(trc, thing, kind);
    } else {
        /*
         * For API compatibility we allow for the callback to assume that
         * after it calls JS_MarkGCThing for the last time, the callback can
         * start to finalize its own objects that are only referenced by
         * unmarked GC things.
         *
         * Since we do not know which call from inside the callback is the
         * last, we ensure that children of all marked things are traced and
         * call MarkDelayedChildren(trc) after tracing the thing.
         *
         * As MarkDelayedChildren unconditionally invokes JS_TraceChildren
         * for the things with unmarked children, calling DelayMarkingChildren
         * is useless here. Hence we always trace thing's children even with a
         * low native stack.
         */
        cx->insideGCMarkCallback = false;
        JS_TraceChildren(trc, thing, kind);
        MarkDelayedChildren(trc);
        cx->insideGCMarkCallback = true;
    }

  out:
#ifdef DEBUG
    trc->debugPrinter = NULL;
    trc->debugPrintArg = NULL;
#endif
    return;     /* to avoid out: right_curl when DEBUG is not defined */
}

/* Trace |v| if it holds a GC thing; no-op for scalars and JSVAL_NULL. */
void
js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
{
    void *thing;
    uint32 kind;

    if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
        thing = JSVAL_TO_TRACEABLE(v);
        kind = JSVAL_TRACE_KIND(v);
        JS_ASSERT(kind == js_GetGCThingTraceKind(thing));
    } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
        /* v can be an arbitrary GC thing reinterpreted as an object. */
        thing = JSVAL_TO_OBJECT(v);
        kind = js_GetGCThingTraceKind(thing);
    } else {
        return;
    }
    js_CallGCMarker(trc, thing, kind);
}

/* Root-hash enumerator: trace one registered root, sanity-checked in DEBUG. */
static JSDHashOperator
gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
                  void *arg)
{
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *) hdr;
    JSTracer *trc = (JSTracer *) arg;
    jsval *rp = (jsval *) rhe->root;
    jsval v = *rp;

    /* Ignore null reference, scalar values, and static strings. */
    if (JSVAL_IS_TRACEABLE(v)) {
#ifdef DEBUG
        /* Verify the root actually points into some GC arena. */
        if (!JSString::isStatic(JSVAL_TO_GCTHING(v))) {
            bool root_points_to_gcArenaList = false;
            jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
            JSRuntime *rt = trc->context->runtime;
            for (unsigned i = 0; i != FINALIZE_LIMIT; i++) {
                JSGCArenaList *arenaList = &rt->gcArenaList[i];
                size_t thingSize = arenaList->thingSize;
                size_t limit = ThingsPerArena(thingSize) * thingSize;
                for (JSGCArena *a = arenaList->head;
                     a;
                     a = a->getInfo()->prev) {
                    if (thing - a->toPageStart() < limit) {
                        root_points_to_gcArenaList = true;
                        break;
                    }
                }
            }
            if (!root_points_to_gcArenaList) {
                for (JSGCArena *a = rt->gcDoubleArenaList.head;
                     a;
                     a = a->getInfo()->prev) {
                    if (thing - a->toPageStart() <
                        DOUBLES_PER_ARENA * sizeof(jsdouble)) {
                        root_points_to_gcArenaList = true;
                        break;
                    }
                }
            }
            if (!root_points_to_gcArenaList && rhe->name) {
                fprintf(stderr,
"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
"The root's name is \"%s\".\n",
                        rhe->name);
            }
            JS_ASSERT(root_points_to_gcArenaList);
        }
#endif
        JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
        js_CallValueTracerIfGCThing(trc, v);
    }

    return JS_DHASH_NEXT;
}

/* Lock-hash enumerator: trace one explicitly locked GC thing. */
static JSDHashOperator
gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
                  void *arg)
{
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *) hdr;
    void *thing = (void *) lhe->thing;
    JSTracer *trc = (JSTracer *) arg;
    uint32 traceKind;

    JS_ASSERT(lhe->count >= 1);
    traceKind = js_GetGCThingTraceKind(thing);
    JS_CALL_TRACER(trc, thing, traceKind, "locked object");
    return JS_DHASH_NEXT;
}

namespace js {

/* Trace every non-null object in |vec|, labeling each by its index. */
void
TraceObjectVector(JSTracer *trc, JSObject **vec, uint32 len)
{
    for (uint32 i = 0; i < len; i++) {
        if (JSObject *obj = vec[i]) {
            JS_SET_TRACING_INDEX(trc, "vector", i);
            js_CallGCMarker(trc, obj, JSTRACE_OBJECT);
        }
    }
}

}

/* Trace all GC things reachable from a single stack frame. */
void
js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
{
    uintN nslots, minargs, skip;

    if (fp->callobj)
        JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
    if (fp->argsobj)
        JS_CALL_OBJECT_TRACER(trc, JSVAL_TO_OBJECT(fp->argsobj), "arguments");
    if (fp->script) {
        js_TraceScript(trc, fp->script);

        /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
        if (fp->slots) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            if (fp->regs && fp->regs->sp) {
                nslots = (uintN) (fp->regs->sp - fp->slots);
                JS_ASSERT(nslots >= fp->script->nfixed);
            } else {
                nslots = fp->script->nfixed;
            }
            TraceValues(trc, nslots, fp->slots, "slot");
        }
    } else {
        JS_ASSERT(!fp->slots);
        JS_ASSERT(!fp->regs);
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags.
 */
    JS_CALL_VALUE_TRACER(trc, fp->thisv, "this");

    if (fp->argv) {
        JS_CALL_VALUE_TRACER(trc, fp->calleeValue(), "callee");
        nslots = fp->argc;
        skip = 0;
        if (fp->fun) {
            minargs = FUN_MINARGS(fp->fun);
            if (minargs > nslots)
                nslots = minargs;
            if (!FUN_INTERPRETED(fp->fun)) {
                JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
                nslots += fp->fun->u.n.extra;
            }
            /*
             * NOTE(review): JSFRAME_ROOTED_ARGV is tested against fun->flags,
             * not fp->flags — matches the historical code, but verify the
             * flag spaces really overlap as intended.
             */
            if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
                skip = 2 + fp->argc;
        }
        TraceValues(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
    }

    JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
    if (fp->scopeChain)
        JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
}

/* Trace the per-context weak roots (newborns, lastAtom, last result). */
void
JSWeakRoots::mark(JSTracer *trc)
{
#ifdef DEBUG
    const char * const newbornNames[] = {
        "newborn_object",             /* FINALIZE_OBJECT */
        "newborn_iter",               /* FINALIZE_ITER */
        "newborn_function",           /* FINALIZE_FUNCTION */
#if JS_HAS_XML_SUPPORT
        "newborn_xml",                /* FINALIZE_XML */
#endif
        "newborn_string",             /* FINALIZE_STRING */
        "newborn_external_string0",   /* FINALIZE_EXTERNAL_STRING0 */
        "newborn_external_string1",   /* FINALIZE_EXTERNAL_STRING1 */
        "newborn_external_string2",   /* FINALIZE_EXTERNAL_STRING2 */
        "newborn_external_string3",   /* FINALIZE_EXTERNAL_STRING3 */
        "newborn_external_string4",   /* FINALIZE_EXTERNAL_STRING4 */
        "newborn_external_string5",   /* FINALIZE_EXTERNAL_STRING5 */
        "newborn_external_string6",   /* FINALIZE_EXTERNAL_STRING6 */
        "newborn_external_string7",   /* FINALIZE_EXTERNAL_STRING7 */
    };
#endif
    for (size_t i = 0; i != JS_ARRAY_LENGTH(finalizableNewborns); ++i) {
        void *newborn = finalizableNewborns[i];
        if (newborn) {
            JS_CALL_TRACER(trc, newborn, GetFinalizableTraceKind(i),
                           newbornNames[i]);
        }
    }
    if (newbornDouble)
        JS_CALL_DOUBLE_TRACER(trc, newbornDouble, "newborn_double");
    JS_CALL_VALUE_TRACER(trc, lastAtom, "lastAtom");
    JS_SET_TRACING_NAME(trc, "lastInternalResult");
    js_CallValueTracerIfGCThing(trc, lastInternalResult);
}

/* Trace every frame in a down-linked frame chain. */
static void inline
TraceFrameChain(JSTracer *trc, JSStackFrame *fp)
{
    do {
        js_TraceStackFrame(trc, fp);
    } while ((fp = fp->down) != NULL);
}

JS_REQUIRES_STACK JS_FRIEND_API(void)
js_TraceContext(JSTracer *trc, JSContext *acx)
{
    JSStackHeader *sh;

    /*
     * Trace active and suspended callstacks.
     *
     * Since js_GetTopStackFrame needs to dereference cx->thread to check for
     * JIT frames, we check for non-null thread here and avoid null checks
     * there. See bug 471197.
     */
#ifdef JS_THREADSAFE
    if (acx->thread)
#endif
    {
        /* If |cx->fp|, the active callstack has newest (top) frame |cx->fp|. */
        JSStackFrame *fp = js_GetTopStackFrame(acx);
        if (fp) {
            JS_ASSERT(!acx->activeCallStack()->isSuspended());
            TraceFrameChain(trc, fp);
            if (JSObject *o = acx->activeCallStack()->getInitialVarObj())
                JS_CALL_OBJECT_TRACER(trc, o, "variables");
        }

        /* Trace suspended frames. */
        CallStack *cur = acx->currentCallStack;
        CallStack *cs = fp ? cur->getPrevious() : cur;
        for (; cs; cs = cs->getPrevious()) {
            TraceFrameChain(trc, cs->getSuspendedFrame());
            if (cs->getInitialVarObj())
                JS_CALL_OBJECT_TRACER(trc, cs->getInitialVarObj(), "var env");
        }
    }

    /* Mark other roots-by-definition in acx. */
    if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
        JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
    acx->weakRoots.mark(trc);
    if (acx->throwing) {
        JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
    } else {
        /* Avoid keeping GC-ed junk stored in JSContext.exception. */
        acx->exception = JSVAL_NULL;
    }

    for (sh = acx->stackHeaders; sh; sh = sh->down) {
        METER(trc->context->runtime->gcStats.stackseg++);
        METER(trc->context->runtime->gcStats.segslots += sh->nslots);
        TraceValues(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
    }

    for (js::AutoGCRooter *gcr = acx->autoGCRooters; gcr; gcr = gcr->down)
        gcr->trace(trc);

    if (acx->sharpObjectMap.depth > 0)
        js_TraceSharpMap(trc, &acx->sharpObjectMap);

    js_TraceRegExpStatics(trc, acx);

#ifdef JS_TRACER
    InterpState *state = acx->interpState;
    while (state) {
        if (state->nativeVp)
            TraceValues(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
        state = state->prev;
    }
#endif
}

/* Trace every root in the runtime: root/lock hashes, atoms, all contexts. */
JS_REQUIRES_STACK void
js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
{
    JSRuntime *rt = trc->context->runtime;
    JSContext *iter, *acx;

    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
    JS_DHashTableEnumerate(&rt->gcLocksHash, gc_lock_traversal, trc);
    js_TraceAtomState(trc, allAtoms);
    js_TraceRuntimeNumberState(trc);
    js_MarkTraps(trc);

    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
        js_TraceContext(trc, acx);

    js_TraceThreads(rt, trc);

    if (rt->gcExtraRootsTraceOp)
        rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);

#ifdef JS_TRACER
    for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
        if (rt->builtinFunctions[i])
            JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i],
                                  "builtin function");
    }
#endif
}

/* Request a GC at the next safe point via the operation callbacks. */
void
js_TriggerGC(JSContext *cx, JSBool gcLocked)
{
    JSRuntime *rt = cx->runtime;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->requestDepth > 0);
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcIsNeeded)
        return;

    /*
     * Trigger the GC when it is safe to call an operation callback on any
     * thread.
 */
    rt->gcIsNeeded = JS_TRUE;
    js_TriggerAllOperationCallbacks(rt, gcLocked);
}

/*
 * Apply a deferred __proto__/__parent__ set, detecting cycles through the
 * wrapped-object chain before writing the slot.
 */
static void
ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
{
    JSObject *obj = ssr->obj;
    JSObject *pobj = ssr->pobj;
    uint32 slot = ssr->slot;

    while (pobj) {
        pobj = js_GetWrappedObject(cx, pobj);
        if (pobj == obj) {
            ssr->cycle = true;
            return;
        }
        pobj = JSVAL_TO_OBJECT(pobj->getSlot(slot));
    }

    pobj = ssr->pobj;
    if (slot == JSSLOT_PROTO) {
        obj->setProto(pobj);
    } else {
        JS_ASSERT(slot == JSSLOT_PARENT);
        obj->setParent(pobj);
    }
}

/* Destroy all scripts queued on the thread's scriptsToGC lists. */
void
js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
{
    JSScript **listp, *script;

    for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
        listp = &data->scriptsToGC[i];
        while ((script = *listp) != NULL) {
            *listp = script->u.nextToGC;
            script->u.nextToGC = NULL;
            js_DestroyScript(cx, script);
        }
    }
}

inline void
FinalizeObject(JSContext *cx, JSObject *obj, unsigned thingKind)
{
    JS_ASSERT(thingKind == FINALIZE_OBJECT ||
              thingKind == FINALIZE_ITER ||
              thingKind == FINALIZE_FUNCTION);

    /* Cope with stillborn objects that have no map. */
    if (!obj->map)
        return;

    /* Finalize obj first, in case it needs map and slots. */
    JSClass *clasp = obj->getClass();
    if (clasp->finalize)
        clasp->finalize(cx, obj);

#ifdef INCLUDE_MOZILLA_DTRACE
    if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
        jsdtrace_object_finalize(obj);
#endif

    if (JS_LIKELY(obj->isNative())) {
        JSScope *scope = obj->scope();
        if (scope->isSharedEmpty())
            static_cast<JSEmptyScope *>(scope)->dropFromGC(cx);
        else
            scope->destroy(cx);
    }
    if (obj->hasSlotsArray())
        obj->freeSlotsArray(cx);
}

inline void
FinalizeFunction(JSContext *cx, JSFunction *fun, unsigned thingKind)
{
    FinalizeObject(cx, FUN_OBJECT(fun), thingKind);
}

/* Like FinalizeObject, but fires the debugger object hook first. */
inline void
FinalizeHookedObject(JSContext *cx, JSObject *obj, unsigned thingKind)
{
    if (!obj->map)
        return;

    if (cx->debugHooks->objectHook) {
        cx->debugHooks->objectHook(cx, obj, JS_FALSE,
                                   cx->debugHooks->objectHookData);
    }
    FinalizeObject(cx, obj, thingKind);
}

inline void
FinalizeHookedFunction(JSContext *cx, JSFunction *fun, unsigned thingKind)
{
    FinalizeHookedObject(cx, FUN_OBJECT(fun), thingKind);
}

#if JS_HAS_XML_SUPPORT
inline void
FinalizeXML(JSContext *cx, JSXML *xml, unsigned thingKind)
{
    js_FinalizeXML(cx, xml);
}
#endif

JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);

/* Per-type finalizers for external strings; slots claimed via the API below. */
static JSStringFinalizeOp str_finalizers[JS_EXTERNAL_STRING_LIMIT] = {
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};

intN
js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
                                 JSStringFinalizeOp newop)
{
    for (uintN i = 0; i != JS_ARRAY_LENGTH(str_finalizers); i++) {
        if (str_finalizers[i] == oldop) {
            str_finalizers[i] = newop;
            return intN(i);
        }
    }
    return -1;
}

inline void
FinalizeString(JSContext *cx, JSString *str, unsigned thingKind)
{
    JS_ASSERT(FINALIZE_STRING == thingKind);
    JS_ASSERT(!JSString::isStatic(str));
    JS_RUNTIME_UNMETER(cx->runtime, liveStrings);
    if (str->isDependent()) {
        /* Dependent strings do not own their chars. */
        JS_ASSERT(str->dependentBase());
        JS_RUNTIME_UNMETER(cx->runtime, liveDependentStrings);
    } else {
        /*
         * flatChars for stillborn string is null, but cx->free checks
         * for a null pointer on its own.
         */
        cx->free(str->flatChars());
    }
}

inline void
FinalizeExternalString(JSContext *cx, JSString *str, unsigned thingKind)
{
    unsigned type = thingKind - FINALIZE_EXTERNAL_STRING0;
    JS_ASSERT(type < JS_ARRAY_LENGTH(str_finalizers));
    JS_ASSERT(!JSString::isStatic(str));
    JS_ASSERT(!str->isDependent());

    JS_RUNTIME_UNMETER(cx->runtime, liveStrings);

    /* A stillborn string has null chars. */
    jschar *chars = str->flatChars();
    if (!chars)
        return;
    JSStringFinalizeOp finalizer = str_finalizers[type];
    if (finalizer)
        finalizer(cx, str);
}

/*
 * This function is called from js_FinishAtomState to force the finalization
 * of the permanently interned strings when cx is not available.
 */
void
js_FinalizeStringRT(JSRuntime *rt, JSString *str)
{
    JS_RUNTIME_UNMETER(rt, liveStrings);
    JS_ASSERT(!JSString::isStatic(str));

    if (str->isDependent()) {
        /* A dependent string can not be external and must be valid. */
        JS_ASSERT(JSGCArenaInfo::fromGCThing(str)->list->thingKind ==
                  FINALIZE_STRING);
        JS_ASSERT(str->dependentBase());
        JS_RUNTIME_UNMETER(rt, liveDependentStrings);
    } else {
        unsigned thingKind = JSGCArenaInfo::fromGCThing(str)->list->thingKind;
        JS_ASSERT(IsFinalizableStringKind(thingKind));

        /* A stillborn string has null chars, so is not valid. */
        jschar *chars = str->flatChars();
        if (!chars)
            return;
        if (thingKind == FINALIZE_STRING) {
            rt->free(chars);
        } else {
            unsigned type = thingKind - FINALIZE_EXTERNAL_STRING0;
            JS_ASSERT(type < JS_ARRAY_LENGTH(str_finalizers));
            JSStringFinalizeOp finalizer = str_finalizers[type];
            if (finalizer) {
                /*
                 * Assume that the finalizer for the permanently interned
                 * string knows how to deal with null context.
                 */
                finalizer(NULL, str);
            }
        }
    }
}

/*
 * Sweep one arena list: finalize every unmarked, allocated thing of type T
 * and rebuild each arena's free list in ascending address order.
 */
template <typename T,
          void finalizer(JSContext *cx, T *thing, unsigned thingKind)>
static void
FinalizeArenaList(JSContext *cx, unsigned thingKind,
                  GCArenaReleaser *releaser)
{
    JS_STATIC_ASSERT(!(sizeof(T) & GC_CELL_MASK));
    JSGCArenaList *arenaList = &cx->runtime->gcArenaList[thingKind];
    JS_ASSERT(sizeof(T) == arenaList->thingSize);

    JSGCArena **ap = &arenaList->head;
    JSGCArena *a = *ap;
    if (!a)
        return;

#ifdef JS_GCMETER
    uint32 nlivearenas = 0, nkilledarenas = 0, nthings = 0;
#endif
    for (;;) {
        JSGCArenaInfo *ainfo = a->getInfo();
        JS_ASSERT(ainfo->list == arenaList);
        JS_ASSERT(!a->getMarkingDelay()->link);
        JS_ASSERT(a->getMarkingDelay()->unmarkedChildren == 0);

        JSGCThing *freeList = NULL;
        JSGCThing **tailp = &freeList;
        bool allClear = true;

        jsuword thing = a->toPageStart();
        jsuword thingsEnd = thing + GC_ARENA_SIZE / sizeof(T) * sizeof(T);

        /* nextFree walks the old free list; merged into the new one below. */
        jsuword nextFree = reinterpret_cast<jsuword>(ainfo->freeList);
        if (!nextFree) {
            nextFree = thingsEnd;
        } else {
            JS_ASSERT(thing <= nextFree);
            JS_ASSERT(nextFree < thingsEnd);
        }

        jsuword gcCellIndex = 0;
        jsbitmap *bitmap = a->getMarkBitmap();
        for (;; thing += sizeof(T), gcCellIndex += sizeof(T) >> GC_CELL_SHIFT) {
            if (thing == nextFree) {
                /* The cell was already free before this sweep. */
                if (thing == thingsEnd)
                    break;
                nextFree = reinterpret_cast<jsuword>(
                    reinterpret_cast<JSGCThing *>(nextFree)->link);
                if (!nextFree) {
                    nextFree = thingsEnd;
                } else {
                    JS_ASSERT(thing < nextFree);
                    JS_ASSERT(nextFree < thingsEnd);
                }
            } else if (JS_TEST_BIT(bitmap, gcCellIndex)) {
                /* Marked: the thing survives this GC. */
                allClear = false;
                METER(nthings++);
                continue;
            } else {
                /* Allocated but unmarked: finalize and free the thing. */
                T *t = reinterpret_cast<T *>(thing);
                finalizer(cx, t, thingKind);
#ifdef DEBUG
                memset(t, JS_FREE_PATTERN, sizeof(T));
#endif
            }
            JSGCThing *t = reinterpret_cast<JSGCThing *>(thing);
            *tailp = t;
            tailp = &t->link;
        }

#ifdef DEBUG
        /* Check that the free list is consistent. */
        unsigned nfree = 0;
        if (freeList) {
            JS_ASSERT(tailp != &freeList);
            JSGCThing *t = freeList;
            for (;;) {
                ++nfree;
                if (&t->link == tailp)
                    break;
                JS_ASSERT(t < t->link);
                t = t->link;
            }
        }
#endif
        if (allClear) {
            /*
             * Forget just assembled free list head for the arena and
             * add the arena itself to the destroy list.
*/JS_ASSERT(nfree==ThingsPerArena(sizeof(T)));*ap=ainfo->prev;releaser->release(cx->runtime,a);METER(nkilledarenas++);}else{JS_ASSERT(nfree<ThingsPerArena(sizeof(T)));a->clearMarkBitmap();*tailp=NULL;ainfo->freeList=freeList;ap=&ainfo->prev;METER(nlivearenas++);}if(!(a=*ap))break;}arenaList->cursor=arenaList->head;METER(UpdateArenaStats(&cx->runtime->gcStats.arenaStats[thingKind],nlivearenas,nkilledarenas,nthings));}#ifdef MOZ_GCTIMERstructGCTimer{uint64enter;uint64startMark;uint64startSweep;uint64sweepObjectEnd;uint64sweepStringEnd;uint64sweepDoubleEnd;uint64sweepDestroyEnd;uint64end;};voiddumpGCTimer(GCTimer*gcT,uint64firstEnter,boollastGC){staticFILE*gcFile;if(!gcFile){gcFile=fopen("gcTimer.dat","w");fprintf(gcFile," AppTime, Total, Mark, Sweep, FinObj, ");fprintf(gcFile,"FinStr, FinDbl, Destroy, newChunks, destoyChunks\n");}JS_ASSERT(gcFile);fprintf(gcFile,"%12.1f, %6.1f, %6.1f, %6.1f, %6.1f, %6.1f, %6.1f, %7.1f, ",(double)(gcT->enter-firstEnter)/1E6,(double)(gcT->end-gcT->enter)/1E6,(double)(gcT->startSweep-gcT->startMark)/1E6,(double)(gcT->sweepDestroyEnd-gcT->startSweep)/1E6,(double)(gcT->sweepObjectEnd-gcT->startSweep)/1E6,(double)(gcT->sweepStringEnd-gcT->sweepObjectEnd)/1E6,(double)(gcT->sweepDoubleEnd-gcT->sweepStringEnd)/1E6,(double)(gcT->sweepDestroyEnd-gcT->sweepDoubleEnd)/1E6);fprintf(gcFile,"%10d, %10d \n",newChunkCount,destroyChunkCount);fflush(gcFile);if(lastGC){fclose(gcFile);gcFile=NULL;}}# define GCTIMER_PARAM , GCTimer &gcTimer# define GCTIMER_ARG , gcTimer# define TIMESTAMP(x) (x = rdtsc())#else# define GCTIMER_PARAM# define GCTIMER_ARG# define TIMESTAMP(x) ((void) 0)#endifstaticvoidSweepDoubles(JSRuntime*rt,GCArenaReleaser*releaser){#ifdef JS_GCMETERuint32nlivearenas=0,nkilledarenas=0,nthings=0;#endifJSGCArena**ap=&rt->gcDoubleArenaList.head;while(JSGCArena*a=*ap){JSGCArenaInfo*ainfo=a->getInfo();if(!ainfo->hasMarkedDoubles){/* No marked double values in the arena. 
*/*ap=ainfo->prev;releaser->release(rt,a);METER(nkilledarenas++);}else{#ifdef JS_GCMETERjsdouble*thing=reinterpret_cast<jsdouble*>(a->toPageStart());jsdouble*end=thing+DOUBLES_PER_ARENA;for(;thing!=end;++thing){if(IsMarkedGCThing(thing))METER(nthings++);}METER(nlivearenas++);#endifainfo->hasMarkedDoubles=false;ap=&ainfo->prev;}}METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,nlivearenas,nkilledarenas,nthings));rt->gcDoubleArenaList.cursor=rt->gcDoubleArenaList.head;}/* * Common cache invalidation and so forth that must be done before GC. Even if * GCUntilDone calls GC several times, this work only needs to be done once. */staticvoidPreGCCleanup(JSContext*cx,JSGCInvocationKindgckind){JSRuntime*rt=cx->runtime;/* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */rt->gcIsNeeded=JS_FALSE;/* Reset malloc counter. */rt->resetGCMallocBytes();#ifdef JS_DUMP_SCOPE_METERS{externvoidjs_DumpScopeMeters(JSRuntime*rt);js_DumpScopeMeters(rt);}#endif#ifdef JS_TRACERPurgeJITOracle();#endif/* * Reset the property cache's type id generator so we can compress ids. * Same for the protoHazardShape proxy-shape standing in for all object * prototypes having readonly or setter properties. */if(rt->shapeGen&SHAPE_OVERFLOW_BIT#ifdef JS_GC_ZEAL||rt->gcZeal>=1#endif){rt->gcRegenShapes=true;rt->gcRegenShapesScopeFlag^=JSScope::SHAPE_REGEN;rt->shapeGen=0;rt->protoHazardShape=0;}js_PurgeThreads(cx);{JSContext*iter=NULL;while(JSContext*acx=js_ContextIterator(rt,JS_TRUE,&iter))acx->purge();}#ifdef JS_TRACERif(gckind==GC_LAST_CONTEXT){/* Clear builtin functions, which are recreated on demand. */PodArrayZero(rt->builtinFunctions);}#endif/* The last ditch GC preserves weak roots. */if(!(gckind&GC_KEEP_ATOMS))JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);}/* * Perform mark-and-sweep GC. * * In a JS_THREADSAFE build, the calling thread must be rt->gcThread and each * other thread must be either outside all requests or blocked waiting for GC * to finish. 
Note that the caller does not hold rt->gcLock. */staticvoidGC(JSContext*cx,JSGCInvocationKindgckindGCTIMER_PARAM){JSRuntime*rt=cx->runtime;rt->gcNumber++;JS_ASSERT(!rt->gcUnmarkedArenaStackTop);JS_ASSERT(rt->gcMarkLaterCount==0);/* * Mark phase. */JSTracertrc;JS_TRACER_INIT(&trc,cx,NULL);rt->gcMarkingTracer=&trc;JS_ASSERT(IS_GC_MARKING_TRACER(&trc));for(JSGCArena*a=rt->gcDoubleArenaList.head;a;){JSGCArenaInfo*ainfo=a->getInfo();JS_ASSERT(!ainfo->hasMarkedDoubles);a=ainfo->prev;}{/* * Query rt->gcKeepAtoms only when we know that all other threads are * suspended, see bug 541790. */boolkeepAtoms=(gckind&GC_KEEP_ATOMS)||rt->gcKeepAtoms!=0;js_TraceRuntime(&trc,keepAtoms);js_MarkScriptFilenames(rt,keepAtoms);}/* * Mark children of things that caused too deep recursion during the above * tracing. */MarkDelayedChildren(&trc);JS_ASSERT(!cx->insideGCMarkCallback);if(rt->gcCallback){cx->insideGCMarkCallback=JS_TRUE;(void)rt->gcCallback(cx,JSGC_MARK_END);JS_ASSERT(cx->insideGCMarkCallback);cx->insideGCMarkCallback=JS_FALSE;}JS_ASSERT(rt->gcMarkLaterCount==0);rt->gcMarkingTracer=NULL;#ifdef JS_THREADSAFEcx->createDeallocatorTask();#endif/* * Sweep phase. * * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set * so that any attempt to allocate a GC-thing from a finalizer will fail, * rather than nest badly and leave the unmarked newborn to be swept. * * We first sweep atom state so we can use js_IsAboutToBeFinalized on * JSString or jsdouble held in a hashtable to check if the hashtable * entry can be freed. Note that even after the entry is freed, JSObject * finalizers can continue to access the corresponding jsdouble* and * JSString* assuming that they are unique. This works since the * atomization API must not be called during GC. */TIMESTAMP(gcTimer.startSweep);js_SweepAtomState(cx);/* Finalize watch points associated with unreachable objects. */js_SweepWatchPoints(cx);#ifdef DEBUG/* Save the pre-sweep count of scope-mapped properties. 
*/rt->liveScopePropsPreSweep=rt->liveScopeProps;#endif/* * We finalize JSObject instances before JSString, double and other GC * things to ensure that object's finalizer can access them even if they * will be freed. * * To minimize the number of checks per each to be freed object and * function we use separated list finalizers when a debug hook is * installed. */js::GCArenaReleaserarenaReleaser;if(!cx->debugHooks->objectHook){FinalizeArenaList<JSObject,FinalizeObject>(cx,FINALIZE_ITER,&arenaReleaser);FinalizeArenaList<JSObject,FinalizeObject>(cx,FINALIZE_OBJECT,&arenaReleaser);FinalizeArenaList<JSFunction,FinalizeFunction>(cx,FINALIZE_FUNCTION,&arenaReleaser);}else{FinalizeArenaList<JSObject,FinalizeHookedObject>(cx,FINALIZE_ITER,&arenaReleaser);FinalizeArenaList<JSObject,FinalizeHookedObject>(cx,FINALIZE_OBJECT,&arenaReleaser);FinalizeArenaList<JSFunction,FinalizeHookedFunction>(cx,FINALIZE_FUNCTION,&arenaReleaser);}#if JS_HAS_XML_SUPPORTFinalizeArenaList<JSXML,FinalizeXML>(cx,FINALIZE_XML,&arenaReleaser);#endifTIMESTAMP(gcTimer.sweepObjectEnd);/* * We sweep the deflated cache before we finalize the strings so the * cache can safely use js_IsAboutToBeFinalized.. */rt->deflatedStringCache->sweep(cx);FinalizeArenaList<JSString,FinalizeString>(cx,FINALIZE_STRING,&arenaReleaser);for(unsignedi=FINALIZE_EXTERNAL_STRING0;i<=FINALIZE_EXTERNAL_STRING_LAST;++i){FinalizeArenaList<JSString,FinalizeExternalString>(cx,i,&arenaReleaser);}TIMESTAMP(gcTimer.sweepStringEnd);SweepDoubles(rt,&arenaReleaser);TIMESTAMP(gcTimer.sweepDoubleEnd);/* * Sweep the runtime's property tree after finalizing objects, in case any * had watchpoints referencing tree nodes. */js::SweepScopeProperties(cx);/* * Sweep script filenames after sweeping functions in the generic loop * above. In this way when a scripted function's finalizer destroys the * script and calls rt->destroyScriptHook, the hook can still access the * script's filename. See bug 323267. 
*/js_SweepScriptFilenames(rt);/* * Destroy arenas after we finished the sweeping so finalizers can safely * use js_IsAboutToBeFinalized(). */arenaReleaser.freeArenas(rt);DestroyEmptyGCChunks(rt,false);TIMESTAMP(gcTimer.sweepDestroyEnd);#ifdef JS_THREADSAFEcx->submitDeallocatorTask();#endifif(rt->gcCallback)(void)rt->gcCallback(cx,JSGC_FINALIZE_END);#ifdef DEBUG_srcnotesize{externvoidDumpSrcNoteSizeHist();DumpSrcNoteSizeHist();printf("GC HEAP SIZE %lu\n",(unsignedlong)rt->gcBytes);}#endif#ifdef JS_SCOPE_DEPTH_METER{staticFILE*fp;if(!fp)fp=fopen("/tmp/scopedepth.stats","w");if(fp){JS_DumpBasicStats(&rt->protoLookupDepthStats,"proto-lookup depth",fp);JS_DumpBasicStats(&rt->scopeSearchDepthStats,"scope-search depth",fp);JS_DumpBasicStats(&rt->hostenvScopeDepthStats,"hostenv scope depth",fp);JS_DumpBasicStats(&rt->lexicalScopeDepthStats,"lexical scope depth",fp);putc('\n',fp);fflush(fp);}}#endif /* JS_SCOPE_DEPTH_METER */#ifdef JS_DUMP_LOOP_STATS{staticFILE*lsfp;if(!lsfp)lsfp=fopen("/tmp/loopstats","w");if(lsfp){JS_DumpBasicStats(&rt->loopStats,"loops",lsfp);fflush(lsfp);}}#endif /* JS_DUMP_LOOP_STATS */}/* * GC, repeatedly if necessary, until we think we have not created any new * garbage and no other threads are demanding more GC. */staticvoidGCUntilDone(JSContext*cx,JSGCInvocationKindgckindGCTIMER_PARAM){JS_ASSERT_NOT_ON_TRACE(cx);JSRuntime*rt=cx->runtime;boolfirstRun=true;do{rt->gcLevel=1;rt->gcPoke=JS_FALSE;AutoUnlockGCunlock(rt);if(firstRun){PreGCCleanup(cx,gckind);TIMESTAMP(gcTimer.startMark);firstRun=false;}GC(cx,gckindGCTIMER_ARG);// GC again if:// - another thread, not in a request, called js_GC// - js_GC was called recursively// - a finalizer called js_RemoveRoot or js_UnlockGCThingRT.}while(rt->gcLevel>1||rt->gcPoke);}/* * Call the GC callback, if any, to signal that GC is starting. Return false if * the callback vetoes GC. 
*/staticboolFireGCBegin(JSContext*cx,JSGCInvocationKindgckind){JSRuntime*rt=cx->runtime;JSGCCallbackcallback=rt->gcCallback;/* * Let the API user decide to defer a GC if it wants to (unless this * is the last context). Invoke the callback regardless. Sample the * callback in case we are freely racing with a JS_SetGCCallback{,RT} on * another thread. */if(gckind!=GC_SET_SLOT_REQUEST&&callback){Conditionally<AutoUnlockGC>unlockIf(!!(gckind&GC_LOCK_HELD),rt);returncallback(cx,JSGC_BEGIN)||gckind==GC_LAST_CONTEXT;}returntrue;}/* * Call the GC callback, if any, to signal that GC is finished. If the callback * creates garbage and we should GC again, return false; otherwise return true. */staticboolFireGCEnd(JSContext*cx,JSGCInvocationKindgckind){JSRuntime*rt=cx->runtime;JSGCCallbackcallback=rt->gcCallback;/* * Execute JSGC_END callback outside the lock. Again, sample the callback * pointer in case it changes, since we are outside of the GC vs. requests * interlock mechanism here. */if(gckind!=GC_SET_SLOT_REQUEST&&callback){if(!(gckind&GC_KEEP_ATOMS)){(void)callback(cx,JSGC_END);/* * On shutdown, iterate until the JSGC_END callback stops creating * garbage. */if(gckind==GC_LAST_CONTEXT&&rt->gcPoke)returnfalse;}else{/* * We allow JSGC_END implementation to force a full GC or allocate * new GC things. Thus we must protect the weak roots from garbage * collection and overwrites. */AutoSaveWeakRootssave(cx);AutoKeepAtomskeep(rt);AutoUnlockGCunlock(rt);(void)callback(cx,JSGC_END);}}returntrue;}/* * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with * rt->gcLock already held, so the lock should be kept on return. */voidjs_GC(JSContext*cx,JSGCInvocationKindgckind){JSRuntime*rt;#ifdef JS_THREADSAFEsize_trequestDebit;#endifJS_ASSERT_IF(gckind==GC_LAST_DITCH,!JS_ON_TRACE(cx));rt=cx->runtime;#ifdef JS_THREADSAFE/* * We allow js_GC calls outside a request but the context must be bound * to the current thread. 
*/JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));/* Avoid deadlock. */JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));#endif/* * Don't collect garbage if the runtime isn't up, and cx is not the last * context in the runtime. The last context must force a GC, and nothing * should suppress that final collection or there may be shutdown leaks, * or runtime bloat until the next context is created. */if(rt->state!=JSRTS_UP&&gckind!=GC_LAST_CONTEXT)return;#ifdef MOZ_GCTIMERstaticuint64firstEnter=rdtsc();GCTimergcTimer;memset(&gcTimer,0,sizeof(GCTimer));#endifTIMESTAMP(gcTimer.enter);restart_at_beginning:if(!FireGCBegin(cx,gckind)){/* * It's possible that we've looped back to this code from the 'goto * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and * that rt->gcLevel is now 0. Don't return without notifying! */if(rt->gcLevel==0&&(gckind&GC_LOCK_HELD))JS_NOTIFY_GC_DONE(rt);return;}/* Lock out other GC allocator and collector invocations. */if(!(gckind&GC_LOCK_HELD))JS_LOCK_GC(rt);METER(rt->gcStats.poke++);rt->gcPoke=JS_FALSE;#ifdef JS_THREADSAFE/* * Check if the GC is already running on this or another thread and * delegate the job to it. */if(rt->gcLevel>0){JS_ASSERT(rt->gcThread);/* Bump gcLevel to restart the current GC, so it finds new garbage. */rt->gcLevel++;METER_UPDATE_MAX(rt->gcStats.maxlevel,rt->gcLevel);/* * If the GC runs on another thread, temporarily suspend all requests * running on the current thread and wait until the GC is done. */if(rt->gcThread!=cx->thread){requestDebit=js_CountThreadRequests(cx);JS_ASSERT(requestDebit<=rt->requestCount);#ifdef JS_TRACERJS_ASSERT_IF(requestDebit==0,!JS_ON_TRACE(cx));#endifif(requestDebit!=0){#ifdef JS_TRACERif(JS_ON_TRACE(cx)){/* * Leave trace before we decrease rt->requestCount and * notify the GC. Otherwise the GC may start immediately * after we unlock while this thread is still on trace. 
*/AutoUnlockGCunlock(rt);LeaveTrace(cx);}#endifrt->requestCount-=requestDebit;if(rt->requestCount==0)JS_NOTIFY_REQUEST_DONE(rt);/* * See comments before another call to js_ShareWaitingTitles * below. */cx->thread->gcWaiting=true;js_ShareWaitingTitles(cx);/* * Make sure that the GC from another thread respects * GC_KEEP_ATOMS. */Conditionally<AutoKeepAtoms>keepIf(!!(gckind&GC_KEEP_ATOMS),rt);/* * Check that we did not release the GC lock above and let the * GC to finish before we wait. */JS_ASSERT(rt->gcLevel>0);do{JS_AWAIT_GC_DONE(rt);}while(rt->gcLevel>0);cx->thread->gcWaiting=false;rt->requestCount+=requestDebit;}}if(!(gckind&GC_LOCK_HELD))JS_UNLOCK_GC(rt);return;}/* No other thread is in GC, so indicate that we're now in GC. */rt->gcLevel=1;rt->gcThread=cx->thread;/* * Notify all operation callbacks, which will give them a chance to * yield their current request. Contexts that are not currently * executing will perform their callback at some later point, * which then will be unnecessary, but harmless. */js_NudgeOtherContexts(cx);/* * Discount all the requests on the current thread from contributing * to rt->requestCount before we wait for all other requests to finish. * JS_NOTIFY_REQUEST_DONE, which will wake us up, is only called on * rt->requestCount transitions to 0. */requestDebit=js_CountThreadRequests(cx);JS_ASSERT_IF(cx->requestDepth!=0,requestDebit>=1);JS_ASSERT(requestDebit<=rt->requestCount);if(requestDebit!=rt->requestCount){rt->requestCount-=requestDebit;/* * Share any title that is owned by the GC thread before we wait, to * avoid a deadlock with ClaimTitle. We also set the gcWaiting flag so * that ClaimTitle can claim the title ownership from the GC thread if * that function is called while the GC is waiting. 
*/cx->thread->gcWaiting=true;js_ShareWaitingTitles(cx);do{JS_AWAIT_REQUEST_DONE(rt);}while(rt->requestCount>0);cx->thread->gcWaiting=false;rt->requestCount+=requestDebit;}#else /* !JS_THREADSAFE *//* Bump gcLevel and return rather than nest; the outer gc will restart. */rt->gcLevel++;METER_UPDATE_MAX(rt->gcStats.maxlevel,rt->gcLevel);if(rt->gcLevel>1)return;#endif /* !JS_THREADSAFE *//* * Set rt->gcRunning here within the GC lock, and after waiting for any * active requests to end, so that new requests that try to JS_AddRoot, * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for * rt->gcLevel to drop to zero, while request-less calls to the *Root* * APIs block in js_AddRoot or js_RemoveRoot (see above in this file), * waiting for GC to finish. */rt->gcRunning=JS_TRUE;if(gckind==GC_SET_SLOT_REQUEST){JSSetSlotRequest*ssr;while((ssr=rt->setSlotRequests)!=NULL){rt->setSlotRequests=ssr->next;AutoUnlockGCunlock(rt);ssr->next=NULL;ProcessSetSlotRequest(cx,ssr);}/* * We assume here that killing links to parent and prototype objects * does not create garbage (such objects typically are long-lived and * widely shared, e.g. global objects, Function.prototype, etc.). We * collect garbage only if a racing thread attempted GC and is waiting * for us to finish (gcLevel > 1) or if someone already poked us. */if(rt->gcLevel==1&&!rt->gcPoke&&!rt->gcIsNeeded)gotodone_running;rt->gcLevel=0;rt->gcPoke=JS_FALSE;rt->gcRunning=JS_FALSE;#ifdef JS_THREADSAFErt->gcThread=NULL;#endifgckind=GC_LOCK_HELD;gotorestart_at_beginning;}if(!JS_ON_TRACE(cx))GCUntilDone(cx,gckindGCTIMER_ARG);rt->setGCLastBytes(rt->gcBytes);done_running:rt->gcLevel=0;rt->gcRunning=rt->gcRegenShapes=false;#ifdef JS_THREADSAFErt->gcThread=NULL;JS_NOTIFY_GC_DONE(rt);/* * Unlock unless we have GC_LOCK_HELD which requires locked GC on return. 
*/if(!(gckind&GC_LOCK_HELD))JS_UNLOCK_GC(rt);#endifif(!FireGCEnd(cx,gckind))gotorestart_at_beginning;TIMESTAMP(gcTimer.end);#ifdef MOZ_GCTIMERif(gcTimer.startMark>0)dumpGCTimer(&gcTimer,firstEnter,gckind==GC_LAST_CONTEXT);newChunkCount=0;destroyChunkCount=0;#endif}