/*
** $Id: lgc.c $
** Garbage Collector
** See Copyright Notice in lua.h
*/

#define lgc_c
#define LUA_CORE

#include "lprefix.h"


#include <stdio.h>
#include <string.h>


#include "lua.h"

#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"


/*
** Maximum number of elements to sweep in each single step.
** (Large enough to dissipate fixed overheads but small enough
** to allow small steps for the collector.)
*/
#define GCSWEEPMAX 100

/*
** Maximum number of finalizers to call in each single step.
*/
#define GCFINMAX 10


/*
** Cost of calling one finalizer.
*/
#define GCFINALIZECOST 50


/*
** The equivalent, in bytes, of one unit of "work" (visiting a slot,
** sweeping an object, etc.)
*/
#define WORK2MEM sizeof(TValue)


/*
** macro to adjust 'pause': 'pause' is actually used like
** 'pause / PAUSEADJ' (value chosen by tests)
*/
#define PAUSEADJ 100


/* mask with all color bits */
#define maskcolors (bitmask(BLACKBIT) | WHITEBITS)

/* mask with all GC bits */
#define maskgcbits (maskcolors | AGEBITS)


/* macro to erase all color bits then set only the current white bit */
#define makewhite(g,x) \
  (x->marked = cast_byte((x->marked & ~maskcolors) | luaC_white(g)))

/* make an object gray (neither white nor black) */
#define set2gray(x) resetbits(x->marked, maskcolors)


/* make an object black (coming from any color) */
#define set2black(x) \
  (x->marked = cast_byte((x->marked & ~WHITEBITS) | bitmask(BLACKBIT)))


#define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x)))

#define keyiswhite(n) (keyiscollectable(n) && iswhite(gckey(n)))


/*
** Protected access to objects in values
*/
#define gcvalueN(o) (iscollectable(o) ? gcvalue(o) : NULL)


#define markvalue(g,o) { checkliveness(g->mainthread,o); \
  if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); }

#define markkey(g, n) { if keyiswhite(n) reallymarkobject(g,gckey(n)); }

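/* (Note: the condition in 'markkey' carries no extra parentheses because
   'keyiswhite' already expands to a fully parenthesized expression; see
   its definition above.) */
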
#define markobject(g,t) { if (iswhite(t)) reallymarkobject(g, obj2gco(t)); }

/*
** mark an object that can be NULL (either because it is really optional,
** or it was stripped as debug info, or inside an uncompleted structure)
*/
#define markobjectN(g,t) { if (t) markobject(g,t); }


static void reallymarkobject (global_State *g, GCObject *o);
static lu_mem atomic (lua_State *L);
static void entersweep (lua_State *L);


/*
** {======================================================
** Generic functions
** =======================================================
*/


/*
** one after last element in a hash array
*/
#define gnodelast(h) gnode(h, cast_sizet(sizenode(h)))


static GCObject **getgclist (GCObject *o) {
  switch (o->tt) {
    case LUA_VTABLE: return &gco2t(o)->gclist;
    case LUA_VLCL: return &gco2lcl(o)->gclist;
    case LUA_VCCL: return &gco2ccl(o)->gclist;
    case LUA_VTHREAD: return &gco2th(o)->gclist;
    case LUA_VPROTO: return &gco2p(o)->gclist;
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      lua_assert(u->nuvalue > 0);
      return &u->gclist;
    }
    default: lua_assert(0); return 0;
  }
}


/*
** Link a collectable object 'o' with a known type into the list 'p'.
** (Must be a macro to access the 'gclist' field in different types.)
*/
#define linkgclist(o,p) linkgclist_(obj2gco(o), &(o)->gclist, &(p))

static void linkgclist_ (GCObject *o, GCObject **pnext, GCObject **list) {
  lua_assert(!isgray(o));  /* cannot be in a gray list */
  *pnext = *list;
  *list = o;
  set2gray(o);  /* now it is */
}


/*
** Link a generic collectable object 'o' into the list 'p'.
*/
#define linkobjgclist(o,p) linkgclist_(obj2gco(o), getgclist(o), &(p))

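/* ('linkgclist' needs the concrete type of 'o' so that '->gclist' resolves
   at compile time; 'linkobjgclist' works on a plain GCObject and finds the
   field at run time through 'getgclist'.) */
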
/*
** Clear keys for empty entries in tables. If an entry is empty, mark
** its key as dead. This allows the collection of the key, but keeps
** its entry in the table: removing the entry could break a chain and
** a table traversal. Other places never manipulate dead keys, because
** their associated empty values are enough to signal that the entries
** are logically empty.
*/
static void clearkey (Node *n) {
  lua_assert(isempty(gval(n)));
  if (keyiscollectable(n))
    setdeadkey(n);  /* unused key; remove it */
}


/*
** Tells whether a key or value can be cleared from a weak
** table. Non-collectable objects are never removed from weak
** tables. Strings behave as 'values', so they are never removed
** either. For other objects: if really collected, they cannot be
** kept; for objects being finalized, keep them in keys, but not
** in values.
*/
static int iscleared (global_State *g, const GCObject *o) {
  if (o == NULL) return 0;  /* non-collectable value */
  else if (novariant(o->tt) == LUA_TSTRING) {
    markobject(g, o);  /* strings are 'values', so are never weak */
    return 0;
  }
  else return iswhite(o);
}


/*
** Barrier that moves the collector forward, that is, marks the white
** object 'v' pointed to by the black object 'o'. In generational
** mode, 'v' must also become old, if 'o' is old; however, it cannot
** be changed directly to OLD, because it may still point to non-old
** objects. So, it is marked as OLD0. In the next cycle it will become
** OLD1, and in the next it will finally become OLD (regular old). By
** then, any object it points to will also be old. If called in the
** incremental sweep phase, it clears the black object to white (sweeps
** it) to avoid other barrier calls for this same object. (That cannot
** be done in generational mode, as its sweep does not distinguish
** whites from deads.)
*/
void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) {
  global_State *g = G(L);
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  if (keepinvariant(g)) {  /* must keep invariant? */
    reallymarkobject(g, v);  /* restore invariant */
    if (isold(o)) {
      lua_assert(!isold(v));  /* white object could not be old */
      setage(v, G_OLD0);  /* restore generational invariant */
    }
  }
  else {  /* sweep phase */
    lua_assert(issweepphase(g));
    if (g->gckind == KGC_INC)  /* incremental mode? */
      makewhite(g, o);  /* mark 'o' as white to avoid other barriers */
  }
}

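/* (A note on usage: elsewhere in the core, stores into upvalues, userdata
   and metatable fields generally go through this forward barrier, via the
   'luaC_barrier'/'luaC_objbarrier' macros in lgc.h, while stores into
   table slots use the backward barrier below.) */
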
/*
** barrier that moves collector backward, that is, mark the black object
** pointing to a white object as gray again.
*/
void luaC_barrierback_ (lua_State *L, GCObject *o) {
  global_State *g = G(L);
  lua_assert(isblack(o) && !isdead(g, o));
  lua_assert((g->gckind == KGC_GEN) == (isold(o) && getage(o) != G_TOUCHED1));
  if (getage(o) == G_TOUCHED2)  /* already in gray list? */
    set2gray(o);  /* make it gray to become touched1 */
  else  /* link it in 'grayagain' and paint it gray */
    linkobjgclist(o, g->grayagain);
  if (isold(o))  /* generational mode? */
    setage(o, G_TOUCHED1);  /* touched in current cycle */
}


void luaC_fix (lua_State *L, GCObject *o) {
  global_State *g = G(L);
  lua_assert(g->allgc == o);  /* object must be 1st in 'allgc' list! */
  set2gray(o);  /* they will be gray forever */
  setage(o, G_OLD);  /* and old forever */
  g->allgc = o->next;  /* remove object from 'allgc' list */
  o->next = g->fixedgc;  /* link it to 'fixedgc' list */
  g->fixedgc = o;
}


/*
** create a new collectable object (with given type and size) and link
** it to 'allgc' list.
*/
GCObject *luaC_newobj (lua_State *L, int tt, size_t sz) {
  global_State *g = G(L);
  GCObject *o = cast(GCObject *, luaM_newobject(L, novariant(tt), sz));
  o->marked = luaC_white(g);
  o->tt = tt;
  o->next = g->allgc;
  g->allgc = o;
  return o;
}

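/* (A new object starts with only the current white bit set; its age bits
   are therefore zero, that is, G_NEW, so the generational collector treats
   it as a young object.) */
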
/* }====================================================== */


/*
** {======================================================
** Mark functions
** =======================================================
*/


/*
** Mark an object. Userdata with no user values, strings, and closed
** upvalues are visited and turned black here. Open upvalues are
** already indirectly linked through their respective threads in the
** 'twups' list, so they don't go to the gray list; nevertheless, they
** are kept gray to avoid barriers, as their values will be revisited
** by the thread or by 'remarkupvals'. Other objects are added to the
** gray list to be visited (and turned black) later. Both userdata and
** upvalues can call this function recursively, but this recursion goes
** for at most two levels: An upvalue cannot refer to another upvalue
** (only closures can), and a userdata's metatable must be a table.
*/
static void reallymarkobject (global_State *g, GCObject *o) {
  switch (o->tt) {
    case LUA_VSHRSTR:
    case LUA_VLNGSTR: {
      set2black(o);  /* nothing to visit */
      break;
    }
    case LUA_VUPVAL: {
      UpVal *uv = gco2upv(o);
      if (upisopen(uv))
        set2gray(uv);  /* open upvalues are kept gray */
      else
        set2black(uv);  /* closed upvalues are visited here */
      markvalue(g, uv->v);  /* mark its content */
      break;
    }
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      if (u->nuvalue == 0) {  /* no user values? */
        markobjectN(g, u->metatable);  /* mark its metatable */
        set2black(u);  /* nothing else to mark */
        break;
      }
      /* else... */
    }  /* FALLTHROUGH */
    case LUA_VLCL: case LUA_VCCL: case LUA_VTABLE:
    case LUA_VTHREAD: case LUA_VPROTO: {
      linkobjgclist(o, g->gray);  /* to be visited later */
      break;
    }
    default: lua_assert(0); break;
  }
}


/*
** mark metamethods for basic types
*/
static void markmt (global_State *g) {
  int i;
  for (i=0; i < LUA_NUMTAGS; i++)
    markobjectN(g, g->mt[i]);
}


/*
** mark all objects in list of being-finalized
*/
static lu_mem markbeingfnz (global_State *g) {
  GCObject *o;
  lu_mem count = 0;
  for (o = g->tobefnz; o != NULL; o = o->next) {
    count++;
    markobject(g, o);
  }
  return count;
}


/*
** For each non-marked thread, simulates a barrier between each open
** upvalue and its value. (If the thread is collected, the value will be
** assigned to the upvalue, but then it can be too late for the barrier
** to act. The "barrier" does not need to check colors: A non-marked
** thread must be young; upvalues cannot be older than their threads; so
** any visited upvalue must be young too.) It also removes the thread
** from the list, as it was already visited. Threads with no upvalues
** are removed as well, as they have nothing to be checked. (If the
** thread gets an upvalue later, it will be linked in the list again.)
*/
static int remarkupvals (global_State *g) {
  lua_State *thread;
  lua_State **p = &g->twups;
  int work = 0;  /* estimate of how much work was done here */
  while ((thread = *p) != NULL) {
    work++;
    if (!iswhite(thread) && thread->openupval != NULL)
      p = &thread->twups;  /* keep marked thread with upvalues in the list */
    else {  /* thread is not marked or without upvalues */
      UpVal *uv;
      lua_assert(!isold(thread) || thread->openupval == NULL);
      *p = thread->twups;  /* remove thread from the list */
      thread->twups = thread;  /* mark that it is out of list */
      for (uv = thread->openupval; uv != NULL; uv = uv->u.open.next) {
        lua_assert(getage(uv) <= getage(thread));
        work++;
        if (!iswhite(uv)) {  /* upvalue already visited? */
          lua_assert(upisopen(uv) && isgray(uv));
          markvalue(g, uv->v);  /* mark its value */
        }
      }
    }
  }
  return work;
}


static void cleargraylists (global_State *g) {
  g->gray = g->grayagain = NULL;
  g->weak = g->allweak = g->ephemeron = NULL;
}


/*
** mark root set and reset all gray lists, to start a new collection
*/
static void restartcollection (global_State *g) {
  cleargraylists(g);
  markobject(g, g->mainthread);
  markvalue(g, &g->l_registry);
  markmt(g);
  markbeingfnz(g);  /* mark any finalizing object left from previous cycle */
}

/* }====================================================== */


/*
** {======================================================
** Traverse functions
** =======================================================
*/


/*
** Check whether object 'o' should be kept in the 'grayagain' list for
** post-processing by 'correctgraylist'. (It could put all old objects
** in the list and leave all the work to 'correctgraylist', but it is
** more efficient to avoid adding elements that will be removed.) Only
** TOUCHED1 objects need to be in the list. TOUCHED2 doesn't need to go
** back to a gray list, but then it must become OLD. (That is what
** 'correctgraylist' does when it finds a TOUCHED2 object.)
*/
static void genlink (global_State *g, GCObject *o) {
  lua_assert(isblack(o));
  if (getage(o) == G_TOUCHED1) {  /* touched in this cycle? */
    linkobjgclist(o, g->grayagain);  /* link it back in 'grayagain' */
  }  /* everything else does not need to be linked back */
  else if (getage(o) == G_TOUCHED2)
    changeage(o, G_TOUCHED2, G_OLD);  /* advance age */
}


/*
** Traverse a table with weak values and link it to proper list. During
** propagate phase, keep it in 'grayagain' list, to be revisited in the
** atomic phase. In the atomic phase, if table has any white value,
** put it in 'weak' list, to be cleared.
*/
static void traverseweakvalue (global_State *g, Table *h) {
  Node *n, *limit = gnodelast(h);
  /* if there is array part, assume it may have white values (it is not
     worth traversing it now just to check) */
  int hasclears = (h->alimit > 0);
  for (n = gnode(h, 0); n < limit; n++) {  /* traverse hash part */
    if (isempty(gval(n)))  /* entry is empty? */
      clearkey(n);  /* clear its key */
    else {
      lua_assert(!keyisnil(n));
      markkey(g, n);
      if (!hasclears && iscleared(g, gcvalueN(gval(n))))  /* a white value? */
        hasclears = 1;  /* table will have to be cleared */
    }
  }
  if (g->gcstate == GCSatomic && hasclears)
    linkgclist(h, g->weak);  /* has to be cleared later */
  else
    linkgclist(h, g->grayagain);  /* must retraverse it in atomic phase */
}


/*
** Traverse an ephemeron table and link it to proper list. Returns true
** iff any object was marked during this traversal (which implies that
** convergence has to continue). During propagation phase, keep table
** in 'grayagain' list, to be visited again in the atomic phase. In
** the atomic phase, if table has any white->white entry, it has to
** be revisited during ephemeron convergence (as that key may turn
** black). Otherwise, if it has any white key, table has to be cleared
** (in the atomic phase). In generational mode, some tables
** must be kept in some gray list for post-processing; this is done
** by 'genlink'.
*/
static int traverseephemeron (global_State *g, Table *h, int inv) {
  int marked = 0;  /* true if an object is marked in this traversal */
  int hasclears = 0;  /* true if table has white keys */
  int hasww = 0;  /* true if table has entry "white-key -> white-value" */
  unsigned int i;
  unsigned int asize = luaH_realasize(h);
  unsigned int nsize = sizenode(h);
  /* traverse array part */
  for (i = 0; i < asize; i++) {
    if (valiswhite(&h->array[i])) {
      marked = 1;
      reallymarkobject(g, gcvalue(&h->array[i]));
    }
  }
  /* traverse hash part; if 'inv', traverse descending
     (see 'convergeephemerons') */
  for (i = 0; i < nsize; i++) {
    Node *n = inv ? gnode(h, nsize - 1 - i) : gnode(h, i);
    if (isempty(gval(n)))  /* entry is empty? */
      clearkey(n);  /* clear its key */
    else if (iscleared(g, gckeyN(n))) {  /* key is not marked (yet)? */
      hasclears = 1;  /* table must be cleared */
      if (valiswhite(gval(n)))  /* value not marked yet? */
        hasww = 1;  /* white-white entry */
    }
    else if (valiswhite(gval(n))) {  /* value not marked yet? */
      marked = 1;
      reallymarkobject(g, gcvalue(gval(n)));  /* mark it now */
    }
  }
  /* link table into proper list */
  if (g->gcstate == GCSpropagate)
    linkgclist(h, g->grayagain);  /* must retraverse it in atomic phase */
  else if (hasww)  /* table has white->white entries? */
    linkgclist(h, g->ephemeron);  /* have to propagate again */
  else if (hasclears)  /* table has white keys? */
    linkgclist(h, g->allweak);  /* may have to clean white keys */
  else
    genlink(g, obj2gco(h));  /* check whether collector still needs to see it */
  return marked;
}

static void traversestrongtable (global_State *g, Table *h) {
  Node *n, *limit = gnodelast(h);
  unsigned int i;
  unsigned int asize = luaH_realasize(h);
  for (i = 0; i < asize; i++)  /* traverse array part */
    markvalue(g, &h->array[i]);
  for (n = gnode(h, 0); n < limit; n++) {  /* traverse hash part */
    if (isempty(gval(n)))  /* entry is empty? */
      clearkey(n);  /* clear its key */
    else {
      lua_assert(!keyisnil(n));
      markkey(g, n);
      markvalue(g, gval(n));
    }
  }
  genlink(g, obj2gco(h));
}


static lu_mem traversetable (global_State *g, Table *h) {
  const char *weakkey, *weakvalue;
  const TValue *mode = gfasttm(g, h->metatable, TM_MODE);
  markobjectN(g, h->metatable);
  if (mode && ttisstring(mode) &&  /* is there a weak mode? */
      (cast_void(weakkey = strchr(svalue(mode), 'k')),
       cast_void(weakvalue = strchr(svalue(mode), 'v')),
       (weakkey || weakvalue))) {  /* is really weak? */
    if (!weakkey)  /* strong keys? */
      traverseweakvalue(g, h);
    else if (!weakvalue)  /* strong values? */
      traverseephemeron(g, h, 0);
    else  /* all weak */
      linkgclist(h, g->allweak);  /* nothing to traverse now */
  }
  else  /* not weak */
    traversestrongtable(g, h);
  return 1 + h->alimit + 2 * allocsizenode(h);
}

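/* (The value returned by 'traversetable' is the amount of "work" done:
   one unit for the table itself, one per counted array slot, and two per
   allocated hash node, to account for its key and its value.) */
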
static int traverseudata (global_State *g, Udata *u) {
  int i;
  markobjectN(g, u->metatable);  /* mark its metatable */
  for (i = 0; i < u->nuvalue; i++)
    markvalue(g, &u->uv[i].uv);
  genlink(g, obj2gco(u));
  return 1 + u->nuvalue;
}


/*
** Traverse a prototype. (While a prototype is being built, its
** arrays can be larger than needed; the extra slots are filled with
** NULL, so the use of 'markobjectN'.)
*/
static int traverseproto (global_State *g, Proto *f) {
  int i;
  markobjectN(g, f->source);
  for (i = 0; i < f->sizek; i++)  /* mark literals */
    markvalue(g, &f->k[i]);
  for (i = 0; i < f->sizeupvalues; i++)  /* mark upvalue names */
    markobjectN(g, f->upvalues[i].name);
  for (i = 0; i < f->sizep; i++)  /* mark nested protos */
    markobjectN(g, f->p[i]);
  for (i = 0; i < f->sizelocvars; i++)  /* mark local-variable names */
    markobjectN(g, f->locvars[i].varname);
  return 1 + f->sizek + f->sizeupvalues + f->sizep + f->sizelocvars;
}


static int traverseCclosure (global_State *g, CClosure *cl) {
  int i;
  for (i = 0; i < cl->nupvalues; i++)  /* mark its upvalues */
    markvalue(g, &cl->upvalue[i]);
  return 1 + cl->nupvalues;
}

/*
** Traverse a Lua closure, marking its prototype and its upvalues.
** (Both can be NULL while closure is being created.)
*/
static int traverseLclosure (global_State *g, LClosure *cl) {
  int i;
  markobjectN(g, cl->p);  /* mark its prototype */
  for (i = 0; i < cl->nupvalues; i++) {  /* visit its upvalues */
    UpVal *uv = cl->upvals[i];
    markobjectN(g, uv);  /* mark upvalue */
  }
  return 1 + cl->nupvalues;
}


/*
** Traverse a thread, marking the elements in the stack up to its top
** and cleaning the rest of the stack in the final traversal. That
** ensures that the entire stack has valid (non-dead) objects.
** Threads have no barriers. In gen. mode, old threads must be visited
** at every cycle, because they might point to young objects. In inc.
** mode, the thread can still be modified before the end of the cycle,
** and therefore it must be visited again in the atomic phase. To ensure
** these visits, threads must return to a gray list if they are not new
** (which can only happen in generational mode) or if the traverse is in
** the propagate phase (which can only happen in incremental mode).
*/
static int traversethread (global_State *g, lua_State *th) {
  UpVal *uv;
  StkId o = th->stack;
  if (isold(th) || g->gcstate == GCSpropagate)
    linkgclist(th, g->grayagain);  /* insert into 'grayagain' list */
  if (o == NULL)
    return 1;  /* stack not completely built yet */
  lua_assert(g->gcstate == GCSatomic ||
             th->openupval == NULL || isintwups(th));
  for (; o < th->top; o++)  /* mark live elements in the stack */
    markvalue(g, s2v(o));
  for (uv = th->openupval; uv != NULL; uv = uv->u.open.next)
    markobject(g, uv);  /* open upvalues cannot be collected */
  if (g->gcstate == GCSatomic) {  /* final traversal? */
    for (; o < th->stack_last + EXTRA_STACK; o++)
      setnilvalue(s2v(o));  /* clear dead stack slice */
    /* 'remarkupvals' may have removed thread from 'twups' list */
    if (!isintwups(th) && th->openupval != NULL) {
      th->twups = g->twups;  /* link it back to the list */
      g->twups = th;
    }
  }
  else if (!g->gcemergency)
    luaD_shrinkstack(th);  /* do not change stack in emergency cycle */
  return 1 + stacksize(th);
}


/*
** traverse one gray object, turning it to black.
*/
static lu_mem propagatemark (global_State *g) {
  GCObject *o = g->gray;
  nw2black(o);
  g->gray = *getgclist(o);  /* remove from 'gray' list */
  switch (o->tt) {
    case LUA_VTABLE: return traversetable(g, gco2t(o));
    case LUA_VUSERDATA: return traverseudata(g, gco2u(o));
    case LUA_VLCL: return traverseLclosure(g, gco2lcl(o));
    case LUA_VCCL: return traverseCclosure(g, gco2ccl(o));
    case LUA_VPROTO: return traverseproto(g, gco2p(o));
    case LUA_VTHREAD: return traversethread(g, gco2th(o));
    default: lua_assert(0); return 0;
  }
}


static lu_mem propagateall (global_State *g) {
  lu_mem tot = 0;
  while (g->gray)
    tot += propagatemark(g);
  return tot;
}


/*
** Traverse all ephemeron tables propagating marks from keys to values.
** Repeat until it converges, that is, nothing new is marked. 'dir'
** inverts the direction of the traversals, trying to speed up
** convergence on chains in the same table.
*/
static void convergeephemerons (global_State *g) {
  int changed;
  int dir = 0;
  do {
    GCObject *w;
    GCObject *next = g->ephemeron;  /* get ephemeron list */
    g->ephemeron = NULL;  /* tables may return to this list when traversed */
    changed = 0;
    while ((w = next) != NULL) {  /* for each ephemeron table */
      Table *h = gco2t(w);
      next = h->gclist;  /* list is rebuilt during loop */
      nw2black(h);  /* out of the list (for now) */
      if (traverseephemeron(g, h, dir)) {  /* marked some value? */
        propagateall(g);  /* propagate changes */
        changed = 1;  /* will have to revisit all ephemeron tables */
      }
    }
    dir = !dir;  /* invert direction next time */
  } while (changed);  /* repeat until no more changes */
}

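/* (Convergence is guaranteed: a new pass is requested only when the
   previous pass marked at least one more object, and the number of
   objects reachable in a cycle is finite.) */
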
/* }====================================================== */


/*
** {======================================================
** Sweep Functions
** =======================================================
*/


/*
** clear entries with unmarked keys from all weaktables in list 'l'
*/
static void clearbykeys (global_State *g, GCObject *l) {
  for (; l; l = gco2t(l)->gclist) {
    Table *h = gco2t(l);
    Node *limit = gnodelast(h);
    Node *n;
    for (n = gnode(h, 0); n < limit; n++) {
      if (iscleared(g, gckeyN(n)))  /* unmarked key? */
        setempty(gval(n));  /* remove entry */
      if (isempty(gval(n)))  /* is entry empty? */
        clearkey(n);  /* clear its key */
    }
  }
}


/*
** clear entries with unmarked values from all weaktables in list 'l' up
** to element 'f'
*/
static void clearbyvalues (global_State *g, GCObject *l, GCObject *f) {
  for (; l != f; l = gco2t(l)->gclist) {
    Table *h = gco2t(l);
    Node *n, *limit = gnodelast(h);
    unsigned int i;
    unsigned int asize = luaH_realasize(h);
    for (i = 0; i < asize; i++) {
      TValue *o = &h->array[i];
      if (iscleared(g, gcvalueN(o)))  /* value was collected? */
        setempty(o);  /* remove entry */
    }
    for (n = gnode(h, 0); n < limit; n++) {
      if (iscleared(g, gcvalueN(gval(n))))  /* unmarked value? */
        setempty(gval(n));  /* remove entry */
      if (isempty(gval(n)))  /* is entry empty? */
        clearkey(n);  /* clear its key */
    }
  }
}

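/* (The 'f' parameter lets 'atomic' clear, in its second pass, only the
   tables added to the lists after the first pass; see the calls there
   that use 'origweak' and 'origall'.) */
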
static void freeupval (lua_State *L, UpVal *uv) {
  if (upisopen(uv))
    luaF_unlinkupval(uv);
  luaM_free(L, uv);
}


static void freeobj (lua_State *L, GCObject *o) {
  switch (o->tt) {
    case LUA_VPROTO:
      luaF_freeproto(L, gco2p(o));
      break;
    case LUA_VUPVAL:
      freeupval(L, gco2upv(o));
      break;
    case LUA_VLCL: {
      LClosure *cl = gco2lcl(o);
      luaM_freemem(L, cl, sizeLclosure(cl->nupvalues));
      break;
    }
    case LUA_VCCL: {
      CClosure *cl = gco2ccl(o);
      luaM_freemem(L, cl, sizeCclosure(cl->nupvalues));
      break;
    }
    case LUA_VTABLE:
      luaH_free(L, gco2t(o));
      break;
    case LUA_VTHREAD:
      luaE_freethread(L, gco2th(o));
      break;
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      luaM_freemem(L, o, sizeudata(u->nuvalue, u->len));
      break;
    }
    case LUA_VSHRSTR: {
      TString *ts = gco2ts(o);
      luaS_remove(L, ts);  /* remove it from hash table */
      luaM_freemem(L, ts, sizelstring(ts->shrlen));
      break;
    }
    case LUA_VLNGSTR: {
      TString *ts = gco2ts(o);
      luaM_freemem(L, ts, sizelstring(ts->u.lnglen));
      break;
    }
    default: lua_assert(0);
  }
}


/*
** sweep at most 'countin' elements from a list of GCObjects erasing dead
** objects, where a dead object is one marked with the old (non current)
** white; change all non-dead objects back to white, preparing for next
** collection cycle. Return where to continue the traversal or NULL if
** list is finished. ('*countout' gets the number of elements traversed.)
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, int countin,
                             int *countout) {
  global_State *g = G(L);
  int ow = otherwhite(g);
  int i;
  int white = luaC_white(g);  /* current white */
  for (i = 0; *p != NULL && i < countin; i++) {
    GCObject *curr = *p;
    int marked = curr->marked;
    if (isdeadm(ow, marked)) {  /* is 'curr' dead? */
      *p = curr->next;  /* remove 'curr' from list */
      freeobj(L, curr);  /* erase 'curr' */
    }
    else {  /* change mark to 'white' */
      curr->marked = cast_byte((marked & ~maskgcbits) | white);
      p = &curr->next;  /* go to next element */
    }
  }
  if (countout)
    *countout = i;  /* number of elements traversed */
  return (*p == NULL) ? NULL : p;
}

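/* (During incremental sweep steps, callers bound 'countin' with
   GCSWEEPMAX, so each step frees or re-whitens at most that many
   objects before yielding.) */
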
/*
** sweep a list until a live object (or end of list)
*/
static GCObject **sweeptolive (lua_State *L, GCObject **p) {
  GCObject **old = p;
  do {
    p = sweeplist(L, p, 1, NULL);
  } while (p == old);
  return p;
}

/* }====================================================== */


/*
** {======================================================
** Finalization
** =======================================================
*/

/*
** If possible, shrink string table.
*/
static void checkSizes (lua_State *L, global_State *g) {
  if (!g->gcemergency) {
    if (g->strt.nuse < g->strt.size / 4) {  /* string table too big? */
      l_mem olddebt = g->GCdebt;
      luaS_resize(L, g->strt.size / 2);
      g->GCestimate += g->GCdebt - olddebt;  /* correct estimate */
    }
  }
}


/*
** Get the next udata to be finalized from the 'tobefnz' list, and
** link it back into the 'allgc' list.
*/
static GCObject *udata2finalize (global_State *g) {
  GCObject *o = g->tobefnz;  /* get first element */
  lua_assert(tofinalize(o));
  g->tobefnz = o->next;  /* remove it from 'tobefnz' list */
  o->next = g->allgc;  /* return it to 'allgc' list */
  g->allgc = o;
  resetbit(o->marked, FINALIZEDBIT);  /* object is "normal" again */
  if (issweepphase(g))
    makewhite(g, o);  /* "sweep" object */
  else if (getage(o) == G_OLD1)
    g->firstold1 = o;  /* it is the first OLD1 object in the list */
  return o;
}


static void dothecall (lua_State *L, void *ud) {
  UNUSED(ud);
  luaD_callnoyield(L, L->top - 2, 0);
}


static void GCTM (lua_State *L) {
  global_State *g = G(L);
  const TValue *tm;
  TValue v;
  lua_assert(!g->gcemergency);
  setgcovalue(L, &v, udata2finalize(g));
  tm = luaT_gettmbyobj(L, &v, TM_GC);
  if (!notm(tm)) {  /* is there a finalizer? */
    int status;
    lu_byte oldah = L->allowhook;
    int oldgcstp = g->gcstp;
    g->gcstp |= GCSTPGC;  /* avoid GC steps */
    L->allowhook = 0;  /* stop debug hooks during GC metamethod */
    setobj2s(L, L->top++, tm);  /* push finalizer... */
    setobj2s(L, L->top++, &v);  /* ... and its argument */
    L->ci->callstatus |= CIST_FIN;  /* will run a finalizer */
    status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0);
    L->ci->callstatus &= ~CIST_FIN;  /* not running a finalizer anymore */
    L->allowhook = oldah;  /* restore hooks */
    g->gcstp = oldgcstp;  /* restore state */
    if (l_unlikely(status != LUA_OK)) {  /* error while running __gc? */
      luaE_warnerror(L, "__gc");
      L->top--;  /* pops error object */
    }
  }
}


/*
** Call a few finalizers
*/
static int runafewfinalizers (lua_State *L, int n) {
  global_State *g = G(L);
  int i;
  for (i = 0; i < n && g->tobefnz; i++)
    GCTM(L);  /* call one finalizer */
  return i;
}


/*
** call all pending finalizers
*/
static void callallpendingfinalizers (lua_State *L) {
  global_State *g = G(L);
  while (g->tobefnz)
    GCTM(L);
}


/*
** find the last 'next' field in list 'p' (to add elements at its end)
*/
static GCObject **findlast (GCObject **p) {
  while (*p != NULL)
    p = &(*p)->next;
  return p;
}


/*
** Move all unreachable objects (or 'all' objects) that need
** finalization from list 'finobj' to list 'tobefnz' (to be finalized).
** (Note that objects after 'finobjold1' cannot be white, so they
** don't need to be traversed. In incremental mode, 'finobjold1' is NULL,
** so the whole list is traversed.)
*/
static void separatetobefnz (global_State *g, int all) {
  GCObject *curr;
  GCObject **p = &g->finobj;
  GCObject **lastnext = findlast(&g->tobefnz);
  while ((curr = *p) != g->finobjold1) {  /* traverse all finalizable objects */
    lua_assert(tofinalize(curr));
    if (!(iswhite(curr) || all))  /* not being collected? */
      p = &curr->next;  /* don't bother with it */
    else {
      if (curr == g->finobjsur)  /* removing 'finobjsur'? */
        g->finobjsur = curr->next;  /* correct it */
      *p = curr->next;  /* remove 'curr' from 'finobj' list */
      curr->next = *lastnext;  /* link at the end of 'tobefnz' list */
      *lastnext = curr;
      lastnext = &curr->next;
    }
  }
}


/*
** If pointer 'p' points to 'o', move it to the next element.
*/
static void checkpointer (GCObject **p, GCObject *o) {
  if (o == *p)
    *p = o->next;
}


/*
** Correct pointers to objects inside 'allgc' list when
** object 'o' is being removed from the list.
*/
static void correctpointers (global_State *g, GCObject *o) {
  checkpointer(&g->survival, o);
  checkpointer(&g->old1, o);
  checkpointer(&g->reallyold, o);
  checkpointer(&g->firstold1, o);
}


/*
** if object 'o' has a finalizer, remove it from 'allgc' list (must
** search the list to find it) and link it in 'finobj' list.
*/
void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) {
  global_State *g = G(L);
  if (tofinalize(o) ||                 /* obj. is already marked... */
      gfasttm(g, mt, TM_GC) == NULL ||    /* or has no finalizer... */
      (g->gcstp & GCSTPCLS))                   /* or closing state? */
    return;  /* nothing to be done */
  else {  /* move 'o' to 'finobj' list */
    GCObject **p;
    if (issweepphase(g)) {
      makewhite(g, o);  /* "sweep" object 'o' */
      if (g->sweepgc == &o->next)  /* should not remove 'sweepgc' object */
        g->sweepgc = sweeptolive(L, g->sweepgc);  /* change 'sweepgc' */
    }
    else
      correctpointers(g, o);
    /* search for pointer pointing to 'o' */
    for (p = &g->allgc; *p != o; p = &(*p)->next) { /* empty */ }
    *p = o->next;  /* remove 'o' from 'allgc' list */
    o->next = g->finobj;  /* link it in 'finobj' list */
    g->finobj = o;
    l_setbit(o->marked, FINALIZEDBIT);  /* mark it as such */
  }
}

/* }====================================================== */


/*
** {======================================================
** Generational Collector
** =======================================================
*/

static void setpause (global_State *g);


/*
** Sweep a list of objects to enter generational mode. Deletes dead
** objects and turns the non-dead ones old. All non-dead threads---which
** are now old---must be in a gray list. Everything else is not in a
** gray list. Open upvalues are also kept gray.
*/
static void sweep2old (lua_State *L, GCObject **p) {
  GCObject *curr;
  global_State *g = G(L);
  while ((curr = *p) != NULL) {
    if (iswhite(curr)) {  /* is 'curr' dead? */
      lua_assert(isdead(g, curr));
      *p = curr->next;  /* remove 'curr' from list */
      freeobj(L, curr);  /* erase 'curr' */
    }
    else {  /* all surviving objects become old */
      setage(curr, G_OLD);
      if (curr->tt == LUA_VTHREAD) {  /* threads must be watched */
        lua_State *th = gco2th(curr);
        linkgclist(th, g->grayagain);  /* insert into 'grayagain' list */
      }
      else if (curr->tt == LUA_VUPVAL && upisopen(gco2upv(curr)))
        set2gray(curr);  /* open upvalues are always gray */
      else  /* everything else is black */
        nw2black(curr);
      p = &curr->next;  /* go to next element */
    }
  }
}


/*
** Sweep for generational mode. Delete dead objects. (Because the
** collection is not incremental, there are no "new white" objects
** during the sweep. So, any white object must be dead.) For
** non-dead objects, advance their ages and clear the color of
** new objects. (Old objects keep their colors.)
** The ages of G_TOUCHED1 and G_TOUCHED2 objects cannot be advanced
** here, because these old-generation objects are usually not swept
** here. They will all be advanced in 'correctgraylist'. That function
** will also remove objects turned white here from any gray list.
*/
static GCObject **sweepgen (lua_State *L, global_State *g, GCObject **p,
                            GCObject *limit, GCObject **pfirstold1) {
  static const lu_byte nextage[] = {
    G_SURVIVAL,  /* from G_NEW */
    G_OLD1,      /* from G_SURVIVAL */
    G_OLD1,      /* from G_OLD0 */
    G_OLD,       /* from G_OLD1 */
    G_OLD,       /* from G_OLD (do not change) */
    G_TOUCHED1,  /* from G_TOUCHED1 (do not change) */
    G_TOUCHED2   /* from G_TOUCHED2 (do not change) */
  };
  int white = luaC_white(g);
  GCObject *curr;
  while ((curr = *p) != limit) {
    if (iswhite(curr)) {  /* is 'curr' dead? */
      lua_assert(!isold(curr) && isdead(g, curr));
      *p = curr->next;  /* remove 'curr' from list */
      freeobj(L, curr);  /* erase 'curr' */
    }
    else {  /* correct mark and age */
      if (getage(curr) == G_NEW) {  /* new objects go back to white */
        int marked = curr->marked & ~maskgcbits;  /* erase GC bits */
        curr->marked = cast_byte(marked | G_SURVIVAL | white);
      }
      else {  /* all other objects will be old, and so keep their color */
        setage(curr, nextage[getage(curr)]);
        if (getage(curr) == G_OLD1 && *pfirstold1 == NULL)
          *pfirstold1 = curr;  /* first OLD1 object in the list */
      }
      p = &curr->next;  /* go to next element */
    }
  }
  return p;
}


/*
** Traverse a list making all its elements white and clearing their
** age. In incremental mode, all objects are 'new' all the time,
** except for fixed strings (which are always old).
*/
static void whitelist (global_State *g, GCObject *p) {
  int white = luaC_white(g);
  for (; p != NULL; p = p->next)
    p->marked = cast_byte((p->marked & ~maskgcbits) | white);
}


/*
** Correct a list of gray objects. Return a pointer to where the rest
** of the list should be linked.
** Because this correction is done after sweeping, young objects might
** be turned white and still be in the list; they are simply removed.
** 'TOUCHED1' objects are advanced to 'TOUCHED2' and remain on the
** list; non-white threads also remain on the list. 'TOUCHED2' objects
** become regular old; they and anything else are removed from the list.
*/
static GCObject **correctgraylist (GCObject **p) {
  GCObject *curr;
  while ((curr = *p) != NULL) {
    GCObject **next = getgclist(curr);
    if (iswhite(curr))
      goto remove;  /* remove all white objects */
    else if (getage(curr) == G_TOUCHED1) {  /* touched in this cycle? */
      lua_assert(isgray(curr));
      nw2black(curr);  /* make it black, for next barrier */
      changeage(curr, G_TOUCHED1, G_TOUCHED2);
      goto remain;  /* keep it in the list and go to next element */
    }
    else if (curr->tt == LUA_VTHREAD) {
      lua_assert(isgray(curr));
      goto remain;  /* keep non-white threads on the list */
    }
    else {  /* everything else is removed */
      lua_assert(isold(curr));  /* young objects should be white here */
      if (getage(curr) == G_TOUCHED2)  /* advance from TOUCHED2... */
        changeage(curr, G_TOUCHED2, G_OLD);  /* ... to OLD */
      nw2black(curr);  /* make object black (to be removed) */
      goto remove;
    }
    remove: *p = *next; continue;
    remain: p = next; continue;
  }
  return p;
}


/*
** Correct all gray lists, coalescing them into 'grayagain'.
*/
static void correctgraylists (global_State *g) {
  GCObject **list = correctgraylist(&g->grayagain);
  *list = g->weak; g->weak = NULL;
  list = correctgraylist(list);
  *list = g->allweak; g->allweak = NULL;
  list = correctgraylist(list);
  *list = g->ephemeron; g->ephemeron = NULL;
  correctgraylist(list);
}


/*
** Mark black 'OLD1' objects when starting a new young collection.
** Gray objects are already in some gray list, and so will be visited
** in the atomic step.
*/
static void markold (global_State *g, GCObject *from, GCObject *to) {
  GCObject *p;
  for (p = from; p != to; p = p->next) {
    if (getage(p) == G_OLD1) {
      lua_assert(!iswhite(p));
      changeage(p, G_OLD1, G_OLD);  /* now they are old */
      if (isblack(p))
        reallymarkobject(g, p);
    }
  }
}


/*
** Finish a young-generation collection.
*/
static void finishgencycle (lua_State *L, global_State *g) {
  correctgraylists(g);
  checkSizes(L, g);
  g->gcstate = GCSpropagate;  /* skip restart */
  if (!g->gcemergency)
    callallpendingfinalizers(L);
}


/*
** Does a young collection. First, mark 'OLD1' objects. Then does the
** atomic step. Then, sweep all lists and advance pointers. Finally,
** finish the collection.
*/
static void youngcollection (lua_State *L, global_State *g) {
  GCObject **psurvival;  /* to point to first non-dead survival object */
  GCObject *dummy;  /* dummy out parameter to 'sweepgen' */
  lua_assert(g->gcstate == GCSpropagate);
  if (g->firstold1) {  /* are there regular OLD1 objects? */
    markold(g, g->firstold1, g->reallyold);  /* mark them */
    g->firstold1 = NULL;  /* no more OLD1 objects (for now) */
  }
  markold(g, g->finobj, g->finobjrold);
  markold(g, g->tobefnz, NULL);
  atomic(L);

  /* sweep nursery and get a pointer to its last live element */
  g->gcstate = GCSswpallgc;
  psurvival = sweepgen(L, g, &g->allgc, g->survival, &g->firstold1);
  /* sweep 'survival' */
  sweepgen(L, g, psurvival, g->old1, &g->firstold1);
  g->reallyold = g->old1;
  g->old1 = *psurvival;  /* 'survival' survivals are old now */
  g->survival = g->allgc;  /* all news are survivals */

  /* repeat for 'finobj' lists */
  dummy = NULL;  /* no 'firstold1' optimization for 'finobj' lists */
  psurvival = sweepgen(L, g, &g->finobj, g->finobjsur, &dummy);
  /* sweep 'survival' */
  sweepgen(L, g, psurvival, g->finobjold1, &dummy);
  g->finobjrold = g->finobjold1;
  g->finobjold1 = *psurvival;  /* 'survival' survivals are old now */
  g->finobjsur = g->finobj;  /* all news are survivals */

  sweepgen(L, g, &g->tobefnz, NULL, &dummy);
  finishgencycle(L, g);
}


/*
** Clears all gray lists, sweeps objects, and prepares sublists to enter
** generational mode. The sweeps remove dead objects and turn all
** surviving objects to old. Threads go back to 'grayagain'; everything
** else is turned black (not in any gray list).
*/
static void atomic2gen (lua_State *L, global_State *g) {
  cleargraylists(g);
  /* sweep all elements making them old */
  g->gcstate = GCSswpallgc;
  sweep2old(L, &g->allgc);
  /* everything alive now is old */
  g->reallyold = g->old1 = g->survival = g->allgc;
  g->firstold1 = NULL;  /* there are no OLD1 objects anywhere */

  /* repeat for 'finobj' lists */
  sweep2old(L, &g->finobj);
  g->finobjrold = g->finobjold1 = g->finobjsur = g->finobj;

  sweep2old(L, &g->tobefnz);

  g->gckind = KGC_GEN;
  g->lastatomic = 0;
  g->GCestimate = gettotalbytes(g);  /* base for memory control */
  finishgencycle(L, g);
}


/*
** Enter generational mode. Must go until the end of an atomic cycle
** to ensure that all objects are correctly marked and weak tables
** are cleared. Then, it turns all objects old and finishes the
** collection.
*/
static lu_mem entergen (lua_State *L, global_State *g) {
  lu_mem numobjs;
  luaC_runtilstate(L, bitmask(GCSpause));  /* prepare to start a new cycle */
  luaC_runtilstate(L, bitmask(GCSpropagate));  /* start new cycle */
  numobjs = atomic(L);  /* propagates all and then do the atomic stuff */
  atomic2gen(L, g);
  return numobjs;
}


/*
** Enter incremental mode. Turn all objects white, make all
** intermediate lists point to NULL (to avoid invalid pointers),
** and go to the pause state.
*/
static void enterinc (global_State *g) {
  whitelist(g, g->allgc);
  g->reallyold = g->old1 = g->survival = NULL;
  whitelist(g, g->finobj);
  whitelist(g, g->tobefnz);
  g->finobjrold = g->finobjold1 = g->finobjsur = NULL;
  g->gcstate = GCSpause;
  g->gckind = KGC_INC;
  g->lastatomic = 0;
}


/*
** Change collector mode to 'newmode'.
*/
void luaC_changemode (lua_State *L, int newmode) {
  global_State *g = G(L);
  if (newmode != g->gckind) {
    if (newmode == KGC_GEN)  /* entering generational mode? */
      entergen(L, g);
    else
      enterinc(g);  /* entering incremental mode */
  }
  g->lastatomic = 0;
}


/*
** Does a full collection in generational mode.
*/
static lu_mem fullgen (lua_State *L, global_State *g) {
  enterinc(g);
  return entergen(L, g);
}


/*
** Set debt for the next minor collection, which will happen when
** memory grows 'genminormul'%.
*/
static void setminordebt (global_State *g) {
  luaE_setdebt(g, -(cast(l_mem, (gettotalbytes(g) / 100)) * g->genminormul));
}

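/* (For example, with 'genminormul' set to 25 and about 1 MB currently in
   use, the debt becomes roughly -256 KB, so the next minor collection
   starts after about 256 KB of new allocations.) */
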
/*
** Does a major collection after last collection was a "bad collection".
**
** When the program is building a big structure, it allocates lots of
** memory but generates very little garbage. In those scenarios,
** the generational mode just wastes time doing small collections, and
** major collections are frequently what we call a "bad collection", a
** collection that frees too few objects. To avoid the cost of switching
** between generational mode and the incremental mode needed for full
** (major) collections, the collector tries to stay in incremental mode
** after a bad collection, and to switch back to generational mode only
** after a "good" collection (one that traverses less than 9/8 objects
** of the previous one).
** The collector must choose whether to stay in incremental mode or to
** switch back to generational mode before sweeping. At this point, it
** does not know the real memory in use, so it cannot use memory to
** decide whether to return to generational mode. Instead, it uses the
** number of objects traversed (returned by 'atomic') as a proxy. The
** field 'g->lastatomic' keeps this count from the last collection.
** ('g->lastatomic != 0' also means that the last collection was bad.)
*/
static void stepgenfull (lua_State *L, global_State *g) {
  lu_mem newatomic;  /* count of traversed objects */
  lu_mem lastatomic = g->lastatomic;  /* count from last collection */
  if (g->gckind == KGC_GEN)  /* still in generational mode? */
    enterinc(g);  /* enter incremental mode */
  luaC_runtilstate(L, bitmask(GCSpropagate));  /* start new cycle */
  newatomic = atomic(L);  /* mark everybody */
  if (newatomic < lastatomic + (lastatomic >> 3)) {  /* good collection? */
    atomic2gen(L, g);  /* return to generational mode */
    setminordebt(g);
  }
  else {  /* another bad collection; stay in incremental mode */
    g->GCestimate = gettotalbytes(g);  /* first estimate */
    entersweep(L);
    luaC_runtilstate(L, bitmask(GCSpause));  /* finish collection */
    setpause(g);
    g->lastatomic = newatomic;
  }
}


/*
** Does a generational "step".
** Usually, this means doing a minor collection and setting the debt to
** make another collection when memory grows 'genminormul'% larger.
**
** However, there are exceptions. If memory grows 'genmajormul'%
** larger than it was at the end of the last major collection (kept
** in 'g->GCestimate'), the function does a major collection. At the
** end, it checks whether the major collection was able to free a
** decent amount of memory (at least half the growth in memory since
** previous major collection). If so, the collector keeps its state,
** and the next collection will probably be minor again. Otherwise,
** we have what we call a "bad collection". In that case, set the field
** 'g->lastatomic' to signal that fact, so that the next collection will
** go to 'stepgenfull'.
**
** 'GCdebt <= 0' means an explicit call to GC step with "size" zero;
** in that case, do a minor collection.
*/
static void genstep (lua_State *L, global_State *g) {
  if (g->lastatomic != 0)  /* last collection was a bad one? */
    stepgenfull(L, g);  /* do a full step */
  else {
    lu_mem majorbase = g->GCestimate;  /* memory after last major collection */
    lu_mem majorinc = (majorbase / 100) * getgcparam(g->genmajormul);
    if (g->GCdebt > 0 && gettotalbytes(g) > majorbase + majorinc) {
      lu_mem numobjs = fullgen(L, g);  /* do a major collection */
      if (gettotalbytes(g) < majorbase + (majorinc / 2)) {
        /* collected at least half of memory growth since last major
           collection; keep doing minor collections */
        setminordebt(g);
      }
      else {  /* bad collection */
        g->lastatomic = numobjs;  /* signal that last collection was bad */
        setpause(g);  /* do a long wait for next (major) collection */
      }
    }
    else {  /* regular case; do a minor collection */
      youngcollection(L, g);
      setminordebt(g);
      g->GCestimate = majorbase;  /* preserve base value */
    }
  }
  lua_assert(isdecGCmodegen(g));
}

/* }====================================================== */


/*
** {======================================================
** GC control
** =======================================================
*/


/*
** Set the "time" to wait before starting a new GC cycle; cycle will
** start when memory use hits the threshold of ('estimate' * pause /
** PAUSEADJ). (Division by 'estimate' should be OK: it cannot be zero,
** because Lua cannot even start with less than PAUSEADJ bytes).
*/
static void setpause (global_State *g) {
  l_mem threshold, debt;
  int pause = getgcparam(g->gcpause);
  l_mem estimate = g->GCestimate / PAUSEADJ;  /* adjust 'estimate' */
  lua_assert(estimate > 0);
  threshold = (pause < MAX_LMEM / estimate)  /* overflow? */
            ? estimate * pause  /* no overflow */
            : MAX_LMEM;  /* overflow; truncate to maximum */
  debt = gettotalbytes(g) - threshold;
  if (debt > 0) debt = 0;
  luaE_setdebt(g, debt);
}

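/* (For instance, a 'pause' of 200 with PAUSEADJ == 100 makes the threshold
   twice the current estimate: the next cycle starts only after memory use
   roughly doubles.) */
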
|
|
|
|
|
|
|
/*
|
|
|
|
** Enter first sweep phase.
|
|
|
|
** The call to 'sweeptolive' makes the pointer point to an object
|
|
|
|
** inside the list (instead of to the header), so that the real sweep do
|
|
|
|
** not need to skip objects created between "now" and the start of the
|
|
|
|
** real sweep.
|
|
|
|
*/
|
|
|
|
static void entersweep (lua_State *L) {
|
|
|
|
global_State *g = G(L);
|
|
|
|
g->gcstate = GCSswpallgc;
|
|
|
|
lua_assert(g->sweepgc == NULL);
|
|
|
|
g->sweepgc = sweeptolive(L, &g->allgc);
|
|
|
|
}


/*
** Delete all objects in list 'p' until (but not including) object
** 'limit'.
*/
static void deletelist (lua_State *L, GCObject *p, GCObject *limit) {
  while (p != limit) {
    GCObject *next = p->next;
    freeobj(L, p);
    p = next;
  }
}


/*
** Call all finalizers of the objects in the given Lua state, and
** then free all objects, except for the main thread.
*/
void luaC_freeallobjects (lua_State *L) {
  global_State *g = G(L);
  g->gcstp = GCSTPCLS;  /* no extra finalizers after here */
  luaC_changemode(L, KGC_INC);
  separatetobefnz(g, 1);  /* separate all objects with finalizers */
  lua_assert(g->finobj == NULL);
  callallpendingfinalizers(L);
  deletelist(L, g->allgc, obj2gco(g->mainthread));
  lua_assert(g->finobj == NULL);  /* no new finalizers */
  deletelist(L, g->fixedgc, NULL);  /* collect fixed objects */
  lua_assert(g->strt.nuse == 0);
}


static lu_mem atomic (lua_State *L) {
  global_State *g = G(L);
  lu_mem work = 0;
  GCObject *origweak, *origall;
  GCObject *grayagain = g->grayagain;  /* save original list */
  g->grayagain = NULL;
  lua_assert(g->ephemeron == NULL && g->weak == NULL);
  lua_assert(!iswhite(g->mainthread));
  g->gcstate = GCSatomic;
  markobject(g, L);  /* mark running thread */
  /* registry and global metatables may be changed by API */
  markvalue(g, &g->l_registry);
  markmt(g);  /* mark global metatables */
  work += propagateall(g);  /* empties 'gray' list */
  /* remark occasional upvalues of (maybe) dead threads */
  work += remarkupvals(g);
  work += propagateall(g);  /* propagate changes */
  g->gray = grayagain;
  work += propagateall(g);  /* traverse 'grayagain' list */
  convergeephemerons(g);
  /* at this point, all strongly accessible objects are marked. */
  /* Clear values from weak tables, before checking finalizers */
  clearbyvalues(g, g->weak, NULL);
  clearbyvalues(g, g->allweak, NULL);
  origweak = g->weak; origall = g->allweak;
  separatetobefnz(g, 0);  /* separate objects to be finalized */
  work += markbeingfnz(g);  /* mark objects that will be finalized */
  work += propagateall(g);  /* remark, to propagate 'resurrection' */
  convergeephemerons(g);
  /* at this point, all resurrected objects are marked. */
  /* remove dead objects from weak tables */
  clearbykeys(g, g->ephemeron);  /* clear keys from all ephemeron tables */
  clearbykeys(g, g->allweak);  /* clear keys from all 'allweak' tables */
  /* clear values from resurrected weak tables */
  clearbyvalues(g, g->weak, origweak);
  clearbyvalues(g, g->allweak, origall);
  luaS_clearcache(g);
  g->currentwhite = cast_byte(otherwhite(g));  /* flip current white */
  lua_assert(g->gray == NULL);
  return work;  /* estimate of slots marked by 'atomic' */
}
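
/*
** Note (added explanation, not part of the original source): weak tables
** are cleared in two passes. The first 'clearbyvalues' pass runs before
** objects to be finalized are separated and resurrected, so values that
** are dead at that point are removed even if a finalizer later
** resurrects them; 'origweak'/'origall' record how far those lists went.
** The second pass then only needs to visit weak tables added to the
** lists while marking resurrected objects.
*/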


static int sweepstep (lua_State *L, global_State *g,
                      int nextstate, GCObject **nextlist) {
  if (g->sweepgc) {
    l_mem olddebt = g->GCdebt;
    int count;
    g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX, &count);
    g->GCestimate += g->GCdebt - olddebt;  /* update estimate */
    return count;
  }
  else {  /* enter next state */
    g->gcstate = nextstate;
    g->sweepgc = nextlist;
    return 0;  /* no work done */
  }
}
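
/*
** Note (added explanation, not part of the original source): freeing an
** object goes through the memory allocator, which subtracts the freed
** size from 'g->GCdebt'. So after 'sweeplist', 'g->GCdebt - olddebt' is
** minus the number of bytes released by this step, and adding it to
** 'GCestimate' keeps the estimate in sync with the memory actually in
** use. For instance, if the step frees 300 bytes, the debt drops by 300
** and the estimate drops by the same amount.
*/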


static lu_mem singlestep (lua_State *L) {
  global_State *g = G(L);
  lu_mem work;
  lua_assert(!g->gcstopem);  /* collector is not reentrant */
  g->gcstopem = 1;  /* no emergency collections while collecting */
  switch (g->gcstate) {
    case GCSpause: {
      restartcollection(g);
      g->gcstate = GCSpropagate;
      work = 1;
      break;
    }
    case GCSpropagate: {
      if (g->gray == NULL) {  /* no more gray objects? */
        g->gcstate = GCSenteratomic;  /* finish propagate phase */
        work = 0;
      }
      else
        work = propagatemark(g);  /* traverse one gray object */
      break;
    }
    case GCSenteratomic: {
      work = atomic(L);  /* work is what was traversed by 'atomic' */
      entersweep(L);
      g->GCestimate = gettotalbytes(g);  /* first estimate */
      break;
    }
    case GCSswpallgc: {  /* sweep "regular" objects */
      work = sweepstep(L, g, GCSswpfinobj, &g->finobj);
      break;
    }
    case GCSswpfinobj: {  /* sweep objects with finalizers */
      work = sweepstep(L, g, GCSswptobefnz, &g->tobefnz);
      break;
    }
    case GCSswptobefnz: {  /* sweep objects to be finalized */
      work = sweepstep(L, g, GCSswpend, NULL);
      break;
    }
    case GCSswpend: {  /* finish sweeps */
      checkSizes(L, g);
      g->gcstate = GCScallfin;
      work = 0;
      break;
    }
    case GCScallfin: {  /* call remaining finalizers */
      if (g->tobefnz && !g->gcemergency) {
        g->gcstopem = 0;  /* ok collections during finalizers */
        work = runafewfinalizers(L, GCFINMAX) * GCFINALIZECOST;
      }
      else {  /* emergency mode or no more finalizers */
        g->gcstate = GCSpause;  /* finish collection */
        work = 0;
      }
      break;
    }
    default: lua_assert(0); return 0;
  }
  g->gcstopem = 0;
  return work;
}
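
/*
** Summary of the state sequence driven by 'singlestep' (added for
** reference; it restates the switch above):
**   GCSpause -> GCSpropagate -> GCSenteratomic -> GCSswpallgc
**     -> GCSswpfinobj -> GCSswptobefnz -> GCSswpend -> GCScallfin
**     -> GCSpause
*/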


/*
** Advances the garbage collector until it reaches a state allowed
** by 'statesmask'.
*/
void luaC_runtilstate (lua_State *L, int statesmask) {
  global_State *g = G(L);
  while (!testbit(statesmask, g->gcstate))
    singlestep(L);
}


/*
** Performs a basic incremental step. The debt and step size are
** converted from bytes to "units of work"; then the function loops
** running single steps until adding that many units of work or
** finishing a cycle (pause state). Finally, it sets the debt that
** controls when the next step will be performed.
*/
static void incstep (lua_State *L, global_State *g) {
  int stepmul = (getgcparam(g->gcstepmul) | 1);  /* avoid division by 0 */
  l_mem debt = (g->GCdebt / WORK2MEM) * stepmul;
  l_mem stepsize = (g->gcstepsize <= log2maxs(l_mem))
                 ? ((cast(l_mem, 1) << g->gcstepsize) / WORK2MEM) * stepmul
                 : MAX_LMEM;  /* overflow; keep maximum value */
  do {  /* repeat until pause or enough "credit" (negative debt) */
    lu_mem work = singlestep(L);  /* perform one single step */
    debt -= work;
  } while (debt > -stepsize && g->gcstate != GCSpause);
  if (g->gcstate == GCSpause)
    setpause(g);  /* pause until next cycle */
  else {
    debt = (debt / stepmul) * WORK2MEM;  /* convert 'work units' to bytes */
    luaE_setdebt(g, debt);
  }
}
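
/*
** Worked example (illustration only, not part of the original code;
** assumes WORK2MEM == sizeof(TValue) == 16 bytes, a step multiplier of
** 100, and a step size of 2^13 == 8192 bytes, which are the usual
** defaults):
**   stepsize = (8192 / 16) * 100 = 51200 units of work;
**   a debt of 4096 bytes becomes (4096 / 16) * 100 = 25600 units,
**   so the loop keeps running single steps until roughly
**   25600 + 51200 units of work have been performed (or the cycle
**   reaches the pause state).
*/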


/*
** Performs a basic GC step if the collector is running.
*/
void luaC_step (lua_State *L) {
  global_State *g = G(L);
  lua_assert(!g->gcemergency);
  if (gcrunning(g)) {  /* running? */
    if (isdecGCmodegen(g))
      genstep(L, g);
    else
      incstep(L, g);
  }
}
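
/*
** Note (added, not part of the original source): the interpreter does
** not normally call 'luaC_step' directly at allocation sites; it goes
** through the 'luaC_checkGC'/'luaC_condGC' macros (defined in lgc.h),
** which invoke 'luaC_step' only when 'G(L)->GCdebt' is positive, i.e.
** when enough memory has been allocated since the last step.
*/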


/*
** Perform a full collection in incremental mode.
** Before running the collection, check 'keepinvariant'; if it is true,
** there may be some objects marked as black, so the collector has
** to sweep all objects to turn them back to white (as white has not
** changed, nothing will be collected).
*/
static void fullinc (lua_State *L, global_State *g) {
  if (keepinvariant(g))  /* black objects? */
    entersweep(L);  /* sweep everything to turn them back to white */
  /* finish any pending sweep phase to start a new cycle */
  luaC_runtilstate(L, bitmask(GCSpause));
  luaC_runtilstate(L, bitmask(GCScallfin));  /* run up to finalizers */
  /* estimate must be correct after a full GC cycle */
  lua_assert(g->GCestimate == gettotalbytes(g));
  luaC_runtilstate(L, bitmask(GCSpause));  /* finish collection */
  setpause(g);
}


/*
** Performs a full GC cycle; if 'isemergency', set a flag to avoid
** some operations which could change the interpreter state in some
** unexpected ways (running finalizers and shrinking some structures).
*/
void luaC_fullgc (lua_State *L, int isemergency) {
  global_State *g = G(L);
  lua_assert(!g->gcemergency);
  g->gcemergency = isemergency;  /* set flag */
  if (g->gckind == KGC_INC)
    fullinc(L, g);
  else
    fullgen(L, g);
  g->gcemergency = 0;
}
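
/*
** Usage note (added, not part of the original source): a non-emergency
** full cycle is what the C API reaches via 'lua_gc(L, LUA_GCCOLLECT)'
** (and the Lua library via 'collectgarbage("collect")'); the emergency
** form is used by the memory allocator when an allocation fails.
*/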


/* }====================================================== */
