Improve efficiency of (C++) heap allocations related to BytecodeEmitter::code.

This commit is contained in:
Fedor 2019-06-12 13:44:42 +03:00
parent b4340daf09
commit 706af1885a
2 changed files with 24 additions and 14 deletions

View File

@@ -2260,12 +2260,14 @@ BytecodeEmitter::locationOfNameBoundInFunctionScope(JSAtom* name, EmitterScope*
bool bool
BytecodeEmitter::emitCheck(ptrdiff_t delta, ptrdiff_t* offset) BytecodeEmitter::emitCheck(ptrdiff_t delta, ptrdiff_t* offset)
{ {
*offset = code().length(); size_t oldLength = code().length();
*offset = ptrdiff_t(oldLength);
// Start it off moderately large to avoid repeated resizings early on. size_t newLength = oldLength + size_t(delta);
// ~98% of cases fit within 1024 bytes. if (MOZ_UNLIKELY(newLength > MaxBytecodeLength)) {
if (code().capacity() == 0 && !code().reserve(1024)) ReportAllocationOverflow(cx);
return false; return false;
}
if (!code().growBy(delta)) { if (!code().growBy(delta)) {
ReportOutOfMemory(cx); ReportOutOfMemory(cx);
@@ -10697,17 +10699,19 @@ BytecodeEmitter::emitTreeInBranch(ParseNode* pn)
static bool static bool
AllocSrcNote(ExclusiveContext* cx, SrcNotesVector& notes, unsigned* index) AllocSrcNote(ExclusiveContext* cx, SrcNotesVector& notes, unsigned* index)
{ {
// Start it off moderately large to avoid repeated resizings early on. size_t oldLength = notes.length();
// ~99% of cases fit within 256 bytes.
if (notes.capacity() == 0 && !notes.reserve(256))
return false;
if (MOZ_UNLIKELY(oldLength + 1 > MaxSrcNotesLength)) {
ReportAllocationOverflow(cx);
return false;
}
if (!notes.growBy(1)) { if (!notes.growBy(1)) {
ReportOutOfMemory(cx); ReportOutOfMemory(cx);
return false; return false;
} }
*index = notes.length() - 1; *index = oldLength;
return true; return true;
} }
@@ -10833,6 +10837,10 @@ BytecodeEmitter::setSrcNoteOffset(unsigned index, unsigned which, ptrdiff_t offs
/* Maybe this offset was already set to a four-byte value. */ /* Maybe this offset was already set to a four-byte value. */
if (!(*sn & SN_4BYTE_OFFSET_FLAG)) { if (!(*sn & SN_4BYTE_OFFSET_FLAG)) {
/* Insert three dummy bytes that will be overwritten shortly. */ /* Insert three dummy bytes that will be overwritten shortly. */
if (MOZ_UNLIKELY(notes.length() + 3 > MaxSrcNotesLength)) {
ReportAllocationOverflow(cx);
return false;
}
jssrcnote dummy = 0; jssrcnote dummy = 0;
if (!(sn = notes.insert(sn, dummy)) || if (!(sn = notes.insert(sn, dummy)) ||
!(sn = notes.insert(sn, dummy)) || !(sn = notes.insert(sn, dummy)) ||

View File

@@ -109,10 +109,12 @@ struct CGYieldOffsetList {
void finish(YieldOffsetArray& array, uint32_t prologueLength); void finish(YieldOffsetArray& array, uint32_t prologueLength);
}; };
// Upper bounds on the bytecode and source-note vectors, chosen so that
// offsets and indices into them always fit in an int32/uint32. Declared
// const: these are compile-time limits, and a plain mutable `static` in a
// header would give every translation unit its own writable copy.
static const size_t MaxBytecodeLength = INT32_MAX;
static const size_t MaxSrcNotesLength = INT32_MAX;

// Have a few inline elements to avoid heap allocation for tiny sequences.
typedef Vector<jsbytecode, 256> BytecodeVector;
typedef Vector<jssrcnote, 64> SrcNotesVector;
// Linked list of jump instructions that need to be patched. The linked list is // Linked list of jump instructions that need to be patched. The linked list is
// stored in the bytes of the incomplete bytecode that will be patched, so no // stored in the bytes of the incomplete bytecode that will be patched, so no