Commit 903b0c0b authored by oliver@apple.com's avatar oliver@apple.com

Harden JSC a bit with RELEASE_ASSERT

https://bugs.webkit.org/show_bug.cgi?id=107766

Reviewed by Mark Hahnenberg.

Went through and replaced a pile of ASSERTs that were covering
significantly important details (bounds checks, etc) where
having the checks did not impact release performance in any
measurable way.

* API/JSContextRef.cpp:
(JSContextCreateBacktrace):
* assembler/MacroAssembler.h:
(JSC::MacroAssembler::branchAdd32):
(JSC::MacroAssembler::branchMul32):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::handlerForBytecodeOffset):
(JSC::CodeBlock::lineNumberForBytecodeOffset):
(JSC::CodeBlock::bytecodeOffset):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::bytecodeOffsetForCallAtIndex):
(JSC::CodeBlock::bytecodeOffset):
(JSC::CodeBlock::exceptionHandler):
(JSC::CodeBlock::codeOrigin):
(JSC::CodeBlock::immediateSwitchJumpTable):
(JSC::CodeBlock::characterSwitchJumpTable):
(JSC::CodeBlock::stringSwitchJumpTable):
(JSC::CodeBlock::setIdentifiers):
(JSC::baselineCodeBlockForInlineCallFrame):
(JSC::ExecState::uncheckedR):
* bytecode/CodeOrigin.cpp:
(JSC::CodeOrigin::inlineStack):
* bytecode/CodeOrigin.h:
(JSC::CodeOrigin::CodeOrigin):
* dfg/DFGCSEPhase.cpp:
* dfg/DFGOSRExit.cpp:
* dfg/DFGScratchRegisterAllocator.h:
(JSC::DFG::ScratchRegisterAllocator::preserveUsedRegistersToScratchBuffer):
(JSC::DFG::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBuffer):
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::allocate):
(JSC::DFG::SpeculativeJIT::spill):
(JSC::DFG::SpeculativeJIT::integerResult):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::fillInteger):
(JSC::DFG::SpeculativeJIT::fillDouble):
(JSC::DFG::SpeculativeJIT::fillJSValue):
(JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
(JSC::DFG::SpeculativeJIT::fillSpeculateIntStrict):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGValueSource.h:
(JSC::DFG::dataFormatToValueSourceKind):
(JSC::DFG::ValueSource::ValueSource):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
* heap/BlockAllocator.cpp:
(JSC::BlockAllocator::BlockAllocator):
(JSC::BlockAllocator::releaseFreeRegions):
(JSC::BlockAllocator::blockFreeingThreadMain):
* heap/Heap.cpp:
(JSC::Heap::lastChanceToFinalize):
(JSC::Heap::collect):
* interpreter/Interpreter.cpp:
(JSC::Interpreter::throwException):
(JSC::Interpreter::execute):
* jit/GCAwareJITStubRoutine.cpp:
(JSC::GCAwareJITStubRoutine::observeZeroRefCount):
* jit/JIT.cpp:
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
* jit/JITExceptions.cpp:
(JSC::genericThrow):
* jit/JITInlines.h:
(JSC::JIT::emitLoad):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_end):
(JSC::JIT::emit_resolve_operations):
* jit/JITStubRoutine.cpp:
(JSC::JITStubRoutine::observeZeroRefCount):
* jit/JITStubs.cpp:
(JSC::returnToThrowTrampoline):
* runtime/Arguments.cpp:
(JSC::Arguments::getOwnPropertySlot):
(JSC::Arguments::getOwnPropertyDescriptor):
(JSC::Arguments::deleteProperty):
(JSC::Arguments::defineOwnProperty):
(JSC::Arguments::didTearOffActivation):
* runtime/ArrayPrototype.cpp:
(JSC::shift):
(JSC::unshift):
(JSC::arrayProtoFuncLastIndexOf):
* runtime/ButterflyInlines.h:
(JSC::Butterfly::growPropertyStorage):
* runtime/CodeCache.cpp:
(JSC::CodeCache::getFunctionExecutableFromGlobalCode):
* runtime/CodeCache.h:
(JSC::CacheMap::add):
* runtime/Completion.cpp:
(JSC::checkSyntax):
(JSC::evaluate):
* runtime/Executable.cpp:
(JSC::FunctionExecutable::FunctionExecutable):
(JSC::EvalExecutable::unlinkCalls):
(JSC::ProgramExecutable::compileOptimized):
(JSC::ProgramExecutable::unlinkCalls):
(JSC::ProgramExecutable::initializeGlobalProperties):
(JSC::FunctionExecutable::baselineCodeBlockFor):
(JSC::FunctionExecutable::compileOptimizedForCall):
(JSC::FunctionExecutable::compileOptimizedForConstruct):
(JSC::FunctionExecutable::compileForCallInternal):
(JSC::FunctionExecutable::compileForConstructInternal):
(JSC::FunctionExecutable::unlinkCalls):
(JSC::NativeExecutable::hashFor):
* runtime/Executable.h:
(JSC::EvalExecutable::compile):
(JSC::ProgramExecutable::compile):
(JSC::FunctionExecutable::compileForCall):
(JSC::FunctionExecutable::compileForConstruct):
* runtime/IndexingHeader.h:
(JSC::IndexingHeader::setVectorLength):
* runtime/JSArray.cpp:
(JSC::JSArray::pop):
(JSC::JSArray::shiftCountWithArrayStorage):
(JSC::JSArray::shiftCountWithAnyIndexingType):
(JSC::JSArray::unshiftCountWithArrayStorage):
* runtime/JSGlobalObjectFunctions.cpp:
(JSC::jsStrDecimalLiteral):
* runtime/JSObject.cpp:
(JSC::JSObject::copyButterfly):
(JSC::JSObject::defineOwnIndexedProperty):
(JSC::JSObject::putByIndexBeyondVectorLengthWithoutAttributes):
* runtime/JSString.cpp:
(JSC::JSRopeString::getIndexSlowCase):
* yarr/YarrInterpreter.cpp:
(JSC::Yarr::Interpreter::popParenthesesDisjunctionContext):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@140619 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 1963ba09
......@@ -181,7 +181,7 @@ JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize)
}
}
while (true) {
ASSERT(callFrame);
RELEASE_ASSERT(callFrame);
int signedLineNumber;
intptr_t sourceID;
String urlString;
......
2013-01-23 Oliver Hunt <oliver@apple.com>
Harden JSC a bit with RELEASE_ASSERT
https://bugs.webkit.org/show_bug.cgi?id=107766
Reviewed by Mark Hahnenberg.
Went through and replaced a pile of ASSERTs that were covering
significantly important details (bounds checks, etc) where
having the checks did not impact release performance in any
measurable way.
* API/JSContextRef.cpp:
(JSContextCreateBacktrace):
* assembler/MacroAssembler.h:
(JSC::MacroAssembler::branchAdd32):
(JSC::MacroAssembler::branchMul32):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::handlerForBytecodeOffset):
(JSC::CodeBlock::lineNumberForBytecodeOffset):
(JSC::CodeBlock::bytecodeOffset):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::bytecodeOffsetForCallAtIndex):
(JSC::CodeBlock::bytecodeOffset):
(JSC::CodeBlock::exceptionHandler):
(JSC::CodeBlock::codeOrigin):
(JSC::CodeBlock::immediateSwitchJumpTable):
(JSC::CodeBlock::characterSwitchJumpTable):
(JSC::CodeBlock::stringSwitchJumpTable):
(JSC::CodeBlock::setIdentifiers):
(JSC::baselineCodeBlockForInlineCallFrame):
(JSC::ExecState::uncheckedR):
* bytecode/CodeOrigin.cpp:
(JSC::CodeOrigin::inlineStack):
* bytecode/CodeOrigin.h:
(JSC::CodeOrigin::CodeOrigin):
* dfg/DFGCSEPhase.cpp:
* dfg/DFGOSRExit.cpp:
* dfg/DFGScratchRegisterAllocator.h:
(JSC::DFG::ScratchRegisterAllocator::preserveUsedRegistersToScratchBuffer):
(JSC::DFG::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBuffer):
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::allocate):
(JSC::DFG::SpeculativeJIT::spill):
(JSC::DFG::SpeculativeJIT::integerResult):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::fillInteger):
(JSC::DFG::SpeculativeJIT::fillDouble):
(JSC::DFG::SpeculativeJIT::fillJSValue):
(JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
(JSC::DFG::SpeculativeJIT::fillSpeculateIntStrict):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGValueSource.h:
(JSC::DFG::dataFormatToValueSourceKind):
(JSC::DFG::ValueSource::ValueSource):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
* heap/BlockAllocator.cpp:
(JSC::BlockAllocator::BlockAllocator):
(JSC::BlockAllocator::releaseFreeRegions):
(JSC::BlockAllocator::blockFreeingThreadMain):
* heap/Heap.cpp:
(JSC::Heap::lastChanceToFinalize):
(JSC::Heap::collect):
* interpreter/Interpreter.cpp:
(JSC::Interpreter::throwException):
(JSC::Interpreter::execute):
* jit/GCAwareJITStubRoutine.cpp:
(JSC::GCAwareJITStubRoutine::observeZeroRefCount):
* jit/JIT.cpp:
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
* jit/JITExceptions.cpp:
(JSC::genericThrow):
* jit/JITInlines.h:
(JSC::JIT::emitLoad):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_end):
(JSC::JIT::emit_resolve_operations):
* jit/JITStubRoutine.cpp:
(JSC::JITStubRoutine::observeZeroRefCount):
* jit/JITStubs.cpp:
(JSC::returnToThrowTrampoline):
* runtime/Arguments.cpp:
(JSC::Arguments::getOwnPropertySlot):
(JSC::Arguments::getOwnPropertyDescriptor):
(JSC::Arguments::deleteProperty):
(JSC::Arguments::defineOwnProperty):
(JSC::Arguments::didTearOffActivation):
* runtime/ArrayPrototype.cpp:
(JSC::shift):
(JSC::unshift):
(JSC::arrayProtoFuncLastIndexOf):
* runtime/ButterflyInlines.h:
(JSC::Butterfly::growPropertyStorage):
* runtime/CodeCache.cpp:
(JSC::CodeCache::getFunctionExecutableFromGlobalCode):
* runtime/CodeCache.h:
(JSC::CacheMap::add):
* runtime/Completion.cpp:
(JSC::checkSyntax):
(JSC::evaluate):
* runtime/Executable.cpp:
(JSC::FunctionExecutable::FunctionExecutable):
(JSC::EvalExecutable::unlinkCalls):
(JSC::ProgramExecutable::compileOptimized):
(JSC::ProgramExecutable::unlinkCalls):
(JSC::ProgramExecutable::initializeGlobalProperties):
(JSC::FunctionExecutable::baselineCodeBlockFor):
(JSC::FunctionExecutable::compileOptimizedForCall):
(JSC::FunctionExecutable::compileOptimizedForConstruct):
(JSC::FunctionExecutable::compileForCallInternal):
(JSC::FunctionExecutable::compileForConstructInternal):
(JSC::FunctionExecutable::unlinkCalls):
(JSC::NativeExecutable::hashFor):
* runtime/Executable.h:
(JSC::EvalExecutable::compile):
(JSC::ProgramExecutable::compile):
(JSC::FunctionExecutable::compileForCall):
(JSC::FunctionExecutable::compileForConstruct):
* runtime/IndexingHeader.h:
(JSC::IndexingHeader::setVectorLength):
* runtime/JSArray.cpp:
(JSC::JSArray::pop):
(JSC::JSArray::shiftCountWithArrayStorage):
(JSC::JSArray::shiftCountWithAnyIndexingType):
(JSC::JSArray::unshiftCountWithArrayStorage):
* runtime/JSGlobalObjectFunctions.cpp:
(JSC::jsStrDecimalLiteral):
* runtime/JSObject.cpp:
(JSC::JSObject::copyButterfly):
(JSC::JSObject::defineOwnIndexedProperty):
(JSC::JSObject::putByIndexBeyondVectorLengthWithoutAttributes):
* runtime/JSString.cpp:
(JSC::JSRopeString::getIndexSlowCase):
* yarr/YarrInterpreter.cpp:
(JSC::Yarr::Interpreter::popParenthesesDisjunctionContext):
2013-01-23 Filip Pizlo <fpizlo@apple.com>
Constant folding an access to an uncaptured variable that is captured later in the same basic block shouldn't lead to assertion failures
......
......@@ -1351,7 +1351,7 @@ public:
Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
{
if (src == dest)
ASSERT(scratchRegisterForBlinding());
RELEASE_ASSERT(scratchRegisterForBlinding());
if (shouldBlind(imm)) {
if (src == dest) {
......@@ -1369,7 +1369,7 @@ public:
Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
if (src == dest)
ASSERT(scratchRegisterForBlinding());
RELEASE_ASSERT(scratchRegisterForBlinding());
if (shouldBlind(imm)) {
if (src == dest) {
......
......@@ -1494,7 +1494,7 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
#if ENABLE(LLINT_C_LOOP)
default:
ASSERT(false); // We should never get here.
RELEASE_ASSERT_NOT_REACHED();
#endif
}
......@@ -2492,7 +2492,7 @@ void CodeBlock::dumpBytecodeComments()
HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < instructions().size());
RELEASE_ASSERT(bytecodeOffset < instructions().size());
if (!m_rareData)
return 0;
......@@ -2510,7 +2510,7 @@ HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < instructions().size());
RELEASE_ASSERT(bytecodeOffset < instructions().size());
return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
......@@ -2730,11 +2730,11 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
&& returnAddress.value() <= LLInt::getCodePtr(llint_end))
#endif
{
ASSERT(exec->codeBlock());
ASSERT(exec->codeBlock() == this);
ASSERT(JITCode::isBaselineCode(getJITType()));
RELEASE_ASSERT(exec->codeBlock());
RELEASE_ASSERT(exec->codeBlock() == this);
RELEASE_ASSERT(JITCode::isBaselineCode(getJITType()));
Instruction* instruction = exec->currentVPC();
ASSERT(instruction);
RELEASE_ASSERT(instruction);
instruction = adjustPCIfAtCallSite(instruction);
return bytecodeOffset(instruction);
......@@ -2753,7 +2753,7 @@ unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddre
CallReturnOffsetToBytecodeOffset* result =
binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
ASSERT(result->callReturnOffset == callReturnOffset);
RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
return result->bytecodeOffset;
}
......
......@@ -278,7 +278,7 @@ namespace JSC {
Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
if (!callIndices.size())
return 1;
ASSERT(index < m_rareData->m_callReturnIndexVector.size());
RELEASE_ASSERT(index < m_rareData->m_callReturnIndexVector.size());
return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
}
......@@ -457,7 +457,7 @@ namespace JSC {
unsigned bytecodeOffset(Instruction* returnAddress)
{
ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
......@@ -814,7 +814,7 @@ namespace JSC {
}
}
HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
......@@ -849,7 +849,7 @@ namespace JSC {
CodeOrigin codeOrigin(unsigned index)
{
ASSERT(m_rareData);
RELEASE_ASSERT(m_rareData);
return m_rareData->m_codeOrigins[index].codeOrigin;
}
......@@ -940,15 +940,15 @@ namespace JSC {
size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }
......@@ -1158,7 +1158,7 @@ namespace JSC {
void setIdentifiers(const Vector<Identifier>& identifiers)
{
ASSERT(m_identifiers.isEmpty());
RELEASE_ASSERT(m_identifiers.isEmpty());
m_identifiers.appendVector(identifiers);
}
......@@ -1459,9 +1459,9 @@ namespace JSC {
inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
ASSERT(inlineCallFrame);
RELEASE_ASSERT(inlineCallFrame);
ExecutableBase* executable = inlineCallFrame->executable.get();
ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}
......@@ -1495,7 +1495,7 @@ namespace JSC {
inline Register& ExecState::uncheckedR(int index)
{
ASSERT(index < FirstConstantRegisterIndex);
RELEASE_ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
......
......@@ -53,7 +53,7 @@ Vector<CodeOrigin> CodeOrigin::inlineStack() const
unsigned index = result.size() - 2;
for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
result[index--] = current->caller;
ASSERT(!result[0].inlineCallFrame);
RELEASE_ASSERT(!result[0].inlineCallFrame);
return result;
}
......
......@@ -65,8 +65,8 @@ struct CodeOrigin {
, valueProfileOffset(valueProfileOffset)
, inlineCallFrame(inlineCallFrame)
{
ASSERT(bytecodeIndex <= maximumBytecodeIndex);
ASSERT(valueProfileOffset < (1u << 3));
RELEASE_ASSERT(bytecodeIndex <= maximumBytecodeIndex);
RELEASE_ASSERT(valueProfileOffset < (1u << 3));
}
bool isSet() const { return bytecodeIndex != maximumBytecodeIndex; }
......
......@@ -30,6 +30,7 @@
#include "DFGGraph.h"
#include "DFGPhase.h"
#include "JSCellInlines.h"
#include <wtf/FastBitVector.h>
namespace JSC { namespace DFG {
......
......@@ -30,6 +30,7 @@
#include "DFGAssemblyHelpers.h"
#include "DFGSpeculativeJIT.h"
#include "JSCellInlines.h"
namespace JSC { namespace DFG {
......
......@@ -137,14 +137,14 @@ public:
if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
scratchGPR = GPRInfo::toRegister(i);
}
ASSERT(scratchGPR != InvalidGPRReg);
RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
if (m_usedRegisters.getFPRByIndex(i)) {
jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
}
}
ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
......@@ -161,7 +161,7 @@ public:
break;
}
}
ASSERT(scratchGPR != InvalidGPRReg);
RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
......
......@@ -189,7 +189,7 @@ public:
if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
GenerationInfo& info = m_generationInfo[spillMe];
ASSERT(info.registerFormat() != DataFormatJSDouble);
RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
if ((info.registerFormat() & DataFormatJS))
m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
......@@ -203,7 +203,7 @@ public:
if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
GenerationInfo& info = m_generationInfo[spillMe];
ASSERT(info.registerFormat() != DataFormatJSDouble);
RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
if ((info.registerFormat() & DataFormatJS))
m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
......@@ -497,7 +497,7 @@ public:
default:
// The following code handles JSValues, int32s, and cells.
ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
GPRReg reg = info.gpr();
// We need to box int32 and cell values ...
......@@ -527,7 +527,7 @@ public:
default:
// The following code handles JSValues.
ASSERT(spillFormat & DataFormatJS);
RELEASE_ASSERT(spillFormat & DataFormatJS);
m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
info.spill(*m_stream, spillMe, spillFormat);
......@@ -799,7 +799,7 @@ public:
info.initInteger(nodeIndex, node.refCount(), reg);
} else {
#if USE(JSVALUE64)
ASSERT(format == DataFormatJSInteger);
RELEASE_ASSERT(format == DataFormatJSInteger);
m_jit.jitAssertIsJSInt32(reg);
m_gprs.retain(reg, virtualRegister, SpillOrderJS);
info.initJSValue(nodeIndex, node.refCount(), reg, format);
......
......@@ -70,7 +70,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat
// Tag it, since fillInteger() is used when we want a boxed integer.
m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
} else {
ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
RELEASE_ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
}
......@@ -176,7 +176,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
default:
GPRReg gpr = allocate();
ASSERT(spillFormat & DataFormatJS);
RELEASE_ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
......@@ -309,7 +309,7 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
spillFormat = DataFormatJSDouble;
} else
ASSERT(spillFormat & DataFormatJS);
RELEASE_ASSERT(spillFormat & DataFormatJS);
}
info.fillJSValue(*m_stream, gpr, spillFormat);
}
......@@ -692,7 +692,7 @@ bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool in
if (branchIndexInBlock != UINT_MAX) {
NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
ASSERT(node.adjustedRefCount() == 1);
RELEASE_ASSERT(node.adjustedRefCount() == 1);
nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);
......@@ -992,7 +992,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
void SpeculativeJIT::emitCall(Node& node)
{
if (node.op() != Call)
ASSERT(node.op() == Construct);
RELEASE_ASSERT(node.op() == Construct);
// For constructors, the this argument is not passed but we have to make space
// for it.
......@@ -1094,7 +1094,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
DataFormat spillFormat = info.spillFormat();
ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
......@@ -1207,7 +1207,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
{
DataFormat mustBeDataFormatInteger;
GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger, BackwardSpeculation);
ASSERT(mustBeDataFormatInteger == DataFormatInteger);
RELEASE_ASSERT(mustBeDataFormatInteger == DataFormatInteger);
return result;
}
......@@ -1272,7 +1272,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex, SpeculationDirec
default:
GPRReg gpr = allocate();
ASSERT(spillFormat & DataFormatJS);
RELEASE_ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
......@@ -1395,7 +1395,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, SpeculationDirecti
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode, direction);
return gpr;
}
ASSERT(info.spillFormat() & DataFormatJS);
RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
......@@ -1471,7 +1471,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex, SpeculationDire
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode, direction);
return gpr;
}
ASSERT(info.spillFormat() & DataFormatJS);
RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
......@@ -2805,7 +2805,7 @@ void SpeculativeJIT::compile(Node& node)
alreadyHandled = true;
break;
case Array::Generic: {
ASSERT(node.op() == PutByVal);
RELEASE_ASSERT(node.op() == PutByVal);
JSValueOperand arg1(this, child1);
JSValueOperand arg2(this, child2);
......@@ -3463,7 +3463,7 @@ void SpeculativeJIT::compile(Node& node)
globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node.indexingType());
ASSERT(structure->indexingType() == node.indexingType());
RELEASE_ASSERT(structure->indexingType() == node.indexingType());
ASSERT(
hasUndecided(structure->indexingType())
|| hasInt32(structure->indexingType())
......@@ -3769,7 +3769,7 @@ void SpeculativeJIT::compile(Node& node)
emitAllocateJSArray(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), resultGPR, storageGPR, numElements);
ASSERT(indexingType & IsArray);
RELEASE_ASSERT(indexingType & IsArray);
JSValue* data = m_jit.codeBlock()->constantBuffer(node.startConstant());
if (indexingType == ArrayWithDouble) {
for (unsigned index = 0; index < node.numConstants(); ++index) {
......@@ -4655,7 +4655,7 @@ void SpeculativeJIT::compile(Node& node)
}
case CreateActivation: {
ASSERT(!node.codeOrigin.inlineCallFrame);
RELEASE_ASSERT(!node.codeOrigin.inlineCallFrame);
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
......@@ -4700,7 +4700,7 @@ void SpeculativeJIT::compile(Node& node)
}
case TearOffActivation: {
ASSERT(!node.codeOrigin.inlineCallFrame);
RELEASE_ASSERT(!node.codeOrigin.inlineCallFrame);
JSValueOperand activationValue(this, node.child1());
GPRTemporary scratch(this);
......@@ -4767,7 +4767,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.argumentsRegisterFor(node.codeOrigin))));
}
ASSERT(!node.codeOrigin.inlineCallFrame);
RELEASE_ASSERT(!node.codeOrigin.inlineCallFrame);
m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
m_jit.sub32(TrustedImm32(1), resultGPR);
integerResult(resultGPR, m_compileIndex);
......
......@@ -65,7 +65,7 @@ static inline ValueSourceKind dataFormatToValueSourceKind(DataFormat dataFormat)
case DataFormatArguments:
return ArgumentsSource;
default:
ASSERT(dataFormat & DataFormatJS);
RELEASE_ASSERT(dataFormat & DataFormatJS);
return ValueInJSStack;
}
}
......@@ -122,7 +122,7 @@ public:
explicit ValueSource(NodeIndex nodeIndex)
: m_nodeIndex(nodeIndex)
{
ASSERT(nodeIndex != NoNode);
RELEASE_ASSERT(nodeIndex != NoNode);
ASSERT(kind() == HaveNode);
}
......
......@@ -30,6 +30,7 @@
#include "DFGGraph.h"
#include "DFGScoreBoard.h"
#include "JSCellInlines.h"
namespace JSC { namespace DFG {
......
......@@ -44,7 +44,7 @@ BlockAllocator::BlockAllocator()
, m_blockFreeingThreadShouldQuit(false)
, m_blockFreeingThread(createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree"))
{
ASSERT(m_blockFreeingThread);
RELEASE_ASSERT(m_blockFreeingThread);
m_regionLock.Init();
}
......@@ -69,7 +69,7 @@ void BlockAllocator::releaseFreeRegions()
region = 0;
else {
region = m_emptyRegions.removeHead();
ASSERT(region);
RELEASE_ASSERT(region);
m_numberOfEmptyRegions--;
}
}
......@@ -141,7 +141,7 @@ void BlockAllocator::blockFreeingThreadMain()
region = 0;
else {
region = m_emptyRegions.removeHead();
ASSERT(region);
RELEASE_ASSERT(region);
m_numberOfEmptyRegions--;
}
}
......
......@@ -280,8 +280,8 @@ bool Heap::isPagedOut(double deadline)
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
ASSERT(!m_globalData->dynamicGlobalObject);
ASSERT(m_operationInProgress == NoOperation);
RELEASE_ASSERT(!m_globalData->dynamicGlobalObject);
RELEASE_ASSERT(m_operationInProgress == NoOperation);
m_objectSpace.lastChanceToFinalize();
......@@ -718,7 +718,7 @@ void Heap::collect(SweepToggle sweepToggle)
GCPHASE(Collect);
ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
RELEASE_ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
ASSERT(m_isSafeToCollect);
JAVASCRIPTCORE_GC_BEGIN();
RELEASE_ASSERT(m_operationInProgress == NoOperation);
......
......@@ -790,7 +790,7 @@ NEVER_INLINE HandlerInfo* Interpreter::throwException(CallFrame*& callFrame, JSV
int currentDepth = depth(codeBlock, scope);
int targetDepth = handler->scopeDepth;
scopeDelta = currentDepth - targetDepth;
ASSERT(scopeDelta >= 0);
RELEASE_ASSERT(scopeDelta >= 0);
}
while (scopeDelta--)
scope = scope->next();
......@@ -835,7 +835,9 @@ JSValue Interpreter::execute(ProgramExecutable* program, CallFrame* callFrame, J
ASSERT(isValidThisObject(thisObj, callFrame));
ASSERT(!globalData.exception);
RELEASE_ASSERT(!globalData.isCollectorBusy());
ASSERT(!globalData.isCollectorBusy());
if (globalData.isCollectorBusy())
return jsNull();
StackStats::CheckPoint stackCheckPoint;
const StackBounds& nativeStack = wtfThreadData().stack();
......@@ -1264,7 +1266,7 @@ JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSValue
JSObject* variableObject;
for (JSScope* node = scope; ; node = node->next()) {
ASSERT(node);
RELEASE_ASSERT(node);
if (node->isVariableObject() && !node->isNameScopeObject()) {